Calls¶
When working with Large Language Model (LLM) APIs in Mirascope, a "call" refers to making a request to an LLM provider's API with particular settings and a prompt.
The call decorator is a core feature of the Mirascope library, designed to simplify and streamline interactions with various LLM providers. It lets you transform prompt templates written as Python functions into LLM API calls with minimal boilerplate while providing type safety and consistency across different providers.
We currently support OpenAI, Anthropic, Google (Gemini/Vertex), Groq, xAI, Mistral, Cohere, LiteLLM, Azure AI, and Amazon Bedrock.
If there are any providers we don't yet support that you'd like to see supported, let us know!
API Documentation
Basic Usage and Syntax¶
Let's take a look at a basic example using Mirascope vs. official provider SDKs:
Mirascope
from mirascope import BaseMessageParam, llm
@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseMessageParam, llm
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseMessageParam, llm
@llm.call(provider="google", model="gemini-2.0-flash")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseMessageParam, llm
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseMessageParam, llm
@llm.call(provider="mistral", model="mistral-large-latest")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseMessageParam, llm
@llm.call(provider="cohere", model="command-r-plus")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseMessageParam, llm
@llm.call(provider="litellm", model="gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseMessageParam, llm
@llm.call(provider="azure", model="gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseMessageParam, llm
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
Official SDK
from openai import OpenAI
client = OpenAI()
def recommend_book(genre: str) -> str:
completion = client.chat.completions.create(
model="gpt-4o-mini",
messages=[{"role": "user", "content": f"Recommend a {genre} book"}],
)
return str(completion.choices[0].message.content)
output = recommend_book("fantasy")
print(output)
from anthropic import Anthropic
client = Anthropic()
def recommend_book(genre: str) -> str:
message = client.messages.create(
model="claude-3-5-sonnet-latest",
messages=[{"role": "user", "content": f"Recommend a {genre} book"}],
max_tokens=1024,
)
block = message.content[0]
return block.text if block.type == "text" else ""
output = recommend_book("fantasy")
print(output)
from google.genai import Client
client = Client()
def recommend_book(genre: str) -> str:
generation = client.models.generate_content(
model="gemini-2.0-flash",
contents={"role": "user", "parts": [{"text": f"Recommend a {genre} book"}]}, # pyright: ignore [reportArgumentType]
)
return generation.candidates[0].content.parts[0].text # pyright: ignore [reportOptionalSubscript, reportOptionalMemberAccess, reportReturnType]
output = recommend_book("fantasy")
print(output)
from groq import Groq
client = Groq()
def recommend_book(genre: str) -> str:
completion = client.chat.completions.create(
model="llama-3.1-70b-versatile",
messages=[{"role": "user", "content": f"Recommend a {genre} book"}],
)
return str(completion.choices[0].message.content)
output = recommend_book("fantasy")
print(output)
import os
from typing import cast
from mistralai import Mistral
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
def recommend_book(genre: str) -> str | None:
completion = client.chat.complete(
model="mistral-large-latest",
messages=[{"role": "user", "content": f"Recommend a {genre} book"}],
)
if completion and (choices := completion.choices):
return cast(str, choices[0].message.content)
output = recommend_book("fantasy")
print(output)
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import ChatRequestMessage
from azure.core.credentials import AzureKeyCredential
client = ChatCompletionsClient(
endpoint="YOUR_ENDPOINT", credential=AzureKeyCredential("YOUR_KEY")
)
def recommend_book(genre: str) -> str:
completion = client.complete(
model="gpt-4o-mini",
messages=[
ChatRequestMessage({"role": "user", "content": f"Recommend a {genre} book"})
],
)
message = completion.choices[0].message
return message.content if message.content is not None else ""
output = recommend_book("fantasy")
print(output)
import boto3
bedrock_client = boto3.client(service_name="bedrock-runtime")
def recommend_book(genre: str) -> str:
messages = [{"role": "user", "content": [{"text": f"Recommend a {genre} book"}]}]
response = bedrock_client.converse(
modelId="amazon.nova-lite-v1:0",
messages=messages,
inferenceConfig={"maxTokens": 1024},
)
output_message = response["output"]["message"]
content = ""
for content_piece in output_message["content"]:
if "text" in content_piece:
content += content_piece["text"]
return content
output = recommend_book("fantasy")
print(output)
Notice how Mirascope makes calls more readable by reducing boilerplate and standardizing interactions with LLM providers.
The llm.call decorator accepts provider and model arguments and returns a provider-agnostic CallResponse instance that provides a consistent interface regardless of the underlying provider. You can find more information on CallResponse in the section below on handling responses.
Note that the @prompt_template decorator is optional unless you're using string templates.
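For instance, here's a minimal sketch of the string-template style (the same pattern appears again in the override examples below):

from mirascope import llm
from mirascope.core import prompt_template

@llm.call(provider="openai", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...

response = recommend_book("fantasy")
print(response.content)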
Runtime Provider Overrides¶
You can override provider settings at runtime using llm.override. This takes a function decorated with llm.call and lets you specify:
- provider: Change the provider being called
- model: Use a different model
- call_params: Override call parameters like temperature
- client: Use a different client instance
When overriding with a specific provider, you must specify the model parameter.
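Assuming partial overrides are allowed (the only stated constraint is that overriding the provider requires also specifying the model), a minimal sketch overriding only call_params might look like this:

from mirascope import llm

@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book"

# Override only the call parameters; provider and model stay as declared on the decorator.
response = llm.override(recommend_book, call_params={"temperature": 0.3})("fantasy")
print(response.content)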
from mirascope import llm
@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="openai",
model="gpt-4o-mini",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
@llm.call(provider="google", model="gemini-2.0-flash")
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
@llm.call(provider="xai", model="grok-3")
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
@llm.call(provider="mistral", model="mistral-large-latest")
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
@llm.call(provider="cohere", model="command-r-plus")
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
@llm.call(provider="litellm", model="gpt-4o-mini")
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
@llm.call(provider="azure", model="gpt-4o-mini")
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import Messages
@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import Messages
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="openai",
model="gpt-4o-mini",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import Messages
@llm.call(provider="google", model="gemini-2.0-flash")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import Messages
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import Messages
@llm.call(provider="xai", model="grok-3")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import Messages
@llm.call(provider="mistral", model="mistral-large-latest")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import Messages
@llm.call(provider="cohere", model="command-r-plus")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import Messages
@llm.call(provider="litellm", model="gpt-4o-mini")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import Messages
@llm.call(provider="azure", model="gpt-4o-mini")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import Messages
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import prompt_template
@llm.call(provider="openai", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
response = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import prompt_template
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
response = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="openai",
model="gpt-4o-mini",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import prompt_template
@llm.call(provider="google", model="gemini-2.0-flash")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
response = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import prompt_template
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
response = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import prompt_template
@llm.call(provider="xai", model="grok-3")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
response = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import prompt_template
@llm.call(provider="mistral", model="mistral-large-latest")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
response = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import prompt_template
@llm.call(provider="cohere", model="command-r-plus")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
response = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import prompt_template
@llm.call(provider="litellm", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
response = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import prompt_template
@llm.call(provider="azure", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
response = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import prompt_template
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
response = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import BaseMessageParam
@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import BaseMessageParam
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="openai",
model="gpt-4o-mini",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import BaseMessageParam
@llm.call(provider="google", model="gemini-2.0-flash")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import BaseMessageParam
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import BaseMessageParam
@llm.call(provider="xai", model="grok-3")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import BaseMessageParam
@llm.call(provider="mistral", model="mistral-large-latest")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import BaseMessageParam
@llm.call(provider="cohere", model="command-r-plus")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import BaseMessageParam
@llm.call(provider="litellm", model="gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import BaseMessageParam
@llm.call(provider="azure", model="gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
from mirascope import llm
from mirascope.core import BaseMessageParam
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
override_response = llm.override(
recommend_book,
provider="anthropic",
model="claude-3-5-sonnet-latest",
call_params={"temperature": 0.7},
)("fantasy")
print(override_response.content)
Handling Responses¶
Common Response Properties and Methods¶
API Documentation
All BaseCallResponse objects share these common properties:
- content: The main text content of the response. If no content is present, this will be the empty string.
- finish_reasons: A list of reasons why the generation finished (e.g., "stop", "length"). These will be typed specifically for the provider used. If no finish reasons are present, this will be None.
- model: The name of the model used for generation.
- id: A unique identifier for the response if available. Otherwise this will be None.
- usage: Information about token usage for the call if available. Otherwise this will be None.
- input_tokens: The number of input tokens used if available. Otherwise this will be None.
- output_tokens: The number of output tokens generated if available. Otherwise this will be None.
- cost: An estimated cost of the API call if available. Otherwise this will be None.
- message_param: The assistant's response formatted as a message parameter.
- tools: A list of provider-specific tools used in the response, if any. Otherwise this will be None. Check out the Tools documentation for more details.
- tool: The first tool used in the response, if any. Otherwise this will be None. Check out the Tools documentation for more details.
- tool_types: A list of tool types used in the call, if any. Otherwise this will be None.
- prompt_template: The prompt template used for the call.
- fn_args: The arguments passed to the function.
- dynamic_config: The dynamic configuration used for the call.
- metadata: Any metadata provided using the dynamic configuration.
- messages: The list of messages sent in the request.
- call_params: The call parameters provided to the call decorator.
- call_kwargs: The finalized keyword arguments used to make the API call.
- user_message_param: The most recent user message, if any. Otherwise this will be None.
- start_time: The timestamp when the call started.
- end_time: The timestamp when the call ended.
There are also two common methods:
- __str__: Returns the content property of the response for easy printing.
- tool_message_params: Creates message parameters for tool call results. Check out the Tools documentation for more information.
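For a quick illustrative sketch of reading these properties off a response (exact values and availability depend on the provider):

from mirascope import llm

@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book"

response: llm.CallResponse = recommend_book("fantasy")
print(response.content)         # main text content of the response
print(response.model)           # model used for the generation
print(response.finish_reasons)  # provider-specific finish reasons, if any
print(response.usage)           # token usage information, if available
print(response.cost)            # estimated cost, if available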
Multi-Modal Outputs¶
While most LLM providers focus on text outputs, some providers support additional output modalities like audio. The availability of multi-modal outputs varies among providers:
| Provider | Text | Audio | Image |
|---|---|---|---|
| OpenAI | ✓ | ✓ | - |
| Anthropic | ✓ | - | - |
| Mistral | ✓ | - | - |
| Google Gemini | ✓ | - | - |
| Groq | ✓ | - | - |
| Cohere | ✓ | - | - |
| LiteLLM | ✓ | - | - |
| Azure AI | ✓ | - | - |
Legend: ✓ (Supported), - (Not Supported)
Audio Outputs¶
- audio: Configuration for the audio output (voice, format, etc.)
- modalities: List of output modalities to receive (e.g. ["text", "audio"])
For providers that support audio outputs, you can receive both text and audio responses from your calls:
import io
import wave
from pydub.playback import play
from pydub import AudioSegment
from mirascope.core import openai
@openai.call(
"gpt-4o-audio-preview",
call_params={
"audio": {"voice": "alloy", "format": "wav"},
"modalities": ["text", "audio"],
},
)
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
response = recommend_book(genre="fantasy")
print(response.audio_transcript)
if audio := response.audio:
audio_io = io.BytesIO(audio)
with wave.open(audio_io, "rb") as f:
audio_segment = AudioSegment.from_raw(
audio_io,
sample_width=f.getsampwidth(),
frame_rate=f.getframerate(),
channels=f.getnchannels(),
)
play(audio_segment)
import io
import wave
from pydub.playback import play
from pydub import AudioSegment
from mirascope.core import openai, Messages
@openai.call(
"gpt-4o-audio-preview",
call_params={
"audio": {"voice": "alloy", "format": "wav"},
"modalities": ["text", "audio"],
},
)
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
response = recommend_book(genre="fantasy")
print(response.audio_transcript)
if audio := response.audio:
audio_io = io.BytesIO(audio)
with wave.open(audio_io, "rb") as f:
audio_segment = AudioSegment.from_raw(
audio_io,
sample_width=f.getsampwidth(),
frame_rate=f.getframerate(),
channels=f.getnchannels(),
)
play(audio_segment)
import io
import wave
from pydub.playback import play
from pydub import AudioSegment
from mirascope.core import openai, prompt_template
@openai.call(
"gpt-4o-audio-preview",
call_params={
"audio": {"voice": "alloy", "format": "wav"},
"modalities": ["text", "audio"],
},
)
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
response = recommend_book(genre="fantasy")
print(response.audio_transcript)
if audio := response.audio:
audio_io = io.BytesIO(audio)
with wave.open(audio_io, "rb") as f:
audio_segment = AudioSegment.from_raw(
audio_io,
sample_width=f.getsampwidth(),
frame_rate=f.getframerate(),
channels=f.getnchannels(),
)
play(audio_segment)
import io
import wave
from pydub.playback import play
from pydub import AudioSegment
from mirascope.core import openai, BaseMessageParam
@openai.call(
"gpt-4o-audio-preview",
call_params={
"audio": {"voice": "alloy", "format": "wav"},
"modalities": ["text", "audio"],
},
)
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response = recommend_book(genre="fantasy")
print(response.audio_transcript)
if audio := response.audio:
audio_io = io.BytesIO(audio)
with wave.open(audio_io, "rb") as f:
audio_segment = AudioSegment.from_raw(
audio_io,
sample_width=f.getsampwidth(),
frame_rate=f.getframerate(),
channels=f.getnchannels(),
)
play(audio_segment)
When using models that support audio outputs, you'll have access to:
- content: The text content of the response
- audio: The raw audio bytes of the response
- audio_transcript: The transcript of the audio response
Audio Playback Requirements
The example above uses pydub and ffmpeg for audio playback, but you can use any audio processing libraries or media players that can handle WAV format audio data. Choose the tools that best fit your needs and environment.
If you decide to use pydub:
- Install pydub: pip install pydub
- Install ffmpeg: Available from ffmpeg.org or through system package managers
Voice Options
For providers that support audio outputs, refer to their documentation for available voice options and configurations:
- OpenAI: Text to Speech Guide
Common Parameters Across Providers¶
There are several common parameters that you'll find across all providers when using the call decorator. These parameters allow you to control various aspects of the LLM call:
- model: The only required parameter for all providers, which may be passed in as a standard argument (whereas all others are optional and must be provided as keyword arguments). It specifies which language model to use for the generation. Each provider has its own set of available models.
- stream: A boolean that determines whether the response should be streamed or returned as a complete response. We cover this in more detail in the Streams documentation.
- response_model: A Pydantic BaseModel type that defines how to structure the response. We cover this in more detail in the Response Models documentation.
- output_parser: A function for parsing the response output. We cover this in more detail in the Output Parsers documentation.
- json_mode: A boolean that determines whether to use JSON mode or not. We cover this in more detail in the JSON Mode documentation.
- tools: A list of tools that the model may request to use in its response. We cover this in more detail in the Tools documentation.
- client: A custom client to use when making the call to the LLM. We cover this in more detail in the Custom Client section below.
- call_params: The provider-specific parameters to use when making the call to that provider's API. We cover this in more detail in the Provider-Specific Usage section below.
These common parameters provide a consistent way to control the behavior of LLM calls across different providers. Keep in mind that while these parameters are widely supported, there might be slight variations in how they're implemented or their exact effects across different providers (and the documentation should cover any such differences).
Since call_params is just a TypedDict, you can always include any additional keys at the expense of type errors (and potentially unknown behavior). This presents one way to pass provider-specific parameters (or deprecated parameters) while still using the general interface (see the sketch after the provider examples below).
from mirascope import BaseMessageParam, llm
@llm.call(provider="openai", model="gpt-4o-mini", call_params={"max_tokens": 512})
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
from mirascope import BaseMessageParam, llm
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest", call_params={"max_tokens": 512})
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
from mirascope import BaseMessageParam, llm
@llm.call(provider="google", model="gemini-2.0-flash", call_params={"max_tokens": 512})
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
from mirascope import BaseMessageParam, llm
@llm.call(provider="groq", model="llama-3.1-70b-versatile", call_params={"max_tokens": 512})
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
from mirascope import BaseMessageParam, llm
@llm.call(provider="xai", model="grok-3", call_params={"max_tokens": 512})
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
from mirascope import BaseMessageParam, llm
@llm.call(provider="mistral", model="mistral-large-latest", call_params={"max_tokens": 512})
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
from mirascope import BaseMessageParam, llm
@llm.call(provider="cohere", model="command-r-plus", call_params={"max_tokens": 512})
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
from mirascope import BaseMessageParam, llm
@llm.call(provider="litellm", model="gpt-4o-mini", call_params={"max_tokens": 512})
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
from mirascope import BaseMessageParam, llm
@llm.call(provider="azure", model="gpt-4o-mini", call_params={"max_tokens": 512})
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
from mirascope import BaseMessageParam, llm
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0", call_params={"max_tokens": 512})
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: llm.CallResponse = recommend_book("fantasy")
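Here's a minimal sketch of that extra-keys escape hatch. The extra key below is purely hypothetical, your type checker will flag it, and the provider must actually support whatever key you pass through:

from mirascope import BaseMessageParam, llm

@llm.call(
    provider="openai",
    model="gpt-4o-mini",
    # "some_hypothetical_parameter" is an illustrative placeholder, not a real OpenAI parameter;
    # extra keys are forwarded to the provider API at the expense of type errors.
    call_params={"max_tokens": 512, "some_hypothetical_parameter": True},  # pyright: ignore [reportArgumentType]
)
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]

response: llm.CallResponse = recommend_book("fantasy")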
Dynamic Configuration¶
Often you will want (or need) to configure your calls dynamically at runtime. Mirascope supports returning a BaseDynamicConfig from your prompt template, which will then be used to dynamically update the settings of the call.
In all cases, you will need to return your prompt messages through the messages keyword of the dynamic config unless you're using string templates.
Call Params¶
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="google", model="gemini-2.0-flash")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="xai", model="grok-3")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="mistral", model="mistral-large-latest")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="cohere", model="command-r-plus")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="litellm", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="azure", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="google", model="gemini-2.0-flash")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="xai", model="grok-3")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="mistral", model="mistral-large-latest")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="cohere", model="command-r-plus")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="litellm", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="azure", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="openai", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="google", model="gemini-2.0-flash")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="xai", model="grok-3")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="mistral", model="mistral-large-latest")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="cohere", model="command-r-plus")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="litellm", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="azure", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="google", model="gemini-2.0-flash")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="xai", model="grok-3")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="mistral", model="mistral-large-latest")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="cohere", model="command-r-plus")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="litellm", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="azure", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
Metadata¶
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="google", model="gemini-2.0-flash")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="xai", model="grok-3")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="mistral", model="mistral-large-latest")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="cohere", model="command-r-plus")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="litellm", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="azure", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, Messages, llm
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="openai", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="google", model="gemini-2.0-flash")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="xai", model="grok-3")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="mistral", model="mistral-large-latest")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="cohere", model="command-r-plus")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="litellm", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="azure", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, llm, prompt_template
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="google", model="gemini-2.0-flash")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="xai", model="grok-3")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="mistral", model="mistral-large-latest")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="cohere", model="command-r-plus")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="litellm", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="azure", model="gpt-4o-mini")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
from mirascope import BaseDynamicConfig, BaseMessageParam, llm
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> BaseDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"call_params": {"max_tokens": 512},
"metadata": {"tags": {"version:0001"}},
}
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
Provider-Specific Usage¶
API Documentation
While Mirascope provides a consistent interface across different LLM providers, you can also use provider-specific modules with refined typing for an individual provider.
When using the provider modules, you'll receive a provider-specific BaseCallResponse object, which may have extra properties. Regardless, you can always access the full, provider-specific response object as response.response.
from mirascope.core import BaseMessageParam, openai
@openai.call("gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: openai.OpenAICallResponse = recommend_book("fantasy")
print(response.content)
from mirascope.core import BaseMessageParam, anthropic
@anthropic.call("claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: anthropic.AnthropicCallResponse = recommend_book("fantasy")
print(response.content)
from mirascope.core import BaseMessageParam, google
@google.call("gemini-2.0-flash")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: google.GoogleCallResponse = recommend_book("fantasy")
print(response.content)
from mirascope.core import BaseMessageParam, groq
@groq.call("llama-3.1-70b-versatile")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: groq.GroqCallResponse = recommend_book("fantasy")
print(response.content)
from mirascope.core import BaseMessageParam, mistral
@mistral.call("mistral-large-latest")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: mistral.MistralCallResponse = recommend_book("fantasy")
print(response.content)
from mirascope.core import BaseMessageParam, cohere
@cohere.call("command-r-plus")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: cohere.CohereCallResponse = recommend_book("fantasy")
print(response.content)
from mirascope.core import BaseMessageParam, litellm
@litellm.call("gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: litellm.LiteLLMCallResponse = recommend_book("fantasy")
print(response.content)
from mirascope.core import BaseMessageParam, bedrock
@bedrock.call("amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
response: bedrock.BedrockCallResponse = recommend_book("fantasy")
print(response.content)
Reasoning For Provider-Specific BaseCallResponse Objects
The reason that we have provider-specific response objects (e.g. OpenAICallResponse) is to provide proper type hints and safety when accessing the original response.
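As a quick illustration of that point, the raw response keeps its SDK type, so provider-specific fields stay reachable. This is a minimal sketch that assumes the OpenAI SDK's ChatCompletion is the underlying response type here:
from mirascope.core import BaseMessageParam, openai
from openai.types.chat import ChatCompletion

@openai.call("gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]

response: openai.OpenAICallResponse = recommend_book("fantasy")
# The original SDK object is preserved on `response.response`, so fields
# Mirascope does not surface directly (e.g. `model`) remain accessible.
original: ChatCompletion = response.response
print(original.model)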
Custom Messages¶
When using provider-specific calls, you can always return the original message types for that provider. To do so, simply return the provider-specific dynamic config:
from mirascope.core import anthropic
@anthropic.call("claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> anthropic.AnthropicDynamicConfig:
return {"messages": [{"role": "user", "content": f"Recommend a {genre} book"}]}
response: anthropic.AnthropicCallResponse = recommend_book("fantasy")
print(response.content)
from mirascope.core import mistral
from mistralai.models import UserMessage
@mistral.call("mistral-large-latest")
def recommend_book(genre: str) -> mistral.MistralDynamicConfig:
return {"messages": [UserMessage(role="user", content=f"Recommend a {genre} book")]}
response: mistral.MistralCallResponse = recommend_book("fantasy")
print(response.content)
from cohere.types.chat_message import ChatMessage
from mirascope.core import cohere
@cohere.call("command-r-plus")
def recommend_book(genre: str) -> cohere.CohereDynamicConfig:
return {"messages": [ChatMessage(role="user", message=f"Recommend a {genre} book")]} # pyright: ignore [reportCallIssue, reportReturnType]
response: cohere.CohereCallResponse = recommend_book("fantasy")
print(response.content)
from azure.ai.inference.models import UserMessage
from mirascope.core import azure
@azure.call("gpt-4o-mini")
def recommend_book(genre: str) -> azure.AzureDynamicConfig:
return {"messages": [UserMessage(content=f"Recommend a {genre} book")]}
response: azure.AzureCallResponse = recommend_book("fantasy")
print(response.content)
from mirascope.core import bedrock
@bedrock.call("amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> bedrock.BedrockDynamicConfig:
return {
"messages": [
{"role": "user", "content": [{"text": f"Recommend a {genre} book"}]}
]
}
response: bedrock.BedrockCallResponse = recommend_book("fantasy")
print(response.content)
Support for provider-specific messages ensures that you can still access newly released provider-specific features that Mirascope may not yet support natively.
Custom Client¶
Mirascope allows you to use custom clients when making calls to LLM providers. This feature is particularly useful when you need to use specific client configurations, handle authentication in a custom way, or work with self-hosted models.
Decorator Parameter:
You can pass a client to the call decorator using the client parameter:
import os
from mirascope.core import mistral
from mistralai import Mistral
@mistral.call(
"mistral-large-latest",
client=Mistral(api_key=os.environ["MISTRAL_API_KEY"]),
)
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
@mistral.call(
"mistral-large-latest",
client=Mistral(api_key=os.environ["MISTRAL_API_KEY"]),
)
async def recommend_book_async(genre: str) -> str:
return f"Recommend a {genre} book"
from azure.ai.inference import ChatCompletionsClient
from azure.core.credentials import AzureKeyCredential
from mirascope.core import azure
@azure.call(
"gpt-4o-mini",
client=ChatCompletionsClient(
endpoint="https://my-endpoint.openai.azure.com/openai/deployments/gpt-4o-mini/",
credential=AzureKeyCredential("..."),
),
)
def recommend_book(genre: str) -> str:
return f"Recommend a {genre} book"
import os
from mirascope.core import Messages, mistral
from mistralai import Mistral
@mistral.call(
"mistral-large-latest",
client=Mistral(api_key=os.environ["MISTRAL_API_KEY"]),
)
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
@mistral.call(
"mistral-large-latest",
client=Mistral(api_key=os.environ["MISTRAL_API_KEY"]),
)
async def recommend_book_async(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
from azure.ai.inference import ChatCompletionsClient
from azure.core.credentials import AzureKeyCredential
from mirascope.core import Messages, azure
@azure.call(
"gpt-4o-mini",
client=ChatCompletionsClient(
endpoint="https://my-endpoint.openai.azure.com/openai/deployments/gpt-4o-mini/",
credential=AzureKeyCredential("..."),
),
)
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
import os
from mirascope.core import mistral, prompt_template
from mistralai import Mistral
@mistral.call(
"mistral-large-latest",
client=Mistral(api_key=os.environ["MISTRAL_API_KEY"]),
)
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
@mistral.call(
"mistral-large-latest",
client=Mistral(api_key=os.environ["MISTRAL_API_KEY"]),
)
@prompt_template("Recommend a {genre} book")
async def recommend_book_async(genre: str): ...
from azure.ai.inference import ChatCompletionsClient
from azure.core.credentials import AzureKeyCredential
from mirascope.core import azure, prompt_template
@azure.call(
"gpt-4o-mini",
client=ChatCompletionsClient(
endpoint="https://my-endpoint.openai.azure.com/openai/deployments/gpt-4o-mini/",
credential=AzureKeyCredential("..."),
),
)
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
from google.genai import Client
from mirascope.core import BaseMessageParam, google
@google.call(
"gemini-2.0-flash",
client=Client(vertexai=True, project="your-project-id", location="us-central1"),
)
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
import os
from mirascope.core import BaseMessageParam, mistral
from mistralai import Mistral
@mistral.call(
"mistral-large-latest",
client=Mistral(api_key=os.environ["MISTRAL_API_KEY"]),
)
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
@mistral.call(
"mistral-large-latest",
client=Mistral(api_key=os.environ["MISTRAL_API_KEY"]),
)
async def recommend_book_async(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
from azure.ai.inference import ChatCompletionsClient
from azure.core.credentials import AzureKeyCredential
from mirascope.core import BaseMessageParam, azure
@azure.call(
"gpt-4o-mini",
client=ChatCompletionsClient(
endpoint="https://my-endpoint.openai.azure.com/openai/deployments/gpt-4o-mini/",
credential=AzureKeyCredential("..."),
),
)
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
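The self-hosted case mentioned above follows the same pattern: point a custom client at your own endpoint. This is a minimal sketch assuming the openai module accepts the same client parameter shown above and that a hypothetical OpenAI-compatible server is running locally (the base_url, api_key, and model name are placeholders):
from mirascope.core import openai
from openai import OpenAI

@openai.call(
    "llama-3.1-8b-instruct",  # placeholder model name served by the local endpoint
    client=OpenAI(base_url="http://localhost:8000/v1", api_key="unused"),
)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book"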
Dynamic Configuration:
You can also configure the client dynamically at runtime through the dynamic configuration:
from google.genai import Client
from mirascope.core import Messages, google
@google.call("gemini-2.0-flash")
def recommend_book(genre: str) -> google.GoogleDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"client": Client(
vertexai=True, project="your-project-id", location="us-central1"
),
}
import os
from mirascope.core import mistral, Messages
from mistralai import Mistral
@mistral.call("mistral-large-latest")
def recommend_book(genre: str) -> mistral.MistralDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"client": Mistral(api_key=os.environ["MISTRAL_API_KEY"]),
}
from azure.ai.inference import ChatCompletionsClient
from azure.core.credentials import AzureKeyCredential
from mirascope.core import azure, Messages
@azure.call("gpt-4o-mini")
def recommend_book(genre: str) -> azure.AzureDynamicConfig:
return {
"messages": [Messages.User(f"Recommend a {genre} book")],
"client": ChatCompletionsClient(
endpoint="your-endpoint", credential=AzureKeyCredential("your-credentials")
),
}
from google.genai import Client
from mirascope.core import google, prompt_template
@google.call("gemini-2.0-flash")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> google.GoogleDynamicConfig:
return {
"client": Client(
vertexai=True, project="your-project-id", location="us-central1"
),
}
import os
from mirascope.core import mistral, prompt_template
from mistralai import Mistral
@mistral.call("mistral-large-latest")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> mistral.MistralDynamicConfig:
return {
"client": Mistral(api_key=os.environ["MISTRAL_API_KEY"]),
}
from azure.ai.inference import ChatCompletionsClient
from azure.core.credentials import AzureKeyCredential
from mirascope.core import azure, prompt_template
@azure.call("gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str) -> azure.AzureDynamicConfig:
return {
"client": ChatCompletionsClient(
endpoint="your-endpoint", credential=AzureKeyCredential("your-credentials")
),
}
from anthropic import Anthropic
from mirascope.core import BaseMessageParam, anthropic
@anthropic.call("claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> anthropic.AnthropicDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"client": Anthropic(),
}
from google.genai import Client
from mirascope.core import BaseMessageParam, google
@google.call("gemini-2.0-flash")
def recommend_book(genre: str) -> google.GoogleDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"client": Client(
vertexai=True, project="your-project-id", location="us-central1"
),
}
import os
from mirascope.core import BaseMessageParam, mistral
from mistralai import Mistral
@mistral.call("mistral-large-latest")
def recommend_book(genre: str) -> mistral.MistralDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"client": Mistral(api_key=os.environ["MISTRAL_API_KEY"]),
}
from azure.ai.inference import ChatCompletionsClient
from azure.core.credentials import AzureKeyCredential
from mirascope.core import BaseMessageParam, azure
@azure.call("gpt-4o-mini")
def recommend_book(genre: str) -> azure.AzureDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"client": ChatCompletionsClient(
endpoint="your-endpoint", credential=AzureKeyCredential("your-credentials")
),
}
import boto3
from mirascope.core import BaseMessageParam, bedrock
@bedrock.call("amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> bedrock.BedrockDynamicConfig:
return {
"messages": [
BaseMessageParam(role="user", content=f"Recommend a {genre} book")
],
"client": boto3.client("bedrock-runtime"),
}
Make sure to use the correct client!
A common mistake is to use the synchronous client with async calls. Read the section on Async Custom Client to see how to use a custom client with asynchronous calls.
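As a rough sketch of the correct pairing (assuming the openai module accepts the same client parameter shown above and that AsyncOpenAI is the SDK's async counterpart):
from mirascope.core import openai
from openai import AsyncOpenAI

@openai.call("gpt-4o-mini", client=AsyncOpenAI())
async def recommend_book_async(genre: str) -> str:
    return f"Recommend a {genre} book"

# An async client goes with an async function:
# response = await recommend_book_async("fantasy")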
Error Handling¶
When making LLM calls, it's important to handle potential errors. Mirascope preserves the original error messages from providers, allowing you to catch and handle them appropriately:
from mirascope import Messages, llm
@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import Messages, llm
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import Messages, llm
@llm.call(provider="google", model="gemini-2.0-flash")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import Messages, llm
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import Messages, llm
@llm.call(provider="xai", model="grok-3")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import Messages, llm
@llm.call(provider="mistral", model="mistral-large-latest")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import Messages, llm
@llm.call(provider="cohere", model="command-r-plus")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import Messages, llm
@llm.call(provider="litellm", model="gpt-4o-mini")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import Messages, llm
@llm.call(provider="azure", model="gpt-4o-mini")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import Messages, llm
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> Messages.Type:
return Messages.User(f"Recommend a {genre} book")
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import llm, prompt_template
@llm.call(provider="openai", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import llm, prompt_template
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import llm, prompt_template
@llm.call(provider="google", model="gemini-2.0-flash")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import llm, prompt_template
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import llm, prompt_template
@llm.call(provider="mistral", model="mistral-large-latest")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import llm, prompt_template
@llm.call(provider="cohere", model="command-r-plus")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import llm, prompt_template
@llm.call(provider="litellm", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import llm, prompt_template
@llm.call(provider="azure", model="gpt-4o-mini")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import llm, prompt_template
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
@prompt_template("Recommend a {genre} book")
def recommend_book(genre: str): ...
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import BaseMessageParam, llm
@llm.call(provider="openai", model="gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import BaseMessageParam, llm
@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import BaseMessageParam, llm
@llm.call(provider="google", model="gemini-2.0-flash")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import BaseMessageParam, llm
@llm.call(provider="groq", model="llama-3.1-70b-versatile")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import BaseMessageParam, llm
@llm.call(provider="xai", model="grok-3")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import BaseMessageParam, llm
@llm.call(provider="mistral", model="mistral-large-latest")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import BaseMessageParam, llm
@llm.call(provider="cohere", model="command-r-plus")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import BaseMessageParam, llm
@llm.call(provider="litellm", model="gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import BaseMessageParam, llm
@llm.call(provider="azure", model="gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
from mirascope import BaseMessageParam, llm
@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0")
def recommend_book(genre: str) -> list[BaseMessageParam]:
return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]
try:
response: llm.CallResponse = recommend_book("fantasy")
print(response.content)
except Exception as e:
print(f"Error: {str(e)}")
These examples catch the base Exception class; however, you can (and should) catch provider-specific exceptions instead when using provider-specific modules.
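For example, with the provider-specific OpenAI module you can catch the SDK's own exceptions rather than the broad Exception class. This sketch assumes OpenAIError, the openai SDK's base exception, is the appropriate type to catch here:
from mirascope.core import BaseMessageParam, openai
from openai import OpenAIError

@openai.call("gpt-4o-mini")
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [BaseMessageParam(role="user", content=f"Recommend a {genre} book")]

try:
    response: openai.OpenAICallResponse = recommend_book("fantasy")
    print(response.content)
except OpenAIError as e:
    # The original provider error message is preserved by Mirascope.
    print(f"OpenAI error: {e}")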
Next Steps¶
By mastering calls in Mirascope, you'll be well-equipped to build robust, flexible, and reusable LLM applications.
Next, we recommend choosing one of:
- Streams to see how to stream call responses for a more real-time interaction.
- Chaining to see how to chain calls together.
- Response Models to see how to generate structured outputs.
- Tools to see how to give LLMs access to custom tools to extend their capabilities.
- Async to see how to better take advantage of asynchronous programming and parallelization for improved performance.
Pick whichever path aligns best with what you're hoping to get from Mirascope.