Output Parsers¶
Output Parsers in Mirascope provide a flexible way to process and structure the raw output from Large Language Models (LLMs). They allow you to transform the LLM's response into a more usable format, enabling easier integration with your application logic and improving the overall reliability of your LLM-powered features.
Basic Usage and Syntax¶
API Documentation
mirascope.core.openai.call.output_parser
mirascope.core.anthropic.call.output_parser
mirascope.core.mistral.call.output_parser
mirascope.core.google.call.output_parser
mirascope.core.groq.call.output_parser
mirascope.core.cohere.call.output_parser
mirascope.core.litellm.call.output_parser
Output Parsers are functions that take the call response object as input and return an output of a specified type. When you supply an output parser to a call
decorator, it modifies the return type of the decorated function to match the output type of the parser.
Let's take a look at a basic example:
from mirascope import llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into a (title, author) tuple.

    Raises ValueError (from tuple unpacking) if " by " does not occur
    exactly once in the response content.
    """
    title, author = response.content.split(" by ")
    return (title, author)


# The parser's return type becomes the call's return type: recommend_book
# now returns tuple[str, str] instead of a CallResponse.
@llm.call(provider="openai", model="gpt-4o-mini", output_parser=parse_recommendation)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book. Output only Title by Author"


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Anthropic ---
from mirascope import llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest", output_parser=parse_recommendation)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book. Output only Title by Author"


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Mistral ---
from mirascope import llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="mistral", model="mistral-large-latest", output_parser=parse_recommendation)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book. Output only Title by Author"


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Google ---
from mirascope import llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="google", model="gemini-2.0-flash", output_parser=parse_recommendation)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book. Output only Title by Author"


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Groq ---
from mirascope import llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="groq", model="llama-3.1-70b-versatile", output_parser=parse_recommendation)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book. Output only Title by Author"


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Cohere ---
from mirascope import llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="cohere", model="command-r-plus", output_parser=parse_recommendation)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book. Output only Title by Author"


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting LiteLLM ---
from mirascope import llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="litellm", model="gpt-4o-mini", output_parser=parse_recommendation)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book. Output only Title by Author"


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Azure ---
from mirascope import llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="azure", model="gpt-4o-mini", output_parser=parse_recommendation)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book. Output only Title by Author"


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Bedrock ---
from mirascope import llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0", output_parser=parse_recommendation)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book. Output only Title by Author"


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')
from mirascope import Messages, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into a (title, author) tuple.

    Raises ValueError (from tuple unpacking) if " by " does not occur
    exactly once in the response content.
    """
    title, author = response.content.split(" by ")
    return (title, author)


# Output parsers compose with any prompt-writing style; here the prompt is
# returned as a Messages.User message rather than a plain string.
@llm.call(provider="openai", model="gpt-4o-mini", output_parser=parse_recommendation)
def recommend_book(genre: str) -> Messages.Type:
    return Messages.User(f"Recommend a {genre} book. Output only Title by Author")


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Anthropic ---
from mirascope import Messages, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest", output_parser=parse_recommendation)
def recommend_book(genre: str) -> Messages.Type:
    return Messages.User(f"Recommend a {genre} book. Output only Title by Author")


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Mistral ---
from mirascope import Messages, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="mistral", model="mistral-large-latest", output_parser=parse_recommendation)
def recommend_book(genre: str) -> Messages.Type:
    return Messages.User(f"Recommend a {genre} book. Output only Title by Author")


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Google ---
from mirascope import Messages, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="google", model="gemini-2.0-flash", output_parser=parse_recommendation)
def recommend_book(genre: str) -> Messages.Type:
    return Messages.User(f"Recommend a {genre} book. Output only Title by Author")


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Groq ---
from mirascope import Messages, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="groq", model="llama-3.1-70b-versatile", output_parser=parse_recommendation)
def recommend_book(genre: str) -> Messages.Type:
    return Messages.User(f"Recommend a {genre} book. Output only Title by Author")


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Cohere ---
from mirascope import Messages, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="cohere", model="command-r-plus", output_parser=parse_recommendation)
def recommend_book(genre: str) -> Messages.Type:
    return Messages.User(f"Recommend a {genre} book. Output only Title by Author")


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting LiteLLM ---
from mirascope import Messages, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="litellm", model="gpt-4o-mini", output_parser=parse_recommendation)
def recommend_book(genre: str) -> Messages.Type:
    return Messages.User(f"Recommend a {genre} book. Output only Title by Author")


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Azure ---
from mirascope import Messages, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="azure", model="gpt-4o-mini", output_parser=parse_recommendation)
def recommend_book(genre: str) -> Messages.Type:
    return Messages.User(f"Recommend a {genre} book. Output only Title by Author")


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Bedrock ---
from mirascope import Messages, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0", output_parser=parse_recommendation)
def recommend_book(genre: str) -> Messages.Type:
    return Messages.User(f"Recommend a {genre} book. Output only Title by Author")


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')
from mirascope import llm, prompt_template


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into a (title, author) tuple.

    Raises ValueError (from tuple unpacking) if " by " does not occur
    exactly once in the response content.
    """
    title, author = response.content.split(" by ")
    return (title, author)


# Output parsers also work with string-template prompts: the decorated
# function body is `...` because @prompt_template supplies the message.
@llm.call(provider="openai", model="gpt-4o-mini", output_parser=parse_recommendation)
@prompt_template("Recommend a {genre} book. Output only Title by Author")
def recommend_book(genre: str): ...


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Anthropic ---
from mirascope import llm, prompt_template


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest", output_parser=parse_recommendation)
@prompt_template("Recommend a {genre} book. Output only Title by Author")
def recommend_book(genre: str): ...


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Mistral ---
from mirascope import llm, prompt_template


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="mistral", model="mistral-large-latest", output_parser=parse_recommendation)
@prompt_template("Recommend a {genre} book. Output only Title by Author")
def recommend_book(genre: str): ...


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Google ---
from mirascope import llm, prompt_template


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="google", model="gemini-2.0-flash", output_parser=parse_recommendation)
@prompt_template("Recommend a {genre} book. Output only Title by Author")
def recommend_book(genre: str): ...


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Groq ---
from mirascope import llm, prompt_template


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="groq", model="llama-3.1-70b-versatile", output_parser=parse_recommendation)
@prompt_template("Recommend a {genre} book. Output only Title by Author")
def recommend_book(genre: str): ...


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Cohere ---
from mirascope import llm, prompt_template


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="cohere", model="command-r-plus", output_parser=parse_recommendation)
@prompt_template("Recommend a {genre} book. Output only Title by Author")
def recommend_book(genre: str): ...


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting LiteLLM ---
from mirascope import llm, prompt_template


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="litellm", model="gpt-4o-mini", output_parser=parse_recommendation)
@prompt_template("Recommend a {genre} book. Output only Title by Author")
def recommend_book(genre: str): ...


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Azure ---
from mirascope import llm, prompt_template


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="azure", model="gpt-4o-mini", output_parser=parse_recommendation)
@prompt_template("Recommend a {genre} book. Output only Title by Author")
def recommend_book(genre: str): ...


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Bedrock ---
from mirascope import llm, prompt_template


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0", output_parser=parse_recommendation)
@prompt_template("Recommend a {genre} book. Output only Title by Author")
def recommend_book(genre: str): ...


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')
from mirascope import BaseMessageParam, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into a (title, author) tuple.

    Raises ValueError (from tuple unpacking) if " by " does not occur
    exactly once in the response content.
    """
    title, author = response.content.split(" by ")
    return (title, author)


# Output parsers also compose with prompts written as explicit
# BaseMessageParam message lists.
@llm.call(provider="openai", model="gpt-4o-mini", output_parser=parse_recommendation)
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [
        BaseMessageParam(
            role="user",
            content=f"Recommend a {genre} book. Output only Title by Author",
        )
    ]


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Anthropic ---
from mirascope import BaseMessageParam, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest", output_parser=parse_recommendation)
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [
        BaseMessageParam(
            role="user",
            content=f"Recommend a {genre} book. Output only Title by Author",
        )
    ]


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Mistral ---
from mirascope import BaseMessageParam, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="mistral", model="mistral-large-latest", output_parser=parse_recommendation)
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [
        BaseMessageParam(
            role="user",
            content=f"Recommend a {genre} book. Output only Title by Author",
        )
    ]


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Google ---
from mirascope import BaseMessageParam, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="google", model="gemini-2.0-flash", output_parser=parse_recommendation)
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [
        BaseMessageParam(
            role="user",
            content=f"Recommend a {genre} book. Output only Title by Author",
        )
    ]


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Groq ---
from mirascope import BaseMessageParam, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="groq", model="llama-3.1-70b-versatile", output_parser=parse_recommendation)
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [
        BaseMessageParam(
            role="user",
            content=f"Recommend a {genre} book. Output only Title by Author",
        )
    ]


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Cohere ---
from mirascope import BaseMessageParam, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="cohere", model="command-r-plus", output_parser=parse_recommendation)
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [
        BaseMessageParam(
            role="user",
            content=f"Recommend a {genre} book. Output only Title by Author",
        )
    ]


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting LiteLLM ---
from mirascope import BaseMessageParam, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="litellm", model="gpt-4o-mini", output_parser=parse_recommendation)
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [
        BaseMessageParam(
            role="user",
            content=f"Recommend a {genre} book. Output only Title by Author",
        )
    ]


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Azure ---
from mirascope import BaseMessageParam, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="azure", model="gpt-4o-mini", output_parser=parse_recommendation)
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [
        BaseMessageParam(
            role="user",
            content=f"Recommend a {genre} book. Output only Title by Author",
        )
    ]


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')


# --- Same example targeting Bedrock ---
from mirascope import BaseMessageParam, llm


def parse_recommendation(response: llm.CallResponse) -> tuple[str, str]:
    """Split a '<Title> by <Author>' completion into (title, author)."""
    title, author = response.content.split(" by ")
    return (title, author)


@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0", output_parser=parse_recommendation)
def recommend_book(genre: str) -> list[BaseMessageParam]:
    return [
        BaseMessageParam(
            role="user",
            content=f"Recommend a {genre} book. Output only Title by Author",
        )
    ]


print(recommend_book("fantasy"))
# Output: ('"The Name of the Wind"', 'Patrick Rothfuss')
Additional Examples¶
There are many different ways to structure and parse LLM outputs, ranging from XML parsing to regular-expression extraction.
Here are a few examples:
import re

from mirascope import llm, prompt_template


def parse_cot(response: llm.CallResponse) -> str:
    """Extract the final answer from a chain-of-thought response.

    Returns the text inside the <output> tags, stripped of surrounding
    whitespace; falls back to the full content when the tags are absent.
    """
    # BUGFIX: the original pattern used `.?*`, which is an invalid regex
    # ("multiple repeat") and makes re.search raise re.error on every call.
    # `.*?` (non-greedy any) is the intended quantifier.
    pattern = r"<thinking>.*?</thinking>.*?<output>(.*?)</output>"
    # re.DOTALL lets `.` span newlines, since the thinking/output sections
    # are multi-line.
    match = re.search(pattern, response.content, re.DOTALL)
    if not match:
        return response.content
    return match.group(1).strip()


@llm.call(provider="openai", model="gpt-4o-mini", output_parser=parse_cot)
@prompt_template(
    """
    First, output your thought process in <thinking> tags.
    Then, provide your final output in <output> tags.
    Question: {question}
    """
)
def chain_of_thought(question: str): ...


question = "Roger has 5 tennis balls. He buys 2 cans of 3. How many does he have now?"
output = chain_of_thought(question)
print(output)


# --- Same example targeting Anthropic ---
import re

from mirascope import llm, prompt_template


def parse_cot(response: llm.CallResponse) -> str:
    """Extract the <output> section of a chain-of-thought response."""
    # BUGFIX: `.?*` -> `.*?` (original pattern was an invalid regex).
    pattern = r"<thinking>.*?</thinking>.*?<output>(.*?)</output>"
    match = re.search(pattern, response.content, re.DOTALL)
    if not match:
        return response.content
    return match.group(1).strip()


@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest", output_parser=parse_cot)
@prompt_template(
    """
    First, output your thought process in <thinking> tags.
    Then, provide your final output in <output> tags.
    Question: {question}
    """
)
def chain_of_thought(question: str): ...


question = "Roger has 5 tennis balls. He buys 2 cans of 3. How many does he have now?"
output = chain_of_thought(question)
print(output)


# --- Same example targeting Mistral ---
import re

from mirascope import llm, prompt_template


def parse_cot(response: llm.CallResponse) -> str:
    """Extract the <output> section of a chain-of-thought response."""
    # BUGFIX: `.?*` -> `.*?` (original pattern was an invalid regex).
    pattern = r"<thinking>.*?</thinking>.*?<output>(.*?)</output>"
    match = re.search(pattern, response.content, re.DOTALL)
    if not match:
        return response.content
    return match.group(1).strip()


@llm.call(provider="mistral", model="mistral-large-latest", output_parser=parse_cot)
@prompt_template(
    """
    First, output your thought process in <thinking> tags.
    Then, provide your final output in <output> tags.
    Question: {question}
    """
)
def chain_of_thought(question: str): ...


question = "Roger has 5 tennis balls. He buys 2 cans of 3. How many does he have now?"
output = chain_of_thought(question)
print(output)


# --- Same example targeting Google ---
import re

from mirascope import llm, prompt_template


def parse_cot(response: llm.CallResponse) -> str:
    """Extract the <output> section of a chain-of-thought response."""
    # BUGFIX: `.?*` -> `.*?` (original pattern was an invalid regex).
    pattern = r"<thinking>.*?</thinking>.*?<output>(.*?)</output>"
    match = re.search(pattern, response.content, re.DOTALL)
    if not match:
        return response.content
    return match.group(1).strip()


@llm.call(provider="google", model="gemini-2.0-flash", output_parser=parse_cot)
@prompt_template(
    """
    First, output your thought process in <thinking> tags.
    Then, provide your final output in <output> tags.
    Question: {question}
    """
)
def chain_of_thought(question: str): ...


question = "Roger has 5 tennis balls. He buys 2 cans of 3. How many does he have now?"
output = chain_of_thought(question)
print(output)


# --- Same example targeting Groq ---
import re

from mirascope import llm, prompt_template


def parse_cot(response: llm.CallResponse) -> str:
    """Extract the <output> section of a chain-of-thought response."""
    # BUGFIX: `.?*` -> `.*?` (original pattern was an invalid regex).
    pattern = r"<thinking>.*?</thinking>.*?<output>(.*?)</output>"
    match = re.search(pattern, response.content, re.DOTALL)
    if not match:
        return response.content
    return match.group(1).strip()


@llm.call(provider="groq", model="llama-3.1-70b-versatile", output_parser=parse_cot)
@prompt_template(
    """
    First, output your thought process in <thinking> tags.
    Then, provide your final output in <output> tags.
    Question: {question}
    """
)
def chain_of_thought(question: str): ...


question = "Roger has 5 tennis balls. He buys 2 cans of 3. How many does he have now?"
output = chain_of_thought(question)
print(output)


# --- Same example targeting Cohere ---
import re

from mirascope import llm, prompt_template


def parse_cot(response: llm.CallResponse) -> str:
    """Extract the <output> section of a chain-of-thought response."""
    # BUGFIX: `.?*` -> `.*?` (original pattern was an invalid regex).
    pattern = r"<thinking>.*?</thinking>.*?<output>(.*?)</output>"
    match = re.search(pattern, response.content, re.DOTALL)
    if not match:
        return response.content
    return match.group(1).strip()


@llm.call(provider="cohere", model="command-r-plus", output_parser=parse_cot)
@prompt_template(
    """
    First, output your thought process in <thinking> tags.
    Then, provide your final output in <output> tags.
    Question: {question}
    """
)
def chain_of_thought(question: str): ...


question = "Roger has 5 tennis balls. He buys 2 cans of 3. How many does he have now?"
output = chain_of_thought(question)
print(output)


# --- Same example targeting LiteLLM ---
import re

from mirascope import llm, prompt_template


def parse_cot(response: llm.CallResponse) -> str:
    """Extract the <output> section of a chain-of-thought response."""
    # BUGFIX: `.?*` -> `.*?` (original pattern was an invalid regex).
    pattern = r"<thinking>.*?</thinking>.*?<output>(.*?)</output>"
    match = re.search(pattern, response.content, re.DOTALL)
    if not match:
        return response.content
    return match.group(1).strip()


@llm.call(provider="litellm", model="gpt-4o-mini", output_parser=parse_cot)
@prompt_template(
    """
    First, output your thought process in <thinking> tags.
    Then, provide your final output in <output> tags.
    Question: {question}
    """
)
def chain_of_thought(question: str): ...


question = "Roger has 5 tennis balls. He buys 2 cans of 3. How many does he have now?"
output = chain_of_thought(question)
print(output)


# --- Same example targeting Azure ---
import re

from mirascope import llm, prompt_template


def parse_cot(response: llm.CallResponse) -> str:
    """Extract the <output> section of a chain-of-thought response."""
    # BUGFIX: `.?*` -> `.*?` (original pattern was an invalid regex).
    pattern = r"<thinking>.*?</thinking>.*?<output>(.*?)</output>"
    match = re.search(pattern, response.content, re.DOTALL)
    if not match:
        return response.content
    return match.group(1).strip()


@llm.call(provider="azure", model="gpt-4o-mini", output_parser=parse_cot)
@prompt_template(
    """
    First, output your thought process in <thinking> tags.
    Then, provide your final output in <output> tags.
    Question: {question}
    """
)
def chain_of_thought(question: str): ...


question = "Roger has 5 tennis balls. He buys 2 cans of 3. How many does he have now?"
output = chain_of_thought(question)
print(output)


# --- Same example targeting Bedrock ---
import re

from mirascope import llm, prompt_template


def parse_cot(response: llm.CallResponse) -> str:
    """Extract the <output> section of a chain-of-thought response."""
    # BUGFIX: `.?*` -> `.*?` (original pattern was an invalid regex).
    pattern = r"<thinking>.*?</thinking>.*?<output>(.*?)</output>"
    match = re.search(pattern, response.content, re.DOTALL)
    if not match:
        return response.content
    return match.group(1).strip()


@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0", output_parser=parse_cot)
@prompt_template(
    """
    First, output your thought process in <thinking> tags.
    Then, provide your final output in <output> tags.
    Question: {question}
    """
)
def chain_of_thought(question: str): ...


question = "Roger has 5 tennis balls. He buys 2 cans of 3. How many does he have now?"
output = chain_of_thought(question)
print(output)
import xml.etree.ElementTree as ET

from mirascope import llm, prompt_template
from pydantic import BaseModel


class Book(BaseModel):
    # Structured representation of a single book recommendation.
    title: str
    author: str
    year: int
    summary: str


def parse_book_xml(response: llm.CallResponse) -> Book | None:
    """Parse a <book> XML payload into a Book, or None on malformed input.

    Each field must be present and non-empty; parse and validation errors
    are reported to stdout rather than raised.
    """
    try:
        root = ET.fromstring(response.content)
        # Walrus + find(): reject both a missing element and an empty text node.
        if (node := root.find("title")) is None or not (title := node.text):
            raise ValueError("Missing title")
        if (node := root.find("author")) is None or not (author := node.text):
            raise ValueError("Missing author")
        if (node := root.find("year")) is None or not (year := node.text):
            raise ValueError("Missing year")
        if (node := root.find("summary")) is None or not (summary := node.text):
            raise ValueError("Missing summary")
        # int(year) may also raise ValueError, handled below.
        return Book(title=title, author=author, year=int(year), summary=summary)
    except (ET.ParseError, ValueError) as e:
        print(f"Error parsing XML: {e}")
        return None


@llm.call(provider="openai", model="gpt-4o-mini", output_parser=parse_book_xml)
@prompt_template(
    """
    Recommend a {genre} book. Provide the information in the following XML format:
    <book>
        <title>Book Title</title>
        <author>Author Name</author>
        <year>Publication Year</year>
        <summary>Brief summary of the book</summary>
    </book>
    Output ONLY the XML and no other text.
    """
)
def recommend_book(genre: str): ...


book = recommend_book("science fiction")
if book:
    print(f"Title: {book.title}")
    print(f"Author: {book.author}")
    print(f"Year: {book.year}")
    print(f"Summary: {book.summary}")
else:
    print("Failed to parse the recommendation.")


# --- Same example targeting Anthropic ---
import xml.etree.ElementTree as ET

from mirascope import llm, prompt_template
from pydantic import BaseModel


class Book(BaseModel):
    title: str
    author: str
    year: int
    summary: str


def parse_book_xml(response: llm.CallResponse) -> Book | None:
    """Parse a <book> XML payload into a Book, or None on malformed input."""
    try:
        root = ET.fromstring(response.content)
        if (node := root.find("title")) is None or not (title := node.text):
            raise ValueError("Missing title")
        if (node := root.find("author")) is None or not (author := node.text):
            raise ValueError("Missing author")
        if (node := root.find("year")) is None or not (year := node.text):
            raise ValueError("Missing year")
        if (node := root.find("summary")) is None or not (summary := node.text):
            raise ValueError("Missing summary")
        return Book(title=title, author=author, year=int(year), summary=summary)
    except (ET.ParseError, ValueError) as e:
        print(f"Error parsing XML: {e}")
        return None


@llm.call(provider="anthropic", model="claude-3-5-sonnet-latest", output_parser=parse_book_xml)
@prompt_template(
    """
    Recommend a {genre} book. Provide the information in the following XML format:
    <book>
        <title>Book Title</title>
        <author>Author Name</author>
        <year>Publication Year</year>
        <summary>Brief summary of the book</summary>
    </book>
    Output ONLY the XML and no other text.
    """
)
def recommend_book(genre: str): ...


book = recommend_book("science fiction")
if book:
    print(f"Title: {book.title}")
    print(f"Author: {book.author}")
    print(f"Year: {book.year}")
    print(f"Summary: {book.summary}")
else:
    print("Failed to parse the recommendation.")


# --- Same example targeting Mistral ---
import xml.etree.ElementTree as ET

from mirascope import llm, prompt_template
from pydantic import BaseModel


class Book(BaseModel):
    title: str
    author: str
    year: int
    summary: str


def parse_book_xml(response: llm.CallResponse) -> Book | None:
    """Parse a <book> XML payload into a Book, or None on malformed input."""
    try:
        root = ET.fromstring(response.content)
        if (node := root.find("title")) is None or not (title := node.text):
            raise ValueError("Missing title")
        if (node := root.find("author")) is None or not (author := node.text):
            raise ValueError("Missing author")
        if (node := root.find("year")) is None or not (year := node.text):
            raise ValueError("Missing year")
        if (node := root.find("summary")) is None or not (summary := node.text):
            raise ValueError("Missing summary")
        return Book(title=title, author=author, year=int(year), summary=summary)
    except (ET.ParseError, ValueError) as e:
        print(f"Error parsing XML: {e}")
        return None


@llm.call(provider="mistral", model="mistral-large-latest", output_parser=parse_book_xml)
@prompt_template(
    """
    Recommend a {genre} book. Provide the information in the following XML format:
    <book>
        <title>Book Title</title>
        <author>Author Name</author>
        <year>Publication Year</year>
        <summary>Brief summary of the book</summary>
    </book>
    Output ONLY the XML and no other text.
    """
)
def recommend_book(genre: str): ...


book = recommend_book("science fiction")
if book:
    print(f"Title: {book.title}")
    print(f"Author: {book.author}")
    print(f"Year: {book.year}")
    print(f"Summary: {book.summary}")
else:
    print("Failed to parse the recommendation.")


# --- Same example targeting Google ---
import xml.etree.ElementTree as ET

from mirascope import llm, prompt_template
from pydantic import BaseModel


class Book(BaseModel):
    title: str
    author: str
    year: int
    summary: str


def parse_book_xml(response: llm.CallResponse) -> Book | None:
    """Parse a <book> XML payload into a Book, or None on malformed input."""
    try:
        root = ET.fromstring(response.content)
        if (node := root.find("title")) is None or not (title := node.text):
            raise ValueError("Missing title")
        if (node := root.find("author")) is None or not (author := node.text):
            raise ValueError("Missing author")
        if (node := root.find("year")) is None or not (year := node.text):
            raise ValueError("Missing year")
        if (node := root.find("summary")) is None or not (summary := node.text):
            raise ValueError("Missing summary")
        return Book(title=title, author=author, year=int(year), summary=summary)
    except (ET.ParseError, ValueError) as e:
        print(f"Error parsing XML: {e}")
        return None


@llm.call(provider="google", model="gemini-2.0-flash", output_parser=parse_book_xml)
@prompt_template(
    """
    Recommend a {genre} book. Provide the information in the following XML format:
    <book>
        <title>Book Title</title>
        <author>Author Name</author>
        <year>Publication Year</year>
        <summary>Brief summary of the book</summary>
    </book>
    Output ONLY the XML and no other text.
    """
)
def recommend_book(genre: str): ...


book = recommend_book("science fiction")
if book:
    print(f"Title: {book.title}")
    print(f"Author: {book.author}")
    print(f"Year: {book.year}")
    print(f"Summary: {book.summary}")
else:
    print("Failed to parse the recommendation.")


# --- Same example targeting Groq ---
import xml.etree.ElementTree as ET

from mirascope import llm, prompt_template
from pydantic import BaseModel


class Book(BaseModel):
    title: str
    author: str
    year: int
    summary: str


def parse_book_xml(response: llm.CallResponse) -> Book | None:
    """Parse a <book> XML payload into a Book, or None on malformed input."""
    try:
        root = ET.fromstring(response.content)
        if (node := root.find("title")) is None or not (title := node.text):
            raise ValueError("Missing title")
        if (node := root.find("author")) is None or not (author := node.text):
            raise ValueError("Missing author")
        if (node := root.find("year")) is None or not (year := node.text):
            raise ValueError("Missing year")
        if (node := root.find("summary")) is None or not (summary := node.text):
            raise ValueError("Missing summary")
        return Book(title=title, author=author, year=int(year), summary=summary)
    except (ET.ParseError, ValueError) as e:
        print(f"Error parsing XML: {e}")
        return None


@llm.call(provider="groq", model="llama-3.1-70b-versatile", output_parser=parse_book_xml)
@prompt_template(
    """
    Recommend a {genre} book. Provide the information in the following XML format:
    <book>
        <title>Book Title</title>
        <author>Author Name</author>
        <year>Publication Year</year>
        <summary>Brief summary of the book</summary>
    </book>
    Output ONLY the XML and no other text.
    """
)
def recommend_book(genre: str): ...


book = recommend_book("science fiction")
if book:
    print(f"Title: {book.title}")
    print(f"Author: {book.author}")
    print(f"Year: {book.year}")
    print(f"Summary: {book.summary}")
else:
    print("Failed to parse the recommendation.")
import xml.etree.ElementTree as ET

from mirascope import llm, prompt_template
from pydantic import BaseModel


class Book(BaseModel):
    # Structured fields extracted from the model's XML output.
    title: str
    author: str
    year: int
    summary: str


def parse_book_xml(response: llm.CallResponse) -> Book | None:
    """Parse the LLM's XML response into a validated `Book`.

    Returns None (after printing the error) when the XML is malformed or any
    required element is missing or has empty text.
    """
    try:
        root = ET.fromstring(response.content)
        # Each element must exist AND carry non-empty text; the walrus
        # assignments capture the value in the same check.
        if (node := root.find("title")) is None or not (title := node.text):
            raise ValueError("Missing title")
        if (node := root.find("author")) is None or not (author := node.text):
            raise ValueError("Missing author")
        if (node := root.find("year")) is None or not (year := node.text):
            raise ValueError("Missing year")
        if (node := root.find("summary")) is None or not (summary := node.text):
            raise ValueError("Missing summary")
        return Book(title=title, author=author, year=int(year), summary=summary)
    except (ET.ParseError, ValueError) as e:
        # Best-effort parsing: report the failure instead of propagating it.
        print(f"Error parsing XML: {e}")
        return None


@llm.call(provider="cohere", model="command-r-plus", output_parser=parse_book_xml)
@prompt_template(
    """
Recommend a {genre} book. Provide the information in the following XML format:
<book>
<title>Book Title</title>
<author>Author Name</author>
<year>Publication Year</year>
<summary>Brief summary of the book</summary>
</book>
Output ONLY the XML and no other text.
"""
)
def recommend_book(genre: str): ...


book = recommend_book("science fiction")
if book:
    print(f"Title: {book.title}")
    print(f"Author: {book.author}")
    print(f"Year: {book.year}")
    print(f"Summary: {book.summary}")
else:
    print("Failed to parse the recommendation.")
import xml.etree.ElementTree as ET

from mirascope import llm, prompt_template
from pydantic import BaseModel


class Book(BaseModel):
    # Structured fields extracted from the model's XML output.
    title: str
    author: str
    year: int
    summary: str


def parse_book_xml(response: llm.CallResponse) -> Book | None:
    """Parse the LLM's XML response into a validated `Book`.

    Returns None (after printing the error) when the XML is malformed or any
    required element is missing or has empty text.
    """
    try:
        root = ET.fromstring(response.content)
        # Each element must exist AND carry non-empty text; the walrus
        # assignments capture the value in the same check.
        if (node := root.find("title")) is None or not (title := node.text):
            raise ValueError("Missing title")
        if (node := root.find("author")) is None or not (author := node.text):
            raise ValueError("Missing author")
        if (node := root.find("year")) is None or not (year := node.text):
            raise ValueError("Missing year")
        if (node := root.find("summary")) is None or not (summary := node.text):
            raise ValueError("Missing summary")
        return Book(title=title, author=author, year=int(year), summary=summary)
    except (ET.ParseError, ValueError) as e:
        # Best-effort parsing: report the failure instead of propagating it.
        print(f"Error parsing XML: {e}")
        return None


@llm.call(provider="litellm", model="gpt-4o-mini", output_parser=parse_book_xml)
@prompt_template(
    """
Recommend a {genre} book. Provide the information in the following XML format:
<book>
<title>Book Title</title>
<author>Author Name</author>
<year>Publication Year</year>
<summary>Brief summary of the book</summary>
</book>
Output ONLY the XML and no other text.
"""
)
def recommend_book(genre: str): ...


book = recommend_book("science fiction")
if book:
    print(f"Title: {book.title}")
    print(f"Author: {book.author}")
    print(f"Year: {book.year}")
    print(f"Summary: {book.summary}")
else:
    print("Failed to parse the recommendation.")
import xml.etree.ElementTree as ET

from mirascope import llm, prompt_template
from pydantic import BaseModel


class Book(BaseModel):
    # Structured fields extracted from the model's XML output.
    title: str
    author: str
    year: int
    summary: str


def parse_book_xml(response: llm.CallResponse) -> Book | None:
    """Parse the LLM's XML response into a validated `Book`.

    Returns None (after printing the error) when the XML is malformed or any
    required element is missing or has empty text.
    """
    try:
        root = ET.fromstring(response.content)
        # Each element must exist AND carry non-empty text; the walrus
        # assignments capture the value in the same check.
        if (node := root.find("title")) is None or not (title := node.text):
            raise ValueError("Missing title")
        if (node := root.find("author")) is None or not (author := node.text):
            raise ValueError("Missing author")
        if (node := root.find("year")) is None or not (year := node.text):
            raise ValueError("Missing year")
        if (node := root.find("summary")) is None or not (summary := node.text):
            raise ValueError("Missing summary")
        return Book(title=title, author=author, year=int(year), summary=summary)
    except (ET.ParseError, ValueError) as e:
        # Best-effort parsing: report the failure instead of propagating it.
        print(f"Error parsing XML: {e}")
        return None


@llm.call(provider="azure", model="gpt-4o-mini", output_parser=parse_book_xml)
@prompt_template(
    """
Recommend a {genre} book. Provide the information in the following XML format:
<book>
<title>Book Title</title>
<author>Author Name</author>
<year>Publication Year</year>
<summary>Brief summary of the book</summary>
</book>
Output ONLY the XML and no other text.
"""
)
def recommend_book(genre: str): ...


book = recommend_book("science fiction")
if book:
    print(f"Title: {book.title}")
    print(f"Author: {book.author}")
    print(f"Year: {book.year}")
    print(f"Summary: {book.summary}")
else:
    print("Failed to parse the recommendation.")
import xml.etree.ElementTree as ET

from mirascope import llm, prompt_template
from pydantic import BaseModel


class Book(BaseModel):
    # Structured fields extracted from the model's XML output.
    title: str
    author: str
    year: int
    summary: str


def parse_book_xml(response: llm.CallResponse) -> Book | None:
    """Parse the LLM's XML response into a validated `Book`.

    Returns None (after printing the error) when the XML is malformed or any
    required element is missing or has empty text.
    """
    try:
        root = ET.fromstring(response.content)
        # Each element must exist AND carry non-empty text; the walrus
        # assignments capture the value in the same check.
        if (node := root.find("title")) is None or not (title := node.text):
            raise ValueError("Missing title")
        if (node := root.find("author")) is None or not (author := node.text):
            raise ValueError("Missing author")
        if (node := root.find("year")) is None or not (year := node.text):
            raise ValueError("Missing year")
        if (node := root.find("summary")) is None or not (summary := node.text):
            raise ValueError("Missing summary")
        return Book(title=title, author=author, year=int(year), summary=summary)
    except (ET.ParseError, ValueError) as e:
        # Best-effort parsing: report the failure instead of propagating it.
        print(f"Error parsing XML: {e}")
        return None


@llm.call(provider="bedrock", model="amazon.nova-lite-v1:0", output_parser=parse_book_xml)
@prompt_template(
    """
Recommend a {genre} book. Provide the information in the following XML format:
<book>
<title>Book Title</title>
<author>Author Name</author>
<year>Publication Year</year>
<summary>Brief summary of the book</summary>
</book>
Output ONLY the XML and no other text.
"""
)
def recommend_book(genre: str): ...


book = recommend_book("science fiction")
if book:
    print(f"Title: {book.title}")
    print(f"Author: {book.author}")
    print(f"Year: {book.year}")
    print(f"Summary: {book.summary}")
else:
    print("Failed to parse the recommendation.")
import json

from mirascope import llm


def only_json(response: llm.CallResponse) -> str:
    """Return the span from the first '{' to the last '}' of the response.

    Strips any prose the model wrapped around the JSON object; raises
    ValueError (from str.index) if no '{' is present.
    """
    json_start = response.content.index("{")
    json_end = response.content.rfind("}")
    return response.content[json_start : json_end + 1]


@llm.call(
    provider="openai", model="gpt-4o-mini", json_mode=True, output_parser=only_json
)
def json_extraction(text: str, fields: list[str]) -> str:
    return f"Extract {fields} from the following text: {text}"


json_response = json_extraction(
    text="The capital of France is Paris",
    fields=["capital", "country"],
)
# only_json trimmed everything outside the outermost braces, so this parses.
print(json.loads(json_response))
import json

from mirascope import llm


def only_json(response: llm.CallResponse) -> str:
    """Return the span from the first '{' to the last '}' of the response.

    Strips any prose the model wrapped around the JSON object; raises
    ValueError (from str.index) if no '{' is present.
    """
    json_start = response.content.index("{")
    json_end = response.content.rfind("}")
    return response.content[json_start : json_end + 1]


@llm.call(
    provider="anthropic", model="claude-3-5-sonnet-latest", json_mode=True, output_parser=only_json
)
def json_extraction(text: str, fields: list[str]) -> str:
    return f"Extract {fields} from the following text: {text}"


json_response = json_extraction(
    text="The capital of France is Paris",
    fields=["capital", "country"],
)
# only_json trimmed everything outside the outermost braces, so this parses.
print(json.loads(json_response))
import json

from mirascope import llm


def only_json(response: llm.CallResponse) -> str:
    """Return the span from the first '{' to the last '}' of the response.

    Strips any prose the model wrapped around the JSON object; raises
    ValueError (from str.index) if no '{' is present.
    """
    json_start = response.content.index("{")
    json_end = response.content.rfind("}")
    return response.content[json_start : json_end + 1]


@llm.call(
    provider="mistral", model="mistral-large-latest", json_mode=True, output_parser=only_json
)
def json_extraction(text: str, fields: list[str]) -> str:
    return f"Extract {fields} from the following text: {text}"


json_response = json_extraction(
    text="The capital of France is Paris",
    fields=["capital", "country"],
)
# only_json trimmed everything outside the outermost braces, so this parses.
print(json.loads(json_response))
import json

from mirascope import llm


def only_json(response: llm.CallResponse) -> str:
    """Return the span from the first '{' to the last '}' of the response.

    Strips any prose the model wrapped around the JSON object; raises
    ValueError (from str.index) if no '{' is present.
    """
    json_start = response.content.index("{")
    json_end = response.content.rfind("}")
    return response.content[json_start : json_end + 1]


@llm.call(
    provider="google", model="gemini-2.0-flash", json_mode=True, output_parser=only_json
)
def json_extraction(text: str, fields: list[str]) -> str:
    return f"Extract {fields} from the following text: {text}"


json_response = json_extraction(
    text="The capital of France is Paris",
    fields=["capital", "country"],
)
# only_json trimmed everything outside the outermost braces, so this parses.
print(json.loads(json_response))
import json

from mirascope import llm


def only_json(response: llm.CallResponse) -> str:
    """Return the span from the first '{' to the last '}' of the response.

    Strips any prose the model wrapped around the JSON object; raises
    ValueError (from str.index) if no '{' is present.
    """
    json_start = response.content.index("{")
    json_end = response.content.rfind("}")
    return response.content[json_start : json_end + 1]


@llm.call(
    provider="groq", model="llama-3.1-70b-versatile", json_mode=True, output_parser=only_json
)
def json_extraction(text: str, fields: list[str]) -> str:
    return f"Extract {fields} from the following text: {text}"


json_response = json_extraction(
    text="The capital of France is Paris",
    fields=["capital", "country"],
)
# only_json trimmed everything outside the outermost braces, so this parses.
print(json.loads(json_response))
import json

from mirascope import llm


def only_json(response: llm.CallResponse) -> str:
    """Return the span from the first '{' to the last '}' of the response.

    Strips any prose the model wrapped around the JSON object; raises
    ValueError (from str.index) if no '{' is present.
    """
    json_start = response.content.index("{")
    json_end = response.content.rfind("}")
    return response.content[json_start : json_end + 1]


@llm.call(
    provider="cohere", model="command-r-plus", json_mode=True, output_parser=only_json
)
def json_extraction(text: str, fields: list[str]) -> str:
    return f"Extract {fields} from the following text: {text}"


json_response = json_extraction(
    text="The capital of France is Paris",
    fields=["capital", "country"],
)
# only_json trimmed everything outside the outermost braces, so this parses.
print(json.loads(json_response))
import json

from mirascope import llm


def only_json(response: llm.CallResponse) -> str:
    """Return the span from the first '{' to the last '}' of the response.

    Strips any prose the model wrapped around the JSON object; raises
    ValueError (from str.index) if no '{' is present.
    """
    json_start = response.content.index("{")
    json_end = response.content.rfind("}")
    return response.content[json_start : json_end + 1]


@llm.call(
    provider="litellm", model="gpt-4o-mini", json_mode=True, output_parser=only_json
)
def json_extraction(text: str, fields: list[str]) -> str:
    return f"Extract {fields} from the following text: {text}"


json_response = json_extraction(
    text="The capital of France is Paris",
    fields=["capital", "country"],
)
# only_json trimmed everything outside the outermost braces, so this parses.
print(json.loads(json_response))
import json

from mirascope import llm


def only_json(response: llm.CallResponse) -> str:
    """Return the span from the first '{' to the last '}' of the response.

    Strips any prose the model wrapped around the JSON object; raises
    ValueError (from str.index) if no '{' is present.
    """
    json_start = response.content.index("{")
    json_end = response.content.rfind("}")
    return response.content[json_start : json_end + 1]


@llm.call(
    provider="azure", model="gpt-4o-mini", json_mode=True, output_parser=only_json
)
def json_extraction(text: str, fields: list[str]) -> str:
    return f"Extract {fields} from the following text: {text}"


json_response = json_extraction(
    text="The capital of France is Paris",
    fields=["capital", "country"],
)
# only_json trimmed everything outside the outermost braces, so this parses.
print(json.loads(json_response))
import json

from mirascope import llm


def only_json(response: llm.CallResponse) -> str:
    """Return the span from the first '{' to the last '}' of the response.

    Strips any prose the model wrapped around the JSON object; raises
    ValueError (from str.index) if no '{' is present.
    """
    json_start = response.content.index("{")
    json_end = response.content.rfind("}")
    return response.content[json_start : json_end + 1]


@llm.call(
    provider="bedrock", model="amazon.nova-lite-v1:0", json_mode=True, output_parser=only_json
)
def json_extraction(text: str, fields: list[str]) -> str:
    return f"Extract {fields} from the following text: {text}"


json_response = json_extraction(
    text="The capital of France is Paris",
    fields=["capital", "country"],
)
# only_json trimmed everything outside the outermost braces, so this parses.
print(json.loads(json_response))
Next Steps¶
By leveraging Output Parsers effectively, you can create more robust and reliable LLM-powered applications, ensuring that the raw model outputs are transformed into structured data that's easy to work with in your application logic.
Next, we recommend taking a look at the section on Tools to learn how to extend the capabilities of LLMs with custom functions.