# responses
## <ApiType type="Attribute" slug="any-response" symbolName="AnyResponse" /> AnyResponse
**Type:** <TypeLink type={{"type_str": "TypeAlias", "description": null, "kind": "simple", "symbol_name": "TypeAlias", "doc_url": null}} />
## <ApiType type="Attribute" slug="async-chunk-iterator" symbolName="AsyncChunkIterator" /> AsyncChunkIterator
**Type:** <TypeLink type={{"type_str": "TypeAlias", "description": null, "kind": "simple", "symbol_name": "TypeAlias", "doc_url": null}} />
Asynchronous iterator yielding chunks with raw data.
## <ApiType type="Class" slug="async-context-response" symbolName="AsyncContextResponse" /> AsyncContextResponse
The response generated by an LLM from an async context call.
**Bases:**
<TypeLink type={{"type_str": "BaseResponse[AsyncContextToolkit[DepsT], FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseResponse", "description": null, "kind": "simple", "symbol_name": "BaseResponse", "doc_url": null}, "parameters": [{"type_str": "AsyncContextToolkit[DepsT]", "description": null, "kind": "generic", "base_type": {"type_str": "AsyncContextToolkit", "description": null, "kind": "simple", "symbol_name": "AsyncContextToolkit", "doc_url": "/docs/api/llm/tools#async-context-toolkit"}, "parameters": [{"type_str": "DepsT", "description": null, "kind": "simple", "symbol_name": "DepsT", "doc_url": null}], "doc_url": null}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />, <TypeLink type={{"type_str": "Generic[DepsT, FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "Generic", "description": null, "kind": "simple", "symbol_name": "Generic", "doc_url": "https://docs.python.org/3/library/typing.html#typing.Generic"}, "parameters": [{"type_str": "DepsT", "description": null, "kind": "simple", "symbol_name": "DepsT", "doc_url": null}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />
## <ApiType type="Function" slug="mirascope-llm-responses-response--async-context-response-execute_tools" symbolName="execute_tools" /> execute_tools
Execute and return all of the tool calls in the response.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "ctx",
"type_info": {
"type_str": "Context[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Context",
"description": null,
"kind": "simple",
"symbol_name": "Context",
"doc_url": null
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
"description": "A `Context` with the required deps type."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Sequence[ToolOutput[Jsonable]]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Sequence"
},
"parameters": [
{
"type_str": "ToolOutput[Jsonable]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ToolOutput",
"description": null,
"kind": "simple",
"symbol_name": "ToolOutput",
"doc_url": "/docs/api/llm/content#tool-output"
},
"parameters": [
{
"type_str": "Jsonable",
"description": null,
"kind": "simple",
"symbol_name": "Jsonable",
"doc_url": "/docs/api/llm/types#jsonable"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A sequence containing a `ToolOutput` for every tool call in the order they appeared."
}}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-response--async-context-response-resume" symbolName="resume" /> resume
Generate a new `AsyncContextResponse` using this response's messages with additional user content.
Uses this response's tools and format type. Also uses this response's provider,
model, client, and params, unless the model context manager is being used to
provide a new LLM as an override.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "ctx",
"type_info": {
"type_str": "Context[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Context",
"description": null,
"kind": "simple",
"symbol_name": "Context",
"doc_url": null
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
      "description": "A `Context` with the required deps type."
},
{
"name": "content",
"type_info": {
"type_str": "UserContent",
"description": null,
"kind": "simple",
"symbol_name": "UserContent",
"doc_url": "/docs/api/llm/messages#user-content"
},
"description": "The new user message content to append to the message history."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "AsyncContextResponse[DepsT] | AsyncContextResponse[DepsT, FormattableT]",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Union"
},
"parameters": [
{
"type_str": "AsyncContextResponse[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "AsyncContextResponse",
"description": null,
"kind": "simple",
"symbol_name": "AsyncContextResponse",
"doc_url": "/docs/api/llm/responses#async-context-response"
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
{
"type_str": "AsyncContextResponse[DepsT, FormattableT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "AsyncContextResponse",
"description": null,
"kind": "simple",
"symbol_name": "AsyncContextResponse",
"doc_url": "/docs/api/llm/responses#async-context-response"
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
},
{
"type_str": "FormattableT",
"description": null,
"kind": "simple",
"symbol_name": "FormattableT",
"doc_url": "/docs/api/llm/formatting#formattable-t"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A new `AsyncContextResponse` instance generated from the extended message history."
}}
/>
## <ApiType type="Class" slug="async-context-stream-response" symbolName="AsyncContextStreamResponse" /> AsyncContextStreamResponse
An `AsyncContextStreamResponse` wraps response content from the LLM with a streaming interface.
This class supports iteration to process chunks as they arrive from the model.
Content can be streamed in one of four ways:
- Via `.streams()`, which provides an iterator of streams, where each
stream contains chunks of streamed data. The chunks contain `delta`s (new content
in that particular chunk), and the stream itself accumulates the collected state
of all the chunks processed thus far.
- Via `.chunk_stream()` which allows iterating over Mirascope's provider-
agnostic chunk representation.
- Via `.pretty_stream()` a helper method which provides all response content
as `str` deltas. Iterating through `pretty_stream` will yield text content and
optionally placeholder representations for other content types, but it will still
consume the full stream.
- Via `.structured_stream()`, a helper method which provides partial
structured outputs from a response (useful when FormatT is set). Iterating through
`structured_stream` will only yield structured partials, but it will still consume
the full stream.
As chunks are consumed, they are collected in-memory on the `AsyncContextStreamResponse`, and they
become available in `.content`, `.messages`, `.tool_calls`, etc. All of the stream
iterators can be restarted after the stream has been consumed, in which case they
will yield chunks from memory in the original sequence that came from the LLM. If
the stream is only partially consumed, a fresh iterator will first iterate through
in-memory content, and then will continue consuming fresh chunks from the LLM.
In the specific case of text chunks, they are included in the response content as soon
as they become available, via an `llm.Text` part that updates as more deltas come in.
This enables the behavior where resuming a partially-streamed response will include
as much text as the model generated.
For other chunks, like `Thinking` or `ToolCall`, they are only added to response
content once the corresponding part has fully streamed. This avoids issues like
adding incomplete tool calls, or thinking blocks missing signatures, to the response.
For each iterator, fully iterating through the iterator will consume the whole
LLM stream. You can pause stream execution midway by breaking out of the iterator,
and you can safely resume execution from the same iterator if desired.
**Bases:**
<TypeLink type={{"type_str": "BaseAsyncStreamResponse[AsyncContextToolkit[DepsT], FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseAsyncStreamResponse", "description": null, "kind": "simple", "symbol_name": "BaseAsyncStreamResponse", "doc_url": null}, "parameters": [{"type_str": "AsyncContextToolkit[DepsT]", "description": null, "kind": "generic", "base_type": {"type_str": "AsyncContextToolkit", "description": null, "kind": "simple", "symbol_name": "AsyncContextToolkit", "doc_url": "/docs/api/llm/tools#async-context-toolkit"}, "parameters": [{"type_str": "DepsT", "description": null, "kind": "simple", "symbol_name": "DepsT", "doc_url": null}], "doc_url": null}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />, <TypeLink type={{"type_str": "Generic[DepsT, FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "Generic", "description": null, "kind": "simple", "symbol_name": "Generic", "doc_url": "https://docs.python.org/3/library/typing.html#typing.Generic"}, "parameters": [{"type_str": "DepsT", "description": null, "kind": "simple", "symbol_name": "DepsT", "doc_url": null}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />
## <ApiType type="Function" slug="mirascope-llm-responses-stream_response--async-context-stream-response-execute_tools" symbolName="execute_tools" /> execute_tools
Execute and return all of the tool calls in the response.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "ctx",
"type_info": {
"type_str": "Context[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Context",
"description": null,
"kind": "simple",
"symbol_name": "Context",
"doc_url": null
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
"description": "A `Context` with the required deps type."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Sequence[ToolOutput[Jsonable]]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Sequence"
},
"parameters": [
{
"type_str": "ToolOutput[Jsonable]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ToolOutput",
"description": null,
"kind": "simple",
"symbol_name": "ToolOutput",
"doc_url": "/docs/api/llm/content#tool-output"
},
"parameters": [
{
"type_str": "Jsonable",
"description": null,
"kind": "simple",
"symbol_name": "Jsonable",
"doc_url": "/docs/api/llm/types#jsonable"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A sequence containing a `ToolOutput` for every tool call in the order they appeared."
}}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-stream_response--async-context-stream-response-resume" symbolName="resume" /> resume
Generate a new `AsyncContextStreamResponse` using this response's messages with additional user content.
Uses this response's tools and format type. Also uses this response's provider,
model, client, and params, unless the model context manager is being used to
provide a new LLM as an override.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "ctx",
"type_info": {
"type_str": "Context[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Context",
"description": null,
"kind": "simple",
"symbol_name": "Context",
"doc_url": null
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
      "description": "A `Context` with the required deps type."
},
{
"name": "content",
"type_info": {
"type_str": "UserContent",
"description": null,
"kind": "simple",
"symbol_name": "UserContent",
"doc_url": "/docs/api/llm/messages#user-content"
},
"description": "The new user message content to append to the message history."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "AsyncContextStreamResponse[DepsT] | AsyncContextStreamResponse[DepsT, FormattableT]",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Union"
},
"parameters": [
{
"type_str": "AsyncContextStreamResponse[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "AsyncContextStreamResponse",
"description": null,
"kind": "simple",
"symbol_name": "AsyncContextStreamResponse",
"doc_url": "/docs/api/llm/responses#async-context-stream-response"
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
{
"type_str": "AsyncContextStreamResponse[DepsT, FormattableT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "AsyncContextStreamResponse",
"description": null,
"kind": "simple",
"symbol_name": "AsyncContextStreamResponse",
"doc_url": "/docs/api/llm/responses#async-context-stream-response"
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
},
{
"type_str": "FormattableT",
"description": null,
"kind": "simple",
"symbol_name": "FormattableT",
"doc_url": "/docs/api/llm/formatting#formattable-t"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A new `AsyncContextStreamResponse` instance generated from the extended message history."
}}
/>
## <ApiType type="Class" slug="async-response" symbolName="AsyncResponse" /> AsyncResponse
The response generated by an LLM in async mode.
**Bases:**
<TypeLink type={{"type_str": "BaseResponse[AsyncToolkit, FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseResponse", "description": null, "kind": "simple", "symbol_name": "BaseResponse", "doc_url": null}, "parameters": [{"type_str": "AsyncToolkit", "description": null, "kind": "simple", "symbol_name": "AsyncToolkit", "doc_url": "/docs/api/llm/tools#async-toolkit"}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />
## <ApiType type="Function" slug="mirascope-llm-responses-response--async-response-execute_tools" symbolName="execute_tools" /> execute_tools
Execute and return all of the tool calls in the response.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Sequence[ToolOutput[Jsonable]]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Sequence"
},
"parameters": [
{
"type_str": "ToolOutput[Jsonable]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ToolOutput",
"description": null,
"kind": "simple",
"symbol_name": "ToolOutput",
"doc_url": "/docs/api/llm/content#tool-output"
},
"parameters": [
{
"type_str": "Jsonable",
"description": null,
"kind": "simple",
"symbol_name": "Jsonable",
"doc_url": "/docs/api/llm/types#jsonable"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A sequence containing a `ToolOutput` for every tool call in the order they appeared."
}}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-response--async-response-resume" symbolName="resume" /> resume
Generate a new `AsyncResponse` using this response's messages with additional user content.
Uses this response's tools and format type. Also uses this response's provider,
model, client, and params, unless the model context manager is being used to
provide a new LLM as an override.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "content",
"type_info": {
"type_str": "UserContent",
"description": null,
"kind": "simple",
"symbol_name": "UserContent",
"doc_url": "/docs/api/llm/messages#user-content"
},
"description": "The new user message content to append to the message history."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "AsyncResponse | AsyncResponse[FormattableT]",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Union"
},
"parameters": [
{
"type_str": "AsyncResponse",
"description": null,
"kind": "simple",
"symbol_name": "AsyncResponse",
"doc_url": "/docs/api/llm/responses#async-response"
},
{
"type_str": "AsyncResponse[FormattableT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "AsyncResponse",
"description": null,
"kind": "simple",
"symbol_name": "AsyncResponse",
"doc_url": "/docs/api/llm/responses#async-response"
},
"parameters": [
{
"type_str": "FormattableT",
"description": null,
"kind": "simple",
"symbol_name": "FormattableT",
"doc_url": "/docs/api/llm/formatting#formattable-t"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A new `AsyncResponse` instance generated from the extended message history."
}}
/>
## <ApiType type="Attribute" slug="async-stream" symbolName="AsyncStream" /> AsyncStream
**Type:** <TypeLink type={{"type_str": "TypeAlias", "description": null, "kind": "simple", "symbol_name": "TypeAlias", "doc_url": null}} />
An asynchronous assistant content stream.
## <ApiType type="Class" slug="async-stream-response" symbolName="AsyncStreamResponse" /> AsyncStreamResponse
An `AsyncStreamResponse` wraps response content from the LLM with a streaming interface.
This class supports iteration to process chunks as they arrive from the model.
Content can be streamed in one of four ways:
- Via `.streams()`, which provides an iterator of streams, where each
stream contains chunks of streamed data. The chunks contain `delta`s (new content
in that particular chunk), and the stream itself accumulates the collected state
of all the chunks processed thus far.
- Via `.chunk_stream()` which allows iterating over Mirascope's provider-
agnostic chunk representation.
- Via `.pretty_stream()` a helper method which provides all response content
as `str` deltas. Iterating through `pretty_stream` will yield text content and
optionally placeholder representations for other content types, but it will still
consume the full stream.
- Via `.structured_stream()`, a helper method which provides partial
structured outputs from a response (useful when FormatT is set). Iterating through
`structured_stream` will only yield structured partials, but it will still consume
the full stream.
As chunks are consumed, they are collected in-memory on the `AsyncStreamResponse`, and they
become available in `.content`, `.messages`, `.tool_calls`, etc. All of the stream
iterators can be restarted after the stream has been consumed, in which case they
will yield chunks from memory in the original sequence that came from the LLM. If
the stream is only partially consumed, a fresh iterator will first iterate through
in-memory content, and then will continue consuming fresh chunks from the LLM.
In the specific case of text chunks, they are included in the response content as soon
as they become available, via an `llm.Text` part that updates as more deltas come in.
This enables the behavior where resuming a partially-streamed response will include
as much text as the model generated.
For other chunks, like `Thinking` or `ToolCall`, they are only added to response
content once the corresponding part has fully streamed. This avoids issues like
adding incomplete tool calls, or thinking blocks missing signatures, to the response.
For each iterator, fully iterating through the iterator will consume the whole
LLM stream. You can pause stream execution midway by breaking out of the iterator,
and you can safely resume execution from the same iterator if desired.
**Bases:**
<TypeLink type={{"type_str": "BaseAsyncStreamResponse[AsyncToolkit, FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseAsyncStreamResponse", "description": null, "kind": "simple", "symbol_name": "BaseAsyncStreamResponse", "doc_url": null}, "parameters": [{"type_str": "AsyncToolkit", "description": null, "kind": "simple", "symbol_name": "AsyncToolkit", "doc_url": "/docs/api/llm/tools#async-toolkit"}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />
## <ApiType type="Function" slug="mirascope-llm-responses-stream_response--async-stream-response-execute_tools" symbolName="execute_tools" /> execute_tools
Execute and return all of the tool calls in the response.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Sequence[ToolOutput[Jsonable]]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Sequence"
},
"parameters": [
{
"type_str": "ToolOutput[Jsonable]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ToolOutput",
"description": null,
"kind": "simple",
"symbol_name": "ToolOutput",
"doc_url": "/docs/api/llm/content#tool-output"
},
"parameters": [
{
"type_str": "Jsonable",
"description": null,
"kind": "simple",
"symbol_name": "Jsonable",
"doc_url": "/docs/api/llm/types#jsonable"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A sequence containing a `ToolOutput` for every tool call in the order they appeared."
}}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-stream_response--async-stream-response-resume" symbolName="resume" /> resume
Generate a new `AsyncStreamResponse` using this response's messages with additional user content.
Uses this response's tools and format type. Also uses this response's provider,
model, client, and params, unless the model context manager is being used to
provide a new LLM as an override.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "content",
"type_info": {
"type_str": "UserContent",
"description": null,
"kind": "simple",
"symbol_name": "UserContent",
"doc_url": "/docs/api/llm/messages#user-content"
},
"description": "The new user message content to append to the message history."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "AsyncStreamResponse | AsyncStreamResponse[FormattableT]",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Union"
},
"parameters": [
{
"type_str": "AsyncStreamResponse",
"description": null,
"kind": "simple",
"symbol_name": "AsyncStreamResponse",
"doc_url": "/docs/api/llm/responses#async-stream-response"
},
{
"type_str": "AsyncStreamResponse[FormattableT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "AsyncStreamResponse",
"description": null,
"kind": "simple",
"symbol_name": "AsyncStreamResponse",
"doc_url": "/docs/api/llm/responses#async-stream-response"
},
"parameters": [
{
"type_str": "FormattableT",
"description": null,
"kind": "simple",
"symbol_name": "FormattableT",
"doc_url": "/docs/api/llm/formatting#formattable-t"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A new `AsyncStreamResponse` instance generated from the extended message history."
}}
/>
## <ApiType type="Class" slug="async-text-stream" symbolName="AsyncTextStream" /> AsyncTextStream
Asynchronous text stream implementation.
**Bases:**
<TypeLink type={{"type_str": "BaseAsyncStream[Text, str]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseAsyncStream", "description": null, "kind": "simple", "symbol_name": "BaseAsyncStream", "doc_url": null}, "parameters": [{"type_str": "Text", "description": null, "kind": "simple", "symbol_name": "Text", "doc_url": "/docs/api/llm/content#text"}, {"type_str": "str", "description": null, "kind": "simple", "symbol_name": "str", "doc_url": "https://docs.python.org/3/library/stdtypes.html#str"}], "doc_url": null}} />
<AttributesTable
attributes={[
{
"name": "type",
"type_info": {
"type_str": "Literal['async_text_stream']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'async_text_stream'",
"description": null,
"kind": "simple",
"symbol_name": "'async_text_stream'",
"doc_url": null
}
],
"doc_url": null
}
},
{
"name": "content_type",
"type_info": {
"type_str": "Literal['text']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'text'",
"description": null,
"kind": "simple",
"symbol_name": "'text'",
"doc_url": null
}
],
"doc_url": null
},
"description": "The type of content stored in this stream."
},
{
"name": "partial_text",
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": null
},
"description": "The accumulated text content as chunks are received."
}
]}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-streams--async-text-stream-collect" symbolName="collect" /> collect
Asynchronously collect all chunks and return the final Text content.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Text",
"description": null,
"kind": "simple",
"symbol_name": "Text",
"doc_url": "/docs/api/llm/content#text"
},
"description": "The complete text content after consuming all chunks."
}}
/>
## <ApiType type="Class" slug="async-thought-stream" symbolName="AsyncThoughtStream" /> AsyncThoughtStream
Asynchronous thought stream implementation.
**Bases:**
<TypeLink type={{"type_str": "BaseAsyncStream[Thought, str]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseAsyncStream", "description": null, "kind": "simple", "symbol_name": "BaseAsyncStream", "doc_url": null}, "parameters": [{"type_str": "Thought", "description": null, "kind": "simple", "symbol_name": "Thought", "doc_url": "/docs/api/llm/content#thought"}, {"type_str": "str", "description": null, "kind": "simple", "symbol_name": "str", "doc_url": "https://docs.python.org/3/library/stdtypes.html#str"}], "doc_url": null}} />
<AttributesTable
attributes={[
{
"name": "type",
"type_info": {
"type_str": "Literal['async_thought_stream']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'async_thought_stream'",
"description": null,
"kind": "simple",
"symbol_name": "'async_thought_stream'",
"doc_url": null
}
],
"doc_url": null
}
},
{
"name": "content_type",
"type_info": {
"type_str": "Literal['thought']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'thought'",
"description": null,
"kind": "simple",
"symbol_name": "'thought'",
"doc_url": null
}
],
"doc_url": null
},
"description": "The type of content stored in this stream."
},
{
"name": "partial_thought",
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": null
},
"description": "The accumulated thought content as chunks are received."
}
]}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-streams--async-thought-stream-collect" symbolName="collect" /> collect
Asynchronously collect all chunks and return the final Thought content.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Thought",
"description": null,
"kind": "simple",
"symbol_name": "Thought",
"doc_url": "/docs/api/llm/content#thought"
},
"description": "The complete thought content after consuming all chunks."
}}
/>
## <ApiType type="Class" slug="async-tool-call-stream" symbolName="AsyncToolCallStream" /> AsyncToolCallStream
Asynchronous tool call stream implementation.
**Bases:**
<TypeLink type={{"type_str": "BaseAsyncStream[ToolCall, str]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseAsyncStream", "description": null, "kind": "simple", "symbol_name": "BaseAsyncStream", "doc_url": null}, "parameters": [{"type_str": "ToolCall", "description": null, "kind": "simple", "symbol_name": "ToolCall", "doc_url": "/docs/api/llm/content#tool-call"}, {"type_str": "str", "description": null, "kind": "simple", "symbol_name": "str", "doc_url": "https://docs.python.org/3/library/stdtypes.html#str"}], "doc_url": null}} />
<AttributesTable
attributes={[
{
"name": "type",
"type_info": {
"type_str": "Literal['async_tool_call_stream']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'async_tool_call_stream'",
"description": null,
"kind": "simple",
"symbol_name": "'async_tool_call_stream'",
"doc_url": null
}
],
"doc_url": null
}
},
{
"name": "content_type",
"type_info": {
"type_str": "Literal['tool_call']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'tool_call'",
"description": null,
"kind": "simple",
"symbol_name": "'tool_call'",
"doc_url": null
}
],
"doc_url": null
},
"description": "The type of content stored in this stream."
},
{
"name": "tool_id",
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": null
},
"description": "A unique identifier for this tool call."
},
{
"name": "tool_name",
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": null
},
"description": "The name of the tool being called."
},
{
"name": "partial_args",
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": null
},
"description": "The accumulated tool arguments as chunks are received."
}
]}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-streams--async-tool-call-stream-collect" symbolName="collect" /> collect
Asynchronously collect all chunks and return the final ToolCall content.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "ToolCall",
"description": null,
"kind": "simple",
"symbol_name": "ToolCall",
"doc_url": "/docs/api/llm/content#tool-call"
},
"description": "The complete tool call after consuming all chunks."
}}
/>
## <ApiType type="Attribute" slug="chunk-iterator" symbolName="ChunkIterator" /> ChunkIterator
**Type:** <TypeLink type={{"type_str": "TypeAlias", "description": null, "kind": "simple", "symbol_name": "TypeAlias", "doc_url": null}} />
Synchronous iterator yielding chunks with raw data.
## <ApiType type="Class" slug="context-response" symbolName="ContextResponse" /> ContextResponse
The response generated by an LLM from a context call.
**Bases:**
<TypeLink type={{"type_str": "BaseResponse[ContextToolkit[DepsT], FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseResponse", "description": null, "kind": "simple", "symbol_name": "BaseResponse", "doc_url": null}, "parameters": [{"type_str": "ContextToolkit[DepsT]", "description": null, "kind": "generic", "base_type": {"type_str": "ContextToolkit", "description": null, "kind": "simple", "symbol_name": "ContextToolkit", "doc_url": "/docs/api/llm/tools#context-toolkit"}, "parameters": [{"type_str": "DepsT", "description": null, "kind": "simple", "symbol_name": "DepsT", "doc_url": null}], "doc_url": null}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />, <TypeLink type={{"type_str": "Generic[DepsT, FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "Generic", "description": null, "kind": "simple", "symbol_name": "Generic", "doc_url": "https://docs.python.org/3/library/typing.html#typing.Generic"}, "parameters": [{"type_str": "DepsT", "description": null, "kind": "simple", "symbol_name": "DepsT", "doc_url": null}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />
## <ApiType type="Function" slug="mirascope-llm-responses-response--context-response-execute_tools" symbolName="execute_tools" /> execute_tools
Execute and return all of the tool calls in the response.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "ctx",
"type_info": {
"type_str": "Context[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Context",
"description": null,
"kind": "simple",
"symbol_name": "Context",
"doc_url": null
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
"description": "A `Context` with the required deps type."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Sequence[ToolOutput[Jsonable]]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Sequence"
},
"parameters": [
{
"type_str": "ToolOutput[Jsonable]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ToolOutput",
"description": null,
"kind": "simple",
"symbol_name": "ToolOutput",
"doc_url": "/docs/api/llm/content#tool-output"
},
"parameters": [
{
"type_str": "Jsonable",
"description": null,
"kind": "simple",
"symbol_name": "Jsonable",
"doc_url": "/docs/api/llm/types#jsonable"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A sequence containing a `ToolOutput` for every tool call."
}}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-response--context-response-resume" symbolName="resume" /> resume
Generate a new `ContextResponse` using this response's messages with additional user content.
Uses this response's tools and format type. Also uses this response's provider,
model, client, and params, unless the model context manager is being used to
provide a new LLM as an override.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "ctx",
"type_info": {
"type_str": "Context[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Context",
"description": null,
"kind": "simple",
"symbol_name": "Context",
"doc_url": null
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
"description": "A `Context` with the required deps type."
},
{
"name": "content",
"type_info": {
"type_str": "UserContent",
"description": null,
"kind": "simple",
"symbol_name": "UserContent",
"doc_url": "/docs/api/llm/messages#user-content"
},
"description": "The new user message content to append to the message history."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "ContextResponse[DepsT] | ContextResponse[DepsT, FormattableT]",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Union"
},
"parameters": [
{
"type_str": "ContextResponse[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ContextResponse",
"description": null,
"kind": "simple",
"symbol_name": "ContextResponse",
"doc_url": "/docs/api/llm/responses#context-response"
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
{
"type_str": "ContextResponse[DepsT, FormattableT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ContextResponse",
"description": null,
"kind": "simple",
"symbol_name": "ContextResponse",
"doc_url": "/docs/api/llm/responses#context-response"
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
},
{
"type_str": "FormattableT",
"description": null,
"kind": "simple",
"symbol_name": "FormattableT",
"doc_url": "/docs/api/llm/formatting#formattable-t"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A new `ContextResponse` instance generated from the extended message history."
}}
/>
## <ApiType type="Class" slug="context-stream-response" symbolName="ContextStreamResponse" /> ContextStreamResponse
A `ContextStreamResponse` wraps response content from the LLM with a streaming interface.
This class supports iteration to process chunks as they arrive from the model.
Content can be streamed in one of four ways:
- Via `.streams()`, which provides an iterator of streams, where each
stream contains chunks of streamed data. The chunks contain `delta`s (new content
in that particular chunk), and the stream itself accumulates the collected state
of all the chunks processed thus far.
- Via `.chunk_stream()`, which allows iterating over Mirascope's provider-
agnostic chunk representation.
- Via `.pretty_stream()`, a helper method which provides all response content
as `str` deltas. Iterating through `pretty_stream` will yield text content and
optionally placeholder representations for other content types, but it will still
consume the full stream.
- Via `.structured_stream()`, a helper method which provides partial
structured outputs from a response (useful when FormattableT is set). Iterating through
`structured_stream` will only yield structured partials, but it will still consume
the full stream.
As chunks are consumed, they are collected in-memory on the `ContextStreamResponse`, and they
become available in `.content`, `.messages`, `.tool_calls`, etc. All of the stream
iterators can be restarted after the stream has been consumed, in which case they
will yield chunks from memory in the original sequence that came from the LLM. If
the stream is only partially consumed, a fresh iterator will first iterate through
in-memory content, and then will continue consuming fresh chunks from the LLM.
In the specific case of text chunks, they are included in the response content as soon
as they become available, via an `llm.Text` part that updates as more deltas come in.
This enables the behavior where resuming a partially-streamed response will include
as much text as the model generated.
For other chunks, like `Thinking` or `ToolCall`, they are only added to response
content once the corresponding part has fully streamed. This avoids issues like
adding incomplete tool calls, or thinking blocks missing signatures, to the response.
For each iterator, fully iterating through the iterator will consume the whole
LLM stream. You can pause stream execution midway by breaking out of the iterator,
and you can safely resume execution from the same iterator if desired.
**Bases:**
<TypeLink type={{"type_str": "BaseSyncStreamResponse[ContextToolkit[DepsT], FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseSyncStreamResponse", "description": null, "kind": "simple", "symbol_name": "BaseSyncStreamResponse", "doc_url": null}, "parameters": [{"type_str": "ContextToolkit[DepsT]", "description": null, "kind": "generic", "base_type": {"type_str": "ContextToolkit", "description": null, "kind": "simple", "symbol_name": "ContextToolkit", "doc_url": "/docs/api/llm/tools#context-toolkit"}, "parameters": [{"type_str": "DepsT", "description": null, "kind": "simple", "symbol_name": "DepsT", "doc_url": null}], "doc_url": null}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />, <TypeLink type={{"type_str": "Generic[DepsT, FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "Generic", "description": null, "kind": "simple", "symbol_name": "Generic", "doc_url": "https://docs.python.org/3/library/typing.html#typing.Generic"}, "parameters": [{"type_str": "DepsT", "description": null, "kind": "simple", "symbol_name": "DepsT", "doc_url": null}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />
## <ApiType type="Function" slug="mirascope-llm-responses-stream_response--context-stream-response-execute_tools" symbolName="execute_tools" /> execute_tools
Execute and return all of the tool calls in the response.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "ctx",
"type_info": {
"type_str": "Context[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Context",
"description": null,
"kind": "simple",
"symbol_name": "Context",
"doc_url": null
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
"description": "A `Context` with the required deps type."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Sequence[ToolOutput[Jsonable]]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Sequence"
},
"parameters": [
{
"type_str": "ToolOutput[Jsonable]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ToolOutput",
"description": null,
"kind": "simple",
"symbol_name": "ToolOutput",
"doc_url": "/docs/api/llm/content#tool-output"
},
"parameters": [
{
"type_str": "Jsonable",
"description": null,
"kind": "simple",
"symbol_name": "Jsonable",
"doc_url": "/docs/api/llm/types#jsonable"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A sequence containing a `ToolOutput` for every tool call."
}}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-stream_response--context-stream-response-resume" symbolName="resume" /> resume
Generate a new `ContextStreamResponse` using this response's messages with additional user content.
Uses this response's tools and format type. Also uses this response's provider,
model, client, and params, unless the model context manager is being used to
provide a new LLM as an override.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "ctx",
"type_info": {
"type_str": "Context[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Context",
"description": null,
"kind": "simple",
"symbol_name": "Context",
"doc_url": null
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
"description": "A `Context` with the required deps type."
},
{
"name": "content",
"type_info": {
"type_str": "UserContent",
"description": null,
"kind": "simple",
"symbol_name": "UserContent",
"doc_url": "/docs/api/llm/messages#user-content"
},
"description": "The new user message content to append to the message history."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Union"
},
"parameters": [
{
"type_str": "ContextStreamResponse[DepsT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ContextStreamResponse",
"description": null,
"kind": "simple",
"symbol_name": "ContextStreamResponse",
"doc_url": "/docs/api/llm/responses#context-stream-response"
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
}
],
"doc_url": null
},
{
"type_str": "ContextStreamResponse[DepsT, FormattableT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ContextStreamResponse",
"description": null,
"kind": "simple",
"symbol_name": "ContextStreamResponse",
"doc_url": "/docs/api/llm/responses#context-stream-response"
},
"parameters": [
{
"type_str": "DepsT",
"description": null,
"kind": "simple",
"symbol_name": "DepsT",
"doc_url": null
},
{
"type_str": "FormattableT",
"description": null,
"kind": "simple",
"symbol_name": "FormattableT",
"doc_url": "/docs/api/llm/formatting#formattable-t"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A new `ContextStreamResponse` instance generated from the extended message history."
}}
/>
## <ApiType type="Class" slug="finish-reason" symbolName="FinishReason" /> FinishReason
The reason why the LLM finished generating a response.
`FinishReason` is only set when the response did not have a normal finish (e.g. it
ran out of tokens). When a response finishes generating normally, no finish reason
is set.
**Bases:**
<TypeLink type={{"type_str": "str", "description": null, "kind": "simple", "symbol_name": "str", "doc_url": "https://docs.python.org/3/library/stdtypes.html#str"}} />, <TypeLink type={{"type_str": "Enum", "description": null, "kind": "simple", "symbol_name": "Enum", "doc_url": "https://docs.python.org/3/library/enum.html#enum.Enum"}} />
<AttributesTable
attributes={[
{
"name": "MAX_TOKENS",
"type_info": {
"type_str": "'max_tokens'",
"description": null,
"kind": "simple",
"symbol_name": "'max_tokens'",
"doc_url": null
}
},
{
"name": "REFUSAL",
"type_info": {
"type_str": "'refusal'",
"description": null,
"kind": "simple",
"symbol_name": "'refusal'",
"doc_url": null
}
},
{
"name": "CONTEXT_LENGTH_EXCEEDED",
"type_info": {
"type_str": "'context_length_exceeded'",
"description": null,
"kind": "simple",
"symbol_name": "'context_length_exceeded'",
"doc_url": null
}
}
]}
/>
## <ApiType type="Class" slug="finish-reason-chunk" symbolName="FinishReasonChunk" /> FinishReasonChunk
Represents the finish reason for a completed stream.
<AttributesTable
attributes={[
{
"name": "type",
"type_info": {
"type_str": "Literal['finish_reason_chunk']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'finish_reason_chunk'",
"description": null,
"kind": "simple",
"symbol_name": "'finish_reason_chunk'",
"doc_url": null
}
],
"doc_url": null
}
},
{
"name": "finish_reason",
"type_info": {
"type_str": "FinishReason",
"description": null,
"kind": "simple",
"symbol_name": "FinishReason",
"doc_url": null
},
"description": "The reason the stream finished."
}
]}
/>
## <ApiType type="Class" slug="raw-message-chunk" symbolName="RawMessageChunk" /> RawMessageChunk
A chunk containing provider-specific raw message content that will be added to the `AssistantMessage`.
This chunk contains a provider-specific representation of a piece of content that
will be added to the `AssistantMessage` reconstructed by the containing stream.
This content should be a Jsonable Python object for serialization purposes.
The intention is that this content may be passed as-is back to the provider when the
generated `AssistantMessage` is being reused in conversation.
<AttributesTable
attributes={[
{
"name": "type",
"type_info": {
"type_str": "Literal['raw_message_chunk']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'raw_message_chunk'",
"description": null,
"kind": "simple",
"symbol_name": "'raw_message_chunk'",
"doc_url": null
}
],
"doc_url": null
}
},
{
"name": "raw_message",
"type_info": {
"type_str": "Jsonable",
"description": null,
"kind": "simple",
"symbol_name": "Jsonable",
"doc_url": null
},
"description": "The provider-specific raw content.\n\nShould be a Jsonable object."
}
]}
/>
## <ApiType type="Class" slug="raw-stream-event-chunk" symbolName="RawStreamEventChunk" /> RawStreamEventChunk
A chunk containing a raw stream event from the underlying provider.
Will be accumulated on `StreamResponse.raw` for debugging purposes.
<AttributesTable
attributes={[
{
"name": "type",
"type_info": {
"type_str": "Literal['raw_stream_event_chunk']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'raw_stream_event_chunk'",
"description": null,
"kind": "simple",
"symbol_name": "'raw_stream_event_chunk'",
"doc_url": null
}
],
"doc_url": null
}
},
{
"name": "raw_stream_event",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": "Any",
"doc_url": null
},
"description": "The raw stream event from the underlying provider."
}
]}
/>
## <ApiType type="Class" slug="response" symbolName="Response" /> Response
The response generated by an LLM.
**Bases:**
<TypeLink type={{"type_str": "BaseResponse[Toolkit, FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseResponse", "description": null, "kind": "simple", "symbol_name": "BaseResponse", "doc_url": null}, "parameters": [{"type_str": "Toolkit", "description": null, "kind": "simple", "symbol_name": "Toolkit", "doc_url": "/docs/api/llm/tools#toolkit"}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />
## <ApiType type="Function" slug="mirascope-llm-responses-response--response-execute_tools" symbolName="execute_tools" /> execute_tools
Execute and return all of the tool calls in the response.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Sequence[ToolOutput[Jsonable]]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Sequence"
},
"parameters": [
{
"type_str": "ToolOutput[Jsonable]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ToolOutput",
"description": null,
"kind": "simple",
"symbol_name": "ToolOutput",
"doc_url": "/docs/api/llm/content#tool-output"
},
"parameters": [
{
"type_str": "Jsonable",
"description": null,
"kind": "simple",
"symbol_name": "Jsonable",
"doc_url": "/docs/api/llm/types#jsonable"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A sequence containing a `ToolOutput` for every tool call in the order they appeared."
}}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-response--response-resume" symbolName="resume" /> resume
Generate a new `Response` using this response's messages with additional user content.
Uses this response's tools and format type. Also uses this response's provider,
model, client, and params, unless the model context manager is being used to
provide a new LLM as an override.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "content",
"type_info": {
"type_str": "UserContent",
"description": null,
"kind": "simple",
"symbol_name": "UserContent",
"doc_url": "/docs/api/llm/messages#user-content"
},
"description": "The new user message content to append to the message history."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Response | Response[FormattableT]",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Union"
},
"parameters": [
{
"type_str": "Response",
"description": null,
"kind": "simple",
"symbol_name": "Response",
"doc_url": "/docs/api/llm/responses#response"
},
{
"type_str": "Response[FormattableT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Response",
"description": null,
"kind": "simple",
"symbol_name": "Response",
"doc_url": "/docs/api/llm/responses#response"
},
"parameters": [
{
"type_str": "FormattableT",
"description": null,
"kind": "simple",
"symbol_name": "FormattableT",
"doc_url": "/docs/api/llm/formatting#formattable-t"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A new `Response` instance generated from the extended message history."
}}
/>
## <ApiType type="Attribute" slug="response-t" symbolName="ResponseT" /> ResponseT
**Type:** <TypeLink type={{"type_str": "TypeVar('ResponseT', bound='BaseResponse[Any, Any]')", "description": null, "kind": "simple", "symbol_name": null, "doc_url": null}} />
## <ApiType type="Class" slug="root-response" symbolName="RootResponse" /> RootResponse
Base class for LLM responses.
**Bases:**
<TypeLink type={{"type_str": "Generic[ToolkitT, FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "Generic", "description": null, "kind": "simple", "symbol_name": "Generic", "doc_url": "https://docs.python.org/3/library/typing.html#typing.Generic"}, "parameters": [{"type_str": "ToolkitT", "description": null, "kind": "simple", "symbol_name": "ToolkitT", "doc_url": "/docs/api/llm/tools#toolkit-t"}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />, <TypeLink type={{"type_str": "ABC", "description": null, "kind": "simple", "symbol_name": "ABC", "doc_url": null}} />
<AttributesTable
attributes={[
{
"name": "raw",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": "Any",
"doc_url": null
},
"description": "The raw response from the LLM."
},
{
"name": "provider_id",
"type_info": {
"type_str": "ProviderId",
"description": null,
"kind": "simple",
"symbol_name": "ProviderId",
"doc_url": null
},
"description": "The provider that generated this response."
},
{
"name": "model_id",
"type_info": {
"type_str": "ModelId",
"description": null,
"kind": "simple",
"symbol_name": "ModelId",
"doc_url": null
},
"description": "The model id that generated this response."
},
{
"name": "params",
"type_info": {
"type_str": "Params",
"description": null,
"kind": "simple",
"symbol_name": "Params",
"doc_url": null
},
"description": "The params that were used to generate this response (or None)."
},
{
"name": "toolkit",
"type_info": {
"type_str": "ToolkitT",
"description": null,
"kind": "simple",
"symbol_name": "ToolkitT",
"doc_url": null
},
"description": "The toolkit containing the tools used when generating this response."
},
{
"name": "messages",
"type_info": {
"type_str": "list[Message]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "list",
"description": null,
"kind": "simple",
"symbol_name": "list",
"doc_url": null
},
"parameters": [
{
"type_str": "Message",
"description": null,
"kind": "simple",
"symbol_name": "Message",
"doc_url": null
}
],
"doc_url": null
},
"description": "The message history, including the most recent assistant message."
},
{
"name": "content",
"type_info": {
"type_str": "Sequence[AssistantContentPart]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": null
},
"parameters": [
{
"type_str": "AssistantContentPart",
"description": null,
"kind": "simple",
"symbol_name": "AssistantContentPart",
"doc_url": null
}
],
"doc_url": null
},
"description": "The content generated by the LLM."
},
{
"name": "texts",
"type_info": {
"type_str": "Sequence[Text]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": null
},
"parameters": [
{
"type_str": "Text",
"description": null,
"kind": "simple",
"symbol_name": "Text",
"doc_url": null
}
],
"doc_url": null
},
"description": "The text content in the generated response, if any."
},
{
"name": "tool_calls",
"type_info": {
"type_str": "Sequence[ToolCall]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": null
},
"parameters": [
{
"type_str": "ToolCall",
"description": null,
"kind": "simple",
"symbol_name": "ToolCall",
"doc_url": null
}
],
"doc_url": null
},
"description": "The tools the LLM wants called on its behalf, if any."
},
{
"name": "thoughts",
"type_info": {
"type_str": "Sequence[Thought]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": null
},
"parameters": [
{
"type_str": "Thought",
"description": null,
"kind": "simple",
"symbol_name": "Thought",
"doc_url": null
}
],
"doc_url": null
},
"description": "The readable thoughts from the model's thinking process, if any.\n\nThe thoughts may be direct output from the model thinking process, or may be a\ngenerated summary. (This depends on the provider; newer models tend to summarize.)"
},
{
"name": "finish_reason",
"type_info": {
"type_str": "FinishReason | None",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": null
},
"parameters": [
{
"type_str": "FinishReason",
"description": null,
"kind": "simple",
"symbol_name": "FinishReason",
"doc_url": null
},
{
"type_str": "None",
"description": null,
"kind": "simple",
"symbol_name": "None",
"doc_url": null
}
],
"doc_url": null
},
"description": "The reason why the LLM finished generating a response, if set.\n\n`finish_reason` is only set if the response did not finish generating normally,\ne.g. `FinishReason.MAX_TOKENS` if the model ran out of tokens before completing.\nWhen the response generates normally, `response.finish_reason` will be `None`."
},
{
"name": "usage",
"type_info": {
"type_str": "Usage | None",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": null
},
"parameters": [
{
"type_str": "Usage",
"description": null,
"kind": "simple",
"symbol_name": "Usage",
"doc_url": null
},
{
"type_str": "None",
"description": null,
"kind": "simple",
"symbol_name": "None",
"doc_url": null
}
],
"doc_url": null
},
"description": "Token usage statistics for this response, if available."
},
{
"name": "format",
"type_info": {
"type_str": "Format[FormattableT] | None",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": null
},
"parameters": [
{
"type_str": "Format[FormattableT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Format",
"description": null,
"kind": "simple",
"symbol_name": "Format",
"doc_url": null
},
"parameters": [
{
"type_str": "FormattableT",
"description": null,
"kind": "simple",
"symbol_name": "FormattableT",
"doc_url": null
}
],
"doc_url": null
},
{
"type_str": "None",
"description": null,
"kind": "simple",
"symbol_name": "None",
"doc_url": null
}
],
"doc_url": null
},
"description": "The `Format` describing the structured response format, if available."
},
{
"name": "model",
"type_info": {
"type_str": "Model",
"description": null,
"kind": "simple",
"symbol_name": "Model",
"doc_url": null
},
"description": "A `Model` with parameters matching this response."
}
]}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-root_response--root-response-parse" symbolName="parse" /> parse
Format the response according to the response format parser.
Supports:
- Pydantic BaseModel types (JSON schema validation)
- Primitive types (automatically unwrapped from wrapper model)
- Custom OutputParsers (custom parsing logic)
- Partial parsing during streaming (when partial=True)
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "partial",
"type_info": {
"type_str": "bool",
"description": null,
"kind": "simple",
"symbol_name": "bool",
"doc_url": "https://docs.python.org/3/library/functions.html#bool"
},
"default": "False",
"description": "If True, parse incomplete JSON as Partial model. Only works with\nstreaming responses that have accumulated JSON. Returns None if JSON\nis not yet available or cannot be parsed."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "FormattableT | Partial[FormattableT] | None",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Union"
},
"parameters": [
{
"type_str": "FormattableT",
"description": null,
"kind": "simple",
"symbol_name": "FormattableT",
"doc_url": "/docs/api/llm/formatting#formattable-t"
},
{
"type_str": "Partial[FormattableT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Partial",
"description": null,
"kind": "simple",
"symbol_name": "Partial",
"doc_url": "/docs/api/llm/formatting#partial"
},
"parameters": [
{
"type_str": "FormattableT",
"description": null,
"kind": "simple",
"symbol_name": "FormattableT",
"doc_url": "/docs/api/llm/formatting#formattable-t"
}
],
"doc_url": null
},
{
"type_str": "None",
"description": null,
"kind": "simple",
"symbol_name": "None",
"doc_url": "https://docs.python.org/3/library/constants.html#None"
}
],
"doc_url": null
},
"description": "The formatted response object of type FormatT. For BaseModel types, returns"
}}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-root_response--root-response-text" symbolName="text" /> text
Return all text content from this response as a single string.
Joins the text from all `Text` parts in the response content using the
specified separator.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "sep",
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": "https://docs.python.org/3/library/stdtypes.html#str"
},
"default": "'\\n'",
"description": "The separator to use when joining multiple text parts.\nDefaults to newline (\"\\n\")."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": "https://docs.python.org/3/library/stdtypes.html#str"
},
"description": "A string containing all text content joined by the separator."
}}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-root_response--root-response-pretty" symbolName="pretty" /> pretty
Return a string representation of all response content.
The response content will be represented in a way that emphasizes clarity and
readability, but may not include all metadata (like thinking signatures or tool
call ids), and thus cannot be used to reconstruct the response. For example:
**Thinking:**
The user is asking a math problem. I should use the calculator tool.
**Tool Call (calculator)** \{'operation': 'mult', 'a': 1337, 'b': 4242\}
I am going to use the calculator and answer your question for you!
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": "https://docs.python.org/3/library/stdtypes.html#str"
}
}}
/>
## <ApiType type="Attribute" slug="stream" symbolName="Stream" /> Stream
**Type:** <TypeLink type={{"type_str": "TypeAlias", "description": null, "kind": "simple", "symbol_name": "TypeAlias", "doc_url": null}} />
A synchronous assistant content stream.
## <ApiType type="Class" slug="stream-response" symbolName="StreamResponse" /> StreamResponse
A `StreamResponse` wraps response content from the LLM with a streaming interface.
This class supports iteration to process chunks as they arrive from the model.
Content can be streamed in one of four ways:
- Via `.streams()`, which provides an iterator of streams, where each
stream contains chunks of streamed data. The chunks contain `delta`s (new content
in that particular chunk), and the stream itself accumulates the collected state
of all the chunks processed thus far.
- Via `.chunk_stream()` which allows iterating over Mirascope's provider-
agnostic chunk representation.
- Via `.pretty_stream()` a helper method which provides all response content
as `str` deltas. Iterating through `pretty_stream` will yield text content and
optionally placeholder representations for other content types, but it will still
consume the full stream.
- Via `.structured_stream()`, a helper method which provides partial
structured outputs from a response (useful when FormatT is set). Iterating through
`structured_stream` will only yield structured partials, but it will still consume
the full stream.
As chunks are consumed, they are collected in-memory on the `StreamResponse`, and they
become available in `.content`, `.messages`, `.tool_calls`, etc. All of the stream
iterators can be restarted after the stream has been consumed, in which case they
will yield chunks from memory in the original sequence that came from the LLM. If
the stream is only partially consumed, a fresh iterator will first iterate through
in-memory content, and then will continue consuming fresh chunks from the LLM.
In the specific case of text chunks, they are included in the response content as soon
as they become available, via an `llm.Text` part that updates as more deltas come in.
This enables the behavior where resuming a partially-streamed response will include
as much text as the model generated.
For other chunks, like `Thinking` or `ToolCall`, they are only added to response
content once the corresponding part has fully streamed. This avoids issues like
adding incomplete tool calls, or thinking blocks missing signatures, to the response.
For each iterator, fully iterating through the iterator will consume the whole
LLM stream. You can pause stream execution midway by breaking out of the iterator,
and you can safely resume execution from the same iterator if desired.
**Bases:**
<TypeLink type={{"type_str": "BaseSyncStreamResponse[Toolkit, FormattableT]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseSyncStreamResponse", "description": null, "kind": "simple", "symbol_name": "BaseSyncStreamResponse", "doc_url": null}, "parameters": [{"type_str": "Toolkit", "description": null, "kind": "simple", "symbol_name": "Toolkit", "doc_url": "/docs/api/llm/tools#toolkit"}, {"type_str": "FormattableT", "description": null, "kind": "simple", "symbol_name": "FormattableT", "doc_url": "/docs/api/llm/formatting#formattable-t"}], "doc_url": null}} />
## <ApiType type="Function" slug="mirascope-llm-responses-stream_response--stream-response-execute_tools" symbolName="execute_tools" /> execute_tools
Execute and return all of the tool calls in the response.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Sequence[ToolOutput[Jsonable]]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Sequence",
"description": null,
"kind": "simple",
"symbol_name": "Sequence",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Sequence"
},
"parameters": [
{
"type_str": "ToolOutput[Jsonable]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "ToolOutput",
"description": null,
"kind": "simple",
"symbol_name": "ToolOutput",
"doc_url": "/docs/api/llm/content#tool-output"
},
"parameters": [
{
"type_str": "Jsonable",
"description": null,
"kind": "simple",
"symbol_name": "Jsonable",
"doc_url": "/docs/api/llm/types#jsonable"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A sequence containing a `ToolOutput` for every tool call in the order they appeared."
}}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-stream_response--stream-response-resume" symbolName="resume" /> resume
Generate a new `StreamResponse` using this response's messages with additional user content.
Uses this response's tools and format type. Also uses this response's provider,
model, client, and params, unless the model context manager is being used to
provide a new LLM as an override.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
},
{
"name": "content",
"type_info": {
"type_str": "UserContent",
"description": null,
"kind": "simple",
"symbol_name": "UserContent",
"doc_url": "/docs/api/llm/messages#user-content"
},
"description": "The new user message content to append to the message history."
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "StreamResponse | StreamResponse[FormattableT]",
"description": null,
"kind": "union",
"base_type": {
"type_str": "Union",
"description": null,
"kind": "simple",
"symbol_name": "Union",
"doc_url": "https://docs.python.org/3/library/typing.html#typing.Union"
},
"parameters": [
{
"type_str": "StreamResponse",
"description": null,
"kind": "simple",
"symbol_name": "StreamResponse",
"doc_url": "/docs/api/llm/responses#stream-response"
},
{
"type_str": "StreamResponse[FormattableT]",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "StreamResponse",
"description": null,
"kind": "simple",
"symbol_name": "StreamResponse",
"doc_url": "/docs/api/llm/responses#stream-response"
},
"parameters": [
{
"type_str": "FormattableT",
"description": null,
"kind": "simple",
"symbol_name": "FormattableT",
"doc_url": "/docs/api/llm/formatting#formattable-t"
}
],
"doc_url": null
}
],
"doc_url": null
},
"description": "A new `StreamResponse` instance generated from the extended message history."
}}
/>
## <ApiType type="Attribute" slug="stream-response-chunk" symbolName="StreamResponseChunk" /> StreamResponseChunk
**Type:** <TypeLink type={{"type_str": "TypeAlias", "description": null, "kind": "simple", "symbol_name": "TypeAlias", "doc_url": null}} />
## <ApiType type="Attribute" slug="stream-response-t" symbolName="StreamResponseT" /> StreamResponseT
**Type:** <TypeLink type={{"type_str": "TypeVar('StreamResponseT', bound='BaseStreamResponse[Any, Any, Any]')", "description": null, "kind": "simple", "symbol_name": null, "doc_url": null}} />
## <ApiType type="Class" slug="text-stream" symbolName="TextStream" /> TextStream
Synchronous text stream implementation.
**Bases:**
<TypeLink type={{"type_str": "BaseStream[Text, str]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseStream", "description": null, "kind": "simple", "symbol_name": "BaseStream", "doc_url": null}, "parameters": [{"type_str": "Text", "description": null, "kind": "simple", "symbol_name": "Text", "doc_url": "/docs/api/llm/content#text"}, {"type_str": "str", "description": null, "kind": "simple", "symbol_name": "str", "doc_url": "https://docs.python.org/3/library/stdtypes.html#str"}], "doc_url": null}} />
<AttributesTable
attributes={[
{
"name": "type",
"type_info": {
"type_str": "Literal['text_stream']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'text_stream'",
"description": null,
"kind": "simple",
"symbol_name": "'text_stream'",
"doc_url": null
}
],
"doc_url": null
}
},
{
"name": "content_type",
"type_info": {
"type_str": "Literal['text']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'text'",
"description": null,
"kind": "simple",
"symbol_name": "'text'",
"doc_url": null
}
],
"doc_url": null
},
"description": "The type of content stored in this stream."
},
{
"name": "partial_text",
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": null
},
"description": "The accumulated text content as chunks are received."
}
]}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-streams--text-stream-collect" symbolName="collect" /> collect
Collect all chunks and return the final Text content.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Text",
"description": null,
"kind": "simple",
"symbol_name": "Text",
"doc_url": "/docs/api/llm/content#text"
},
"description": "The complete text content after consuming all chunks."
}}
/>
## <ApiType type="Class" slug="thought-stream" symbolName="ThoughtStream" /> ThoughtStream
Synchronous thought stream implementation.
**Bases:**
<TypeLink type={{"type_str": "BaseStream[Thought, str]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseStream", "description": null, "kind": "simple", "symbol_name": "BaseStream", "doc_url": null}, "parameters": [{"type_str": "Thought", "description": null, "kind": "simple", "symbol_name": "Thought", "doc_url": "/docs/api/llm/content#thought"}, {"type_str": "str", "description": null, "kind": "simple", "symbol_name": "str", "doc_url": "https://docs.python.org/3/library/stdtypes.html#str"}], "doc_url": null}} />
<AttributesTable
attributes={[
{
"name": "type",
"type_info": {
"type_str": "Literal['thought_stream']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'thought_stream'",
"description": null,
"kind": "simple",
"symbol_name": "'thought_stream'",
"doc_url": null
}
],
"doc_url": null
}
},
{
"name": "content_type",
"type_info": {
"type_str": "Literal['thought']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'thought'",
"description": null,
"kind": "simple",
"symbol_name": "'thought'",
"doc_url": null
}
],
"doc_url": null
},
"description": "The type of content stored in this stream."
},
{
"name": "partial_thought",
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": null
},
"description": "The accumulated thought content as chunks are received."
}
]}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-streams--thought-stream-collect" symbolName="collect" /> collect
Collect all chunks and return the final Thought content.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "Thought",
"description": null,
"kind": "simple",
"symbol_name": "Thought",
"doc_url": "/docs/api/llm/content#thought"
},
"description": "The complete thought content after consuming all chunks."
}}
/>
## <ApiType type="Class" slug="tool-call-stream" symbolName="ToolCallStream" /> ToolCallStream
Synchronous tool call stream implementation.
**Bases:**
<TypeLink type={{"type_str": "BaseStream[ToolCall, str]", "description": null, "kind": "generic", "base_type": {"type_str": "BaseStream", "description": null, "kind": "simple", "symbol_name": "BaseStream", "doc_url": null}, "parameters": [{"type_str": "ToolCall", "description": null, "kind": "simple", "symbol_name": "ToolCall", "doc_url": "/docs/api/llm/content#tool-call"}, {"type_str": "str", "description": null, "kind": "simple", "symbol_name": "str", "doc_url": "https://docs.python.org/3/library/stdtypes.html#str"}], "doc_url": null}} />
<AttributesTable
attributes={[
{
"name": "type",
"type_info": {
"type_str": "Literal['tool_call_stream']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'tool_call_stream'",
"description": null,
"kind": "simple",
"symbol_name": "'tool_call_stream'",
"doc_url": null
}
],
"doc_url": null
}
},
{
"name": "content_type",
"type_info": {
"type_str": "Literal['tool_call']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'tool_call'",
"description": null,
"kind": "simple",
"symbol_name": "'tool_call'",
"doc_url": null
}
],
"doc_url": null
},
"description": "The type of content stored in this stream."
},
{
"name": "tool_id",
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": null
},
"description": "A unique identifier for this tool call."
},
{
"name": "tool_name",
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": null
},
"description": "The name of the tool being called."
},
{
"name": "partial_args",
"type_info": {
"type_str": "str",
"description": null,
"kind": "simple",
"symbol_name": "str",
"doc_url": null
},
"description": "The accumulated tool arguments as chunks are received."
}
]}
/>
## <ApiType type="Function" slug="mirascope-llm-responses-streams--tool-call-stream-collect" symbolName="collect" /> collect
Collect all chunks and return the final ToolCall content.
<ParametersTable
parameters={[
{
"name": "self",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": null,
"doc_url": null
}
}
]}
/>
<ReturnTable
returnType={{
"type_info": {
"type_str": "ToolCall",
"description": null,
"kind": "simple",
"symbol_name": "ToolCall",
"doc_url": "/docs/api/llm/content#tool-call"
},
"description": "The complete tool call after consuming all chunks."
}}
/>
## <ApiType type="Class" slug="usage" symbolName="Usage" /> Usage
Token usage statistics from an LLM API call.
This abstraction captures common usage metrics across providers while preserving
access to the raw provider-specific usage data.
<AttributesTable
attributes={[
{
"name": "input_tokens",
"type_info": {
"type_str": "int",
"description": null,
"kind": "simple",
"symbol_name": "int",
"doc_url": null
},
"description": "The number of input tokens used.\n\nThis includes ALL input tokens, including cache read and write tokens.\n\nWill be 0 if not reported by the provider."
},
{
"name": "output_tokens",
"type_info": {
"type_str": "int",
"description": null,
"kind": "simple",
"symbol_name": "int",
"doc_url": null
},
"description": "The number of output tokens used.\n\nThis includes ALL output tokens, including `reasoning_tokens` that may not be\nin the user's visible output, or other \"hidden\" tokens.\n\nWill be 0 if not reported by the provider."
},
{
"name": "cache_read_tokens",
"type_info": {
"type_str": "int",
"description": null,
"kind": "simple",
"symbol_name": "int",
"doc_url": null
},
"description": "The number of tokens read from cache (prompt caching).\n\nThese are input tokens that were read from cache. Cache read tokens are generally\nmuch less expensive than regular input tokens.\n\nWill be 0 if not reported by the provider or if caching was not used."
},
{
"name": "cache_write_tokens",
"type_info": {
"type_str": "int",
"description": null,
"kind": "simple",
"symbol_name": "int",
"doc_url": null
},
"description": "The number of tokens written to cache (cache creation).\n\nThese are input tokens that were written to cache, for future reuse and retrieval.\nCache write tokens are generally more expensive than uncached input tokens,\nbut may lead to cost savings down the line when they are re-read as cache_read_tokens.\n\nWill be 0 if not reported by the provider or if caching was not used."
},
{
"name": "reasoning_tokens",
"type_info": {
"type_str": "int",
"description": null,
"kind": "simple",
"symbol_name": "int",
"doc_url": null
},
"description": "The number of tokens used for reasoning/thinking.\n\nReasoning tokens are a subset of output_tokens that were generated as part of the model's\ninterior reasoning process. They are billed as output tokens, though they are generally\nnot shown to the user.\n\nWill be 0 if not reported by the provider or if the model does not support reasoning."
},
{
"name": "raw",
"type_info": {
"type_str": "Any",
"description": null,
"kind": "simple",
"symbol_name": "Any",
"doc_url": null
},
"description": "The raw usage object from the provider."
},
{
"name": "total_tokens",
"type_info": {
"type_str": "int",
"description": null,
"kind": "simple",
"symbol_name": "int",
"doc_url": null
},
"description": "The total number of tokens used (input + output)."
}
]}
/>
## <ApiType type="Class" slug="usage-delta-chunk" symbolName="UsageDeltaChunk" /> UsageDeltaChunk
A chunk containing incremental token usage information from a streaming response.
This represents a delta/increment in usage statistics as they arrive during streaming.
Multiple UsageDeltaChunks are accumulated to produce the final Usage object.
<AttributesTable
attributes={[
{
"name": "type",
"type_info": {
"type_str": "Literal['usage_delta_chunk']",
"description": null,
"kind": "generic",
"base_type": {
"type_str": "Literal",
"description": null,
"kind": "simple",
"symbol_name": "Literal",
"doc_url": null
},
"parameters": [
{
"type_str": "'usage_delta_chunk'",
"description": null,
"kind": "simple",
"symbol_name": "'usage_delta_chunk'",
"doc_url": null
}
],
"doc_url": null
}
},
{
"name": "input_tokens",
"type_info": {
"type_str": "int",
"description": null,
"kind": "simple",
"symbol_name": "int",
"doc_url": null
},
"description": "Delta in input tokens."
},
{
"name": "output_tokens",
"type_info": {
"type_str": "int",
"description": null,
"kind": "simple",
"symbol_name": "int",
"doc_url": null
},
"description": "Delta in output tokens."
},
{
"name": "cache_read_tokens",
"type_info": {
"type_str": "int",
"description": null,
"kind": "simple",
"symbol_name": "int",
"doc_url": null
},
"description": "Delta in cache read tokens."
},
{
"name": "cache_write_tokens",
"type_info": {
"type_str": "int",
"description": null,
"kind": "simple",
"symbol_name": "int",
"doc_url": null
},
"description": "Delta in cache write tokens."
},
{
"name": "reasoning_tokens",
"type_info": {
"type_str": "int",
"description": null,
"kind": "simple",
"symbol_name": "int",
"doc_url": null
},
"description": "Delta in reasoning/thinking tokens."
}
]}
/>