GoogleStream(
    *,
    stream: (
        Generator[
            tuple[
                _BaseCallResponseChunkT, _BaseToolT | None
            ],
            None,
            None,
        ]
        | AsyncGenerator[
            tuple[
                _BaseCallResponseChunkT, _BaseToolT | None
            ],
            None,
        ]
    ),
    metadata: Metadata,
    tool_types: list[type[_BaseToolT]] | None,
    call_response_type: type[_BaseCallResponseT],
    model: str,
    prompt_template: str | None,
    fn_args: dict[str, Any],
    dynamic_config: _BaseDynamicConfigT,
    messages: list[_MessageParamT],
    call_params: _BaseCallParamsT,
    call_kwargs: BaseCallKwargs[_ToolSchemaT]
)
Bases: BaseStream[GoogleCallResponse, GoogleCallResponseChunk, ContentDict, ContentDict, ContentDict, ContentListUnion | ContentListUnionDict, GoogleTool, Tool, GoogleDynamicConfig, GoogleCallParams, FinishReason]
A convenience class around streaming Google LLM calls.
Example:
from mirascope.core import prompt_template
from mirascope.core.google import google_call


@google_call("gemini-1.5-flash", stream=True)
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book"


stream = recommend_book("fantasy")  # returns `GoogleStream` instance
for chunk, _ in stream:
    print(chunk.content, end="", flush=True)
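The decorated function can also be defined as a coroutine for asynchronous streaming. The following is a minimal sketch (the `recommend_book_async` name and surrounding `main` wrapper are illustrative, not part of the library); awaiting the call returns a stream that supports `async for`:

import asyncio

from mirascope.core.google import google_call


@google_call("gemini-1.5-flash", stream=True)
async def recommend_book_async(genre: str) -> str:
    return f"Recommend a {genre} book"


async def main() -> None:
    stream = await recommend_book_async("fantasy")  # also a `GoogleStream` instance
    async for chunk, _ in stream:
        print(chunk.content, end="", flush=True)


asyncio.run(main())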
Source code in mirascope/core/base/stream.py
def __init__(
    self,
    *,
    stream: Generator[tuple[_BaseCallResponseChunkT, _BaseToolT | None], None, None]
    | AsyncGenerator[
        tuple[_BaseCallResponseChunkT, _BaseToolT | None],
        None,
    ],
    metadata: Metadata,
    tool_types: list[type[_BaseToolT]] | None,
    call_response_type: type[_BaseCallResponseT],
    model: str,
    prompt_template: str | None,
    fn_args: dict[str, Any],
    dynamic_config: _BaseDynamicConfigT,
    messages: list[_MessageParamT],
    call_params: _BaseCallParamsT,
    call_kwargs: BaseCallKwargs[_ToolSchemaT],
) -> None:
    """Initializes an instance of `BaseStream`."""
    self.content = ""
    self.stream = stream
    self.metadata = metadata
    self.tool_types = tool_types
    self.call_response_type = call_response_type
    self.model = model
    self.prompt_template = prompt_template
    self.fn_args = fn_args
    self.dynamic_config = dynamic_config
    self.messages = messages
    self.call_params = call_params
    self.call_kwargs = call_kwargs
    self.user_message_param = get_possible_user_message_param(messages)  # pyright: ignore [reportAttributeAccessIssue]
construct_call_response
Constructs the call response from a consumed GoogleStream.
Raises:

| Type | Description |
| --- | --- |
| `ValueError` | if the stream has not yet been consumed. |
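A minimal usage sketch, reusing the `recommend_book` example above: the stream must be fully consumed before a call response can be constructed, otherwise the `ValueError` above is raised.

stream = recommend_book("fantasy")
for chunk, _ in stream:
    print(chunk.content, end="", flush=True)

# Only valid once the stream has been consumed.
call_response = stream.construct_call_response()
print(call_response.content)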
Source code in mirascope/core/google/stream.py
def construct_call_response(self) -> GoogleCallResponse:
    """Constructs the call response from a consumed GoogleStream.

    Raises:
        ValueError: if the stream has not yet been consumed.
    """
    if not hasattr(self, "message_param"):
        raise ValueError(
            "No stream response, check if the stream has been consumed."
        )
    candidates_token_count = (
        int(self.output_tokens) if self.output_tokens is not None else None
    )
    prompt_token_count = (
        int(self.input_tokens) if self.input_tokens is not None else None
    )
    total_token_count = int(candidates_token_count or 0) + int(
        prompt_token_count or 0
    )
    response = GenerateContentResponse(
        candidates=[
            Candidate(
                finish_reason=self.finish_reasons[0]
                if self.finish_reasons
                else FinishReason.STOP,
                content=Content(
                    role=self.message_param["role"],  # pyright: ignore [reportTypedDictNotRequiredAccess]
                    parts=self.message_param["parts"],  # pyright: ignore [reportTypedDictNotRequiredAccess, reportArgumentType]
                ),
            )
        ],
        model_version=self.model,
        usage_metadata=GenerateContentResponseUsageMetadata(
            candidates_token_count=candidates_token_count,
            prompt_token_count=prompt_token_count,
            total_token_count=total_token_count,
        ),
    )
    return GoogleCallResponse(
        metadata=self.metadata,
        response=response,
        tool_types=self.tool_types,
        prompt_template=self.prompt_template,
        fn_args=self.fn_args if self.fn_args else {},
        dynamic_config=self.dynamic_config,
        messages=self.messages,
        call_params=self.call_params,
        call_kwargs=self.call_kwargs,
        user_message_param=self.user_message_param,
        start_time=self.start_time,
        end_time=self.end_time,
    )