diff --git a/assemblyai/__version__.py b/assemblyai/__version__.py
index e2ba204..4136999 100644
--- a/assemblyai/__version__.py
+++ b/assemblyai/__version__.py
@@ -1 +1 @@
-__version__ = "0.52.1"
+__version__ = "0.52.2"
diff --git a/assemblyai/streaming/v3/client.py b/assemblyai/streaming/v3/client.py
index aff51f1..b2b2002 100644
--- a/assemblyai/streaming/v3/client.py
+++ b/assemblyai/streaming/v3/client.py
@@ -73,10 +73,12 @@ def __init__(self, options: StreamingClientOptions):
     def connect(self, params: StreamingParameters) -> None:
         params_dict = _dump_model(params)
 
-        # JSON-encode list parameters for proper API compatibility (e.g., keyterms_prompt)
+        # JSON-encode list and dict parameters for proper API compatibility (e.g., keyterms_prompt, llm_gateway)
         for key, value in params_dict.items():
             if isinstance(value, list):
                 params_dict[key] = json.dumps(value)
+            elif isinstance(value, dict):
+                params_dict[key] = json.dumps(value)
 
         params_encoded = urlencode(params_dict)
 
diff --git a/assemblyai/streaming/v3/models.py b/assemblyai/streaming/v3/models.py
index c2a9aa4..b6557c5 100644
--- a/assemblyai/streaming/v3/models.py
+++ b/assemblyai/streaming/v3/models.py
@@ -5,6 +5,17 @@
 from pydantic import BaseModel
 
 
+class LLMGatewayMessage(BaseModel):
+    role: str
+    content: str
+
+
+class LLMGatewayConfig(BaseModel):
+    model: str
+    messages: List["LLMGatewayMessage"]
+    max_tokens: int
+
+
 class Word(BaseModel):
     start: int
     end: int
@@ -94,6 +105,7 @@ class StreamingParameters(StreamingSessionParameters):
     webhook_url: Optional[str] = None
     webhook_auth_header_name: Optional[str] = None
     webhook_auth_header_value: Optional[str] = None
+    llm_gateway: Optional[LLMGatewayConfig] = None
 
 
 class UpdateConfiguration(StreamingSessionParameters):