diff --git a/backend/app/alembic/versions/042_add_llm_call_table.py b/backend/app/alembic/versions/042_add_llm_call_table.py new file mode 100644 index 000000000..f7db5ba8c --- /dev/null +++ b/backend/app/alembic/versions/042_add_llm_call_table.py @@ -0,0 +1,185 @@ +"""add_llm_call_table + +Revision ID: 042 +Revises: 041 +Create Date: 2026-01-26 15:20:23.873332 + +""" +from alembic import op +import sqlalchemy as sa +import sqlmodel.sql.sqltypes +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "042" +down_revision = "041" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table( + "llm_call", + sa.Column( + "id", + sa.Uuid(), + nullable=False, + comment="Unique identifier for the LLM call record", + ), + sa.Column( + "job_id", + sa.Uuid(), + nullable=False, + comment="Reference to the parent job (status tracked in job table)", + ), + sa.Column( + "project_id", + sa.Integer(), + nullable=False, + comment="Reference to the project this LLM call belongs to", + ), + sa.Column( + "organization_id", + sa.Integer(), + nullable=False, + comment="Reference to the organization this LLM call belongs to", + ), + sa.Column( + "input", + sqlmodel.sql.sqltypes.AutoString(), + nullable=False, + comment="User input - text string, binary data, or file path for multimodal", + ), + sa.Column( + "input_type", + sa.String(), + nullable=False, + comment="Input type: text, audio, image", + ), + sa.Column( + "output_type", + sa.String(), + nullable=True, + comment="Expected output type: text, audio, image", + ), + sa.Column( + "provider", + sa.String(), + nullable=False, + comment="AI provider: openai, google, anthropic", + ), + sa.Column( + "model", + sqlmodel.sql.sqltypes.AutoString(), + nullable=False, + comment="Specific model used e.g. 
'gpt-4o', 'gemini-2.5-pro'", + ), + sa.Column( + "provider_response_id", + sqlmodel.sql.sqltypes.AutoString(), + nullable=True, + comment="Original response ID from the provider (e.g., OpenAI's response ID)", + ), + sa.Column( + "content", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="Response content: {text: '...'}, {audio_bytes: '...'}, or {image: '...'}", + ), + sa.Column( + "usage", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="Token usage: {input_tokens, output_tokens, reasoning_tokens}", + ), + sa.Column( + "conversation_id", + sqlmodel.sql.sqltypes.AutoString(), + nullable=True, + comment="Identifier linking this response to its conversation thread", + ), + sa.Column( + "auto_create", + sa.Boolean(), + nullable=True, + comment="Whether to auto-create conversation if conversation_id doesn't exist (OpenAI specific)", + ), + sa.Column( + "config", + postgresql.JSONB(astext_type=sa.Text()), + nullable=True, + comment="Configuration: {config_id, config_version} for stored config OR {config_blob} for ad-hoc config", + ), + sa.Column( + "created_at", + sa.DateTime(), + nullable=False, + comment="Timestamp when the LLM call was created", + ), + sa.Column( + "updated_at", + sa.DateTime(), + nullable=False, + comment="Timestamp when the LLM call was last updated", + ), + sa.Column( + "deleted_at", + sa.DateTime(), + nullable=True, + comment="Timestamp when the record was soft-deleted", + ), + sa.ForeignKeyConstraint(["job_id"], ["job.id"], ondelete="CASCADE"), + sa.ForeignKeyConstraint( + ["organization_id"], ["organization.id"], ondelete="CASCADE" + ), + sa.ForeignKeyConstraint(["project_id"], ["project.id"], ondelete="CASCADE"), + sa.PrimaryKeyConstraint("id"), + ) + op.create_index( + "idx_llm_call_conversation_id", + "llm_call", + ["conversation_id"], + unique=False, + postgresql_where=sa.text("conversation_id IS NOT NULL AND deleted_at IS NULL"), + ) + op.create_index( + "idx_llm_call_job_id", + "llm_call", + ["job_id"], + unique=False, + postgresql_where=sa.text("deleted_at IS NULL"), + ) + op.alter_column( + "collection", + "llm_service_name", + existing_type=sa.VARCHAR(), + comment="Name of the LLM service", + existing_comment="Name of the LLM service provider", + existing_nullable=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.alter_column( + "collection", + "llm_service_name", + existing_type=sa.VARCHAR(), + comment="Name of the LLM service provider", + existing_comment="Name of the LLM service", + existing_nullable=False, + ) + op.drop_index( + "idx_llm_call_job_id", + table_name="llm_call", + postgresql_where=sa.text("deleted_at IS NULL"), + ) + op.drop_index( + "idx_llm_call_conversation_id", + table_name="llm_call", + postgresql_where=sa.text("conversation_id IS NOT NULL AND deleted_at IS NULL"), + ) + op.drop_table("llm_call") + # ### end Alembic commands ### diff --git a/backend/app/alembic/versions/042_extend_collection_table_for_provider_.py b/backend/app/alembic/versions/043_extend_collection_table_for_provider_.py similarity index 97% rename from backend/app/alembic/versions/042_extend_collection_table_for_provider_.py rename to backend/app/alembic/versions/043_extend_collection_table_for_provider_.py index 91768f6a2..f1dbee78b 100644 --- a/backend/app/alembic/versions/042_extend_collection_table_for_provider_.py +++ b/backend/app/alembic/versions/043_extend_collection_table_for_provider_.py @@ -1,7 +1,7 @@ """extend collection table for provider agnostic support -Revision ID: 042 -Revises: 041 +Revision ID: 043 +Revises: 042 Create Date: 2026-01-15 16:53:19.495583 """ @@ -12,8 +12,8 @@ # revision identifiers, used by Alembic. -revision = "042" -down_revision = "041" +revision = "043" +down_revision = "042" branch_labels = None depends_on = None diff --git a/backend/app/alembic/versions/044_remove_enum_checks_llm_call_provider.py b/backend/app/alembic/versions/044_remove_enum_checks_llm_call_provider.py new file mode 100644 index 000000000..40024e8c5 --- /dev/null +++ b/backend/app/alembic/versions/044_remove_enum_checks_llm_call_provider.py @@ -0,0 +1,43 @@ +"""remove:enum checks llm_call provider + +Revision ID: 044 +Revises: 043 +Create Date: 2026-01-30 11:22:45.165543 + +""" +from alembic import op +import sqlalchemy as sa +import sqlmodel.sql.sqltypes + + +# revision identifiers, used by Alembic. +revision = "044" +down_revision = "043" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.alter_column( + "llm_call", + "provider", + existing_type=sa.VARCHAR(), + comment="AI provider as sent by user (e.g openai, -native, google)", + existing_comment="AI provider: openai, google, anthropic", + existing_nullable=False, + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.alter_column( + "llm_call", + "provider", + existing_type=sa.VARCHAR(), + comment="AI provider: openai, google, anthropic", + existing_comment="AI provider as sent by user (e.g openai, -native, google)", + existing_nullable=False, + ) + # ### end Alembic commands ### diff --git a/backend/app/api/routes/config/version.py b/backend/app/api/routes/config/version.py index 5f3e8626a..ba1da4afd 100644 --- a/backend/app/api/routes/config/version.py +++ b/backend/app/api/routes/config/version.py @@ -4,7 +4,7 @@ from app.api.deps import SessionDep, AuthContextDep from app.crud.config import ConfigCrud, ConfigVersionCrud from app.models import ( - ConfigVersionCreate, + ConfigVersionCreatePartial, ConfigVersionPublic, Message, ConfigVersionItems, @@ -24,18 +24,21 @@ ) def create_version( config_id: UUID, - version_create: ConfigVersionCreate, + version_create: ConfigVersionCreatePartial, current_user: AuthContextDep, session: SessionDep, ): """ Create a new version for an existing configuration. - The version number is automatically incremented. + + Only include the fields you want to update in config_blob. + Provider, model, and params can be changed. + Type is inherited from existing config and cannot be changed. """ version_crud = ConfigVersionCrud( session=session, project_id=current_user.project_.id, config_id=config_id ) - version = version_crud.create_or_raise(version_create=version_create) + version = version_crud.create_from_partial_or_raise(version_create=version_create) return APIResponse.success_response( data=ConfigVersionPublic(**version.model_dump()), diff --git a/backend/app/celery/beat.py b/backend/app/celery/beat.py index eeaeb8531..e8048ffb3 100644 --- a/backend/app/celery/beat.py +++ b/backend/app/celery/beat.py @@ -1,6 +1,7 @@ """ Celery beat scheduler for cron jobs. """ + import logging from celery import Celery from app.celery.celery_app import celery_app diff --git a/backend/app/celery/utils.py b/backend/app/celery/utils.py index 957c02d9a..8730ea481 100644 --- a/backend/app/celery/utils.py +++ b/backend/app/celery/utils.py @@ -2,6 +2,7 @@ Utility functions for easy Celery integration across the application. Business logic modules can use these functions without knowing Celery internals. """ + import logging from typing import Any, Dict, Optional from celery.result import AsyncResult diff --git a/backend/app/celery/worker.py b/backend/app/celery/worker.py index e48b655b0..e48ba9a85 100644 --- a/backend/app/celery/worker.py +++ b/backend/app/celery/worker.py @@ -1,6 +1,7 @@ """ Celery worker management script. """ + import logging import multiprocessing from celery.bin import worker diff --git a/backend/app/cli/bench/commands.py b/backend/app/cli/bench/commands.py index 0b504754c..9f12b56c6 100644 --- a/backend/app/cli/bench/commands.py +++ b/backend/app/cli/bench/commands.py @@ -210,7 +210,7 @@ def send_benchmark_request( ) else: typer.echo(response.text) - typer.echo(f"[{i+1}/{total}] FAILED - Status: {response.status_code}") + typer.echo(f"[{i + 1}/{total}] FAILED - Status: {response.status_code}") raise Exception(f"Request failed with status code {response.status_code}") diff --git a/backend/app/core/audio_utils.py b/backend/app/core/audio_utils.py new file mode 100644 index 000000000..49176a656 --- /dev/null +++ b/backend/app/core/audio_utils.py @@ -0,0 +1,167 @@ +""" +Audio processing utilities for format conversion. + +This module provides utilities for converting audio between different formats, +particularly for TTS output post-processing. 
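+
+Example usage (a minimal sketch; assumes raw 16-bit mono PCM at the 24 kHz
+default used by Gemini TTS, and that ffmpeg is available on PATH):
+
+    wav_bytes = convert_pcm_to_wav(pcm_bytes, sample_rate=24000)
+    mp3_bytes, error = convert_wav_to_mp3(wav_bytes, is_raw_pcm=False)
+    if error:
+        ...  # conversion failed; error holds a human-readable message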
+""" + +import logging +import subprocess +import tempfile +import wave +from pathlib import Path + +logger = logging.getLogger(__name__) + + +def convert_pcm_to_wav( + pcm_bytes: bytes, sample_rate: int = 24000, channels: int = 1, sample_width: int = 2 +) -> bytes: + """Convert raw PCM audio bytes to WAV format with headers. + + Args: + pcm_bytes: Raw PCM audio data (16-bit little-endian) + sample_rate: Sample rate in Hz (default: 24000 for Gemini TTS) + channels: Number of audio channels (default: 1 for mono) + sample_width: Sample width in bytes (default: 2 for 16-bit) + + Returns: + WAV file bytes with proper headers + """ + with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file: + temp_path = Path(temp_file.name) + + try: + with wave.open(str(temp_path), "wb") as wav_file: + wav_file.setnchannels(channels) + wav_file.setsampwidth(sample_width) + wav_file.setframerate(sample_rate) + wav_file.writeframes(pcm_bytes) + + with open(temp_path, "rb") as f: + wav_bytes = f.read() + + temp_path.unlink(missing_ok=True) + return wav_bytes + + except Exception as e: + temp_path.unlink(missing_ok=True) + raise e + + +def _convert_audio_with_ffmpeg( + wav_bytes: bytes, + output_format: str, + codec: str, + quality_arg: str, + quality_value: str, + func_name: str, +) -> tuple[bytes | None, str | None]: + """Helper function to convert audio using ffmpeg. + + Args: + wav_bytes: WAV audio data with headers + output_format: Output format extension (mp3, ogg) + codec: ffmpeg codec name (libmp3lame, libvorbis) + quality_arg: Quality argument flag (-qscale:a) + quality_value: Quality value + func_name: Calling function name for logging + + Returns: + Tuple of (converted_bytes, error_message) + """ + try: + with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as wav_file: + wav_file.write(wav_bytes) + wav_path = Path(wav_file.name) + + output_path = wav_path.with_suffix(f".{output_format}") + + result = subprocess.run( + [ + "ffmpeg", + "-i", + str(wav_path), + "-codec:a", + codec, + quality_arg, + quality_value, + "-y", + str(output_path), + ], + capture_output=True, + text=True, + check=False, + ) + + if result.returncode != 0: + error_msg = f"ffmpeg conversion failed: {result.stderr}" + logger.error(f"[{func_name}] {error_msg}") + wav_path.unlink(missing_ok=True) + output_path.unlink(missing_ok=True) + return None, error_msg + + with open(output_path, "rb") as f: + output_bytes = f.read() + + wav_path.unlink(missing_ok=True) + output_path.unlink(missing_ok=True) + + logger.info( + f"[{func_name}] Successfully converted WAV ({len(wav_bytes)} bytes) " + f"to {output_format.upper()} ({len(output_bytes)} bytes)" + ) + + return output_bytes, None + + except FileNotFoundError: + error_msg = "ffmpeg not found. Please install ffmpeg: brew install ffmpeg (macOS) or apt install ffmpeg (Linux)" + logger.error(f"[{func_name}] {error_msg}") + return None, error_msg + + except Exception as e: + error_msg = f"Unexpected error during audio conversion: {str(e)}" + logger.error(f"[{func_name}] {error_msg}", exc_info=True) + return None, error_msg + + +def convert_wav_to_mp3( + wav_bytes: bytes, is_raw_pcm: bool = True +) -> tuple[bytes | None, str | None]: + """Convert WAV audio bytes to MP3 format using ffmpeg. 
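+
+    A minimal usage sketch (assumes ffmpeg is installed; error is None on success):
+
+        mp3_bytes, error = convert_wav_to_mp3(pcm_bytes)  # is_raw_pcm defaults to True
+        if error is not None:
+            raise RuntimeError(error)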
+ + Args: + wav_bytes: WAV audio data or raw PCM data (16-bit) + is_raw_pcm: If True, treat input as raw PCM and add WAV headers first + + Returns: + Tuple of (mp3_bytes, error_message) + """ + if is_raw_pcm: + logger.info("[convert_wav_to_mp3] Converting raw PCM to WAV format first") + wav_bytes = convert_pcm_to_wav(wav_bytes) + + return _convert_audio_with_ffmpeg( + wav_bytes, "mp3", "libmp3lame", "-qscale:a", "2", "convert_wav_to_mp3" + ) + + +def convert_wav_to_ogg( + wav_bytes: bytes, is_raw_pcm: bool = True +) -> tuple[bytes | None, str | None]: + """Convert WAV audio bytes to OGG format using ffmpeg. + + Args: + wav_bytes: WAV audio data or raw PCM data (16-bit) + is_raw_pcm: If True, treat input as raw PCM and add WAV headers first + + Returns: + Tuple of (ogg_bytes, error_message) + """ + if is_raw_pcm: + logger.info("[convert_wav_to_ogg] Converting raw PCM to WAV format first") + wav_bytes = convert_pcm_to_wav(wav_bytes) + + return _convert_audio_with_ffmpeg( + wav_bytes, "ogg", "libvorbis", "-qscale:a", "5", "convert_wav_to_ogg" + ) diff --git a/backend/app/core/providers.py b/backend/app/core/providers.py index dfaae233a..2945e7dca 100644 --- a/backend/app/core/providers.py +++ b/backend/app/core/providers.py @@ -12,6 +12,7 @@ class Provider(str, Enum): OPENAI = "openai" AWS = "aws" LANGFUSE = "langfuse" + GOOGLE = "google" @dataclass @@ -30,21 +31,11 @@ class ProviderConfig: Provider.LANGFUSE: ProviderConfig( required_fields=["secret_key", "public_key", "host"] ), + Provider.GOOGLE: ProviderConfig(required_fields=["api_key"]), } def validate_provider(provider: str) -> Provider: - """Validate that the provider name is supported and return the Provider enum. - - Args: - provider: The provider name to validate - - Returns: - Provider: The validated provider enum - - Raises: - ValueError: If the provider is not supported - """ try: return Provider(provider.lower()) except ValueError: diff --git a/backend/app/crud/config/version.py b/backend/app/crud/config/version.py index f834c168b..957b71ece 100644 --- a/backend/app/crud/config/version.py +++ b/backend/app/crud/config/version.py @@ -1,13 +1,22 @@ import logging from uuid import UUID +from typing import Any from sqlmodel import Session, select, and_, func from fastapi import HTTPException from sqlalchemy.orm import defer +from pydantic import ValidationError from .config import ConfigCrud from app.core.util import now -from app.models import Config, ConfigVersion, ConfigVersionCreate, ConfigVersionItems +from app.models import ( + Config, + ConfigVersion, + ConfigVersionCreate, + ConfigVersionCreatePartial, + ConfigVersionItems, +) +from app.models.llm.request import ConfigBlob logger = logging.getLogger(__name__) @@ -26,8 +35,13 @@ def create_or_raise(self, version_create: ConfigVersionCreate) -> ConfigVersion: """ Create a new version for an existing configuration. Automatically increments the version number. + Validates that the config type (text/stt/tts) remains consistent. """ self._config_exists_or_raise(self.config_id) + + # Validate that config type doesn't change + self._validate_config_type_unchanged(version_create) + try: next_version = self._get_next_version(self.config_id) @@ -61,6 +75,139 @@ def create_or_raise(self, version_create: ConfigVersionCreate) -> ConfigVersion: detail="Unexpected error occurred: failed to create version", ) + def create_from_partial_or_raise( + self, version_create: ConfigVersionCreatePartial + ) -> ConfigVersion: + """ + Create a new version from a partial config update. 
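+        For example, a caller might send only the changed fields
+        (illustrative payload):
+
+            {
+                "config_blob": {"completion": {"params": {"temperature": 0.9}}},
+                "commit_message": "Raise sampling temperature"
+            }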
+
+        Fetches the latest version, merges the partial config with it,
+        validates the result, and creates the new version.
+
+        The config type is inherited from the existing config and cannot be
+        changed; provider, model, and params may be updated.
+        """
+        self._config_exists_or_raise(self.config_id)
+
+        # Get the latest version (required for partial updates)
+        latest_version = self._get_latest_version()
+        if latest_version is None:
+            raise HTTPException(
+                status_code=400,
+                detail="Cannot create partial version: no existing version found. Use full config for initial version.",
+            )
+
+        # Merge partial config with existing config
+        merged_config = self._deep_merge(
+            base=latest_version.config_blob,
+            updates=version_create.config_blob,
+        )
+
+        # Validate that the config type hasn't been changed
+        self._validate_immutable_fields(latest_version.config_blob, merged_config)
+
+        # Validate the merged config as ConfigBlob
+        try:
+            validated_blob = ConfigBlob.model_validate(merged_config)
+        except ValidationError as e:
+            logger.error(
+                f"[ConfigVersionCrud.create_from_partial] Validation failed | "
+                f"{{'config_id': '{self.config_id}', 'error': '{str(e)}'}}"
+            )
+            raise HTTPException(
+                status_code=400,
+                detail=f"Invalid config after merge: {str(e)}",
+            )
+
+        try:
+            next_version = self._get_next_version(self.config_id)
+
+            version = ConfigVersion(
+                config_id=self.config_id,
+                version=next_version,
+                config_blob=validated_blob.model_dump(),
+                commit_message=version_create.commit_message,
+            )
+
+            self.session.add(version)
+            self.session.commit()
+            self.session.refresh(version)
+
+            logger.info(
+                f"[ConfigVersionCrud.create_from_partial] Version created successfully | "
+                f"{{'config_id': '{self.config_id}', 'version_id': '{version.id}'}}"
+            )
+
+            return version
+
+        except Exception as e:
+            self.session.rollback()
+            logger.error(
+                f"[ConfigVersionCrud.create_from_partial] Failed to create version | "
+                f"{{'config_id': '{self.config_id}', 'error': '{str(e)}'}}",
+                exc_info=True,
+            )
+            raise HTTPException(
+                status_code=500,
+                detail="Unexpected error occurred: failed to create version",
+            )
+
+    def _get_latest_version(self) -> ConfigVersion | None:
+        """Get the latest version for the config."""
+        stmt = (
+            select(ConfigVersion)
+            .where(
+                and_(
+                    ConfigVersion.config_id == self.config_id,
+                    ConfigVersion.deleted_at.is_(None),
+                )
+            )
+            .order_by(ConfigVersion.version.desc())
+            .limit(1)
+        )
+        return self.session.exec(stmt).first()
+
+    def _deep_merge(
+        self, base: dict[str, Any], updates: dict[str, Any]
+    ) -> dict[str, Any]:
+        """
+        Deep merge two dictionaries.
+        Values from 'updates' override values in 'base'.
+        Nested dicts are merged recursively.
+        """
+        result = base.copy()
+
+        for key, value in updates.items():
+            if (
+                key in result
+                and isinstance(result[key], dict)
+                and isinstance(value, dict)
+            ):
+                result[key] = self._deep_merge(result[key], value)
+            else:
+                result[key] = value
+
+        return result
+
+    def _validate_immutable_fields(
+        self, existing: dict[str, Any], merged: dict[str, Any]
+    ) -> None:
+        """
+        Validate that the immutable 'type' field hasn't been changed.
+        Provider and model can change between versions.
+        """
+        existing_completion = existing.get("completion", {})
+        merged_completion = merged.get("completion", {})
+
+        existing_type = existing_completion.get("type")
+        merged_type = merged_completion.get("type")
+
+        if existing_type != merged_type:
+            raise HTTPException(
+                status_code=400,
+                detail=f"Cannot change config type from '{existing_type}' to '{merged_type}'. 
Type is immutable.", + ) + def read_one(self, version_number: int) -> ConfigVersion | None: """ Read a specific configuration version by its version number. @@ -140,3 +287,55 @@ def _config_exists_or_raise(self, config_id: UUID) -> Config: """Check if a config exists in the project.""" config_crud = ConfigCrud(session=self.session, project_id=self.project_id) config_crud.exists_or_raise(config_id) + + def _validate_config_type_unchanged( + self, version_create: ConfigVersionCreate + ) -> None: + """ + Validate that the config type (text/stt/tts) in the new version matches + the type from the latest existing version. + Raises HTTPException if types don't match. + """ + # Get the latest version + stmt = ( + select(ConfigVersion) + .where( + and_( + ConfigVersion.config_id == self.config_id, + ConfigVersion.deleted_at.is_(None), + ) + ) + .order_by(ConfigVersion.version.desc()) + .limit(1) + ) + latest_version = self.session.exec(stmt).first() + + # If this is the first version, no validation needed + if latest_version is None: + return + + # Extract types from config blobs + old_type = latest_version.config_blob.get("completion", {}).get("type") + new_type = ( + version_create.config_blob.model_dump().get("completion", {}).get("type") + ) + + if old_type is None or new_type is None: + logger.error( + f"[ConfigVersionCrud._validate_config_type_unchanged] Missing type field | " + f"{{'config_id': '{self.config_id}', 'old_type': {old_type}, 'new_type': {new_type}}}" + ) + raise HTTPException( + status_code=400, + detail="Config type field is missing in configuration blob", + ) + + if old_type != new_type: + logger.warning( + f"[ConfigVersionCrud._validate_config_type_unchanged] Type mismatch | " + f"{{'config_id': '{self.config_id}', 'old_type': '{old_type}', 'new_type': '{new_type}'}}" + ) + raise HTTPException( + status_code=400, + detail=f"Cannot change config type from '{old_type}' to '{new_type}'. Config type must remain consistent across versions.", + ) diff --git a/backend/app/crud/evaluations/core.py b/backend/app/crud/evaluations/core.py index 6d17afe60..c19456fc4 100644 --- a/backend/app/crud/evaluations/core.py +++ b/backend/app/crud/evaluations/core.py @@ -442,5 +442,10 @@ def resolve_model_from_config( f"(config_id={eval_run.config_id}, version={eval_run.config_version}): {error}" ) - model = config.completion.params.model + # params is a dict, not a Pydantic model, so use dict access + model = config.completion.params.get("model") + if not model: + raise ValueError( + f"Config for evaluation {eval_run.id} does not contain a 'model' parameter" + ) return model diff --git a/backend/app/crud/llm.py b/backend/app/crud/llm.py new file mode 100644 index 000000000..9c07559cd --- /dev/null +++ b/backend/app/crud/llm.py @@ -0,0 +1,262 @@ +""" +CRUD operations for LLM calls. + +This module handles database operations for LLM calls including: +1. Creating new LLM call records +2. Updating LLM call responses +3. Fetching LLM calls by ID +""" + +import logging +from typing import Any, Literal + +from uuid import UUID +from sqlmodel import Session, select +import datetime +from app.core.util import now +import json +from app.models.llm import LlmCall, LLMCallRequest, ConfigBlob +from app.models.llm.request import ( + TextInput, + AudioBase64Input, + AudioUrlInput, + QueryInput, +) + +logger = logging.getLogger(__name__) + + +def serialize_input(query_input: QueryInput) -> str: + """Serialize query input for database storage. 
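+
+    Illustrative examples (values are placeholders):
+
+        serialize_input(TextInput(content="hello"))
+        # -> "hello"
+        serialize_input(AudioUrlInput(url="https://example.com/a.wav"))
+        # -> '{"type": "audio_url", "url": "https://example.com/a.wav"}'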
+ + For text: stores the actual content + For audio_base64: stores metadata (type, mime_type, size) + For audio_url: stores the URL + """ + if isinstance(query_input, TextInput): + return query_input.content + elif isinstance(query_input, AudioBase64Input): + return json.dumps( + { + "type": "audio_base64", + "mime_type": query_input.mime_type, + "size_bytes": len(query_input.data), + } + ) + elif isinstance(query_input, AudioUrlInput): + return json.dumps( + { + "type": "audio_url", + "url": str(query_input.url), + } + ) + else: + return str(query_input) + + +def create_llm_call( + session: Session, + *, + request: LLMCallRequest, + job_id: UUID, + project_id: int, + organization_id: int, + resolved_config: ConfigBlob, + original_provider: str, +) -> LlmCall: + """ + Create a new LLM call record in the database. + + Args: + session: Database session + request: The LLM call request containing query and config + job_id: Reference to the parent job + project_id: Project this LLM call belongs to + organization_id: Organization this LLM call belongs to + resolved_config: The resolved configuration blob (either from stored config or ad-hoc) + + Returns: + LlmCall: The created LLM call record + """ + # Determine input/output types based on completion config type + completion_config = resolved_config.completion + completion_type = completion_config.type or getattr( + completion_config.params, "type", "text" + ) + + input_type: Literal["text", "audio", "image"] + output_type: Literal["text", "audio", "image"] | None + + if completion_type == "stt": + input_type = "audio" + output_type = "text" + elif completion_type == "tts": + input_type = "text" + output_type = "audio" + else: + input_type = "text" + output_type = "text" + + model = ( + completion_config.params.model + if hasattr(completion_config.params, "model") + else completion_config.params.get("model", "") + ) + + # Build config dict for storage + config_dict: dict[str, Any] + if request.config.is_stored_config: + config_dict = { + "config_id": str(request.config.id), + "config_version": request.config.version, + } + else: + config_dict = { + "config_blob": resolved_config.model_dump(), + } + + # Extract conversation info if present + conversation_id = None + auto_create = None + if request.query.conversation: + conversation_id = request.query.conversation.id + auto_create = request.query.conversation.auto_create + + db_llm_call = LlmCall( + job_id=job_id, + project_id=project_id, + organization_id=organization_id, + input=serialize_input(request.query.input), + input_type=input_type, + output_type=output_type, + provider=original_provider, + model=model, + conversation_id=conversation_id, + auto_create=auto_create, + config=config_dict, + ) + + session.add(db_llm_call) + session.commit() + session.refresh(db_llm_call) + + logger.info( + f"[create_llm_call] Created LLM call id={db_llm_call.id}, " + f"job_id={job_id}, provider={original_provider}, model={model}" + ) + + return db_llm_call + + +def update_llm_call_response( + session: Session, + *, + llm_call_id: UUID, + provider_response_id: str | None = None, + content: dict[str, Any] | None = None, + usage: dict[str, Any] | None = None, + conversation_id: str | None = None, +) -> LlmCall: + """ + Update an LLM call record with response data. 
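+
+    A minimal sketch of a typical call (IDs and payloads are illustrative):
+
+        update_llm_call_response(
+            session,
+            llm_call_id=llm_call.id,
+            provider_response_id="resp_abc123",
+            content={"text": "Hello!"},
+            usage={"input_tokens": 12, "output_tokens": 5},
+        )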
+ + Args: + session: Database session + llm_call_id: The LLM call record ID to update + provider_response_id: Original response ID from the provider + content: Response content dict + usage: Token usage dict + conversation_id: Conversation ID if created/updated + + Returns: + LlmCall: The updated LLM call record + + Raises: + ValueError: If the LLM call record is not found + """ + db_llm_call = session.get(LlmCall, llm_call_id) + if not db_llm_call: + raise ValueError(f"LLM call not found with id={llm_call_id}") + + if provider_response_id is not None: + db_llm_call.provider_response_id = provider_response_id + + if content is not None: + # For TTS responses: transform audio_bytes to metadata only + # (audio_bytes should already be converted to audio_base64 by LLMOutput validator, + # but handle it defensively in case it wasn't) + if "audio_bytes" in content: + import base64 + + audio_bytes = content.pop("audio_bytes") + if audio_bytes: + content["audio_size_bytes"] = len(audio_bytes) + # Convert to base64 for storage if not already done + if "audio_base64" not in content: + content["audio_base64"] = base64.b64encode(audio_bytes).decode( + "utf-8" + ) + logger.info( + f"[update_llm_call_response] Converted audio_bytes to audio_base64 for storage" + ) + + # Calculate audio size from base64 if present and audio_size_bytes not set + if "audio_base64" in content and "audio_size_bytes" not in content: + import base64 + + try: + audio_data = base64.b64decode(content["audio_base64"]) + content["audio_size_bytes"] = len(audio_data) + except Exception as e: + logger.warning( + f"[update_llm_call_response] Failed to calculate audio size: {e}" + ) + + db_llm_call.content = content + + if usage is not None: + db_llm_call.usage = usage + if conversation_id is not None: + db_llm_call.conversation_id = conversation_id + + db_llm_call.updated_at = datetime.datetime.now() + + session.add(db_llm_call) + session.commit() + session.refresh(db_llm_call) + + logger.info(f"[update_llm_call_response] Updated LLM call id={llm_call_id}") + + return db_llm_call + + +def get_llm_call_by_id( + session: Session, + llm_call_id: UUID, + project_id: int | None = None, +) -> LlmCall | None: + statement = select(LlmCall).where( + LlmCall.id == llm_call_id, + LlmCall.deleted_at.is_(None), + ) + + if project_id is not None: + statement = statement.where(LlmCall.project_id == project_id) + + return session.exec(statement).first() + + +def get_llm_calls_by_job_id( + session: Session, + job_id: UUID, +) -> list[LlmCall]: + statement = ( + select(LlmCall) + .where( + LlmCall.job_id == job_id, + LlmCall.deleted_at.is_(None), + ) + .order_by(LlmCall.created_at.desc()) + ) + + return list(session.exec(statement).all()) diff --git a/backend/app/models/__init__.py b/backend/app/models/__init__.py index a4d76ee2c..f3a588750 100644 --- a/backend/app/models/__init__.py +++ b/backend/app/models/__init__.py @@ -34,6 +34,7 @@ ConfigVersion, ConfigVersionBase, ConfigVersionCreate, + ConfigVersionCreatePartial, ConfigVersionPublic, ConfigVersionItems, ) @@ -94,6 +95,7 @@ CompletionConfig, LLMCallRequest, LLMCallResponse, + LlmCall, ) from .message import Message diff --git a/backend/app/models/config/__init__.py b/backend/app/models/config/__init__.py index fa34aa1d6..285b7f8a3 100644 --- a/backend/app/models/config/__init__.py +++ b/backend/app/models/config/__init__.py @@ -10,6 +10,7 @@ ConfigVersion, ConfigVersionBase, ConfigVersionCreate, + ConfigVersionCreatePartial, ConfigVersionPublic, ConfigVersionItems, ) @@ -23,6 +24,7 @@ 
"ConfigVersion", "ConfigVersionBase", "ConfigVersionCreate", + "ConfigVersionCreatePartial", "ConfigVersionItems", "ConfigVersionPublic", "ConfigWithVersion", diff --git a/backend/app/models/config/version.py b/backend/app/models/config/version.py index 5a374582e..b4bb3cce7 100644 --- a/backend/app/models/config/version.py +++ b/backend/app/models/config/version.py @@ -96,6 +96,26 @@ class ConfigVersionCreate(ConfigVersionBase): ) +class ConfigVersionCreatePartial(SQLModel): + """ + Partial update model for creating a new config version. + + Only the fields that need to change should be provided. + Fields like 'provider' and 'type' are inherited from the existing config + and cannot be changed. + """ + + config_blob: dict[str, Any] = Field( + description="Partial config blob. Only include fields you want to update. " + "Provider and type are inherited from existing config and cannot be changed.", + ) + commit_message: str | None = Field( + default=None, + max_length=512, + description="Optional message describing the changes in this version", + ) + + class ConfigVersionPublic(ConfigVersionBase): id: UUID = Field(description="Unique id for the configuration version") config_id: UUID = Field(description="Id of the parent configuration") diff --git a/backend/app/models/llm/__init__.py b/backend/app/models/llm/__init__.py index 8738e2126..43173f6df 100644 --- a/backend/app/models/llm/__init__.py +++ b/backend/app/models/llm/__init__.py @@ -6,5 +6,6 @@ KaapiLLMParams, KaapiCompletionConfig, NativeCompletionConfig, + LlmCall, ) from app.models.llm.response import LLMCallResponse, LLMResponse, LLMOutput, Usage diff --git a/backend/app/models/llm/request.py b/backend/app/models/llm/request.py index fc44235f9..7efd5c59e 100644 --- a/backend/app/models/llm/request.py +++ b/backend/app/models/llm/request.py @@ -1,24 +1,20 @@ from typing import Annotated, Any, Literal, Union -from uuid import UUID +from uuid import UUID, uuid4 from sqlmodel import Field, SQLModel from pydantic import Discriminator, model_validator, HttpUrl +from datetime import datetime +from app.core.util import now +import sqlalchemy as sa +from sqlalchemy.dialects.postgresql import JSONB +from sqlmodel import Field, SQLModel, Index, text -class KaapiLLMParams(SQLModel): - """ - Kaapi-abstracted parameters for LLM providers. - These parameters are mapped internally to provider-specific API parameters. - Provides a unified contract across all LLM providers (OpenAI, Claude, Gemini, etc.). - Provider-specific mappings are handled at the mapper level. 
- """ - model: str = Field( - description="Model identifier to use for completion (e.g., 'gpt-4o', 'gpt-5')", - ) +class TextLLMParams(SQLModel): + model: str instructions: str | None = Field( default=None, - description="System instructions to guide the model's behavior", ) knowledge_base_ids: list[str] | None = Field( default=None, @@ -32,13 +28,70 @@ class KaapiLLMParams(SQLModel): default=None, ge=0.0, le=2.0, - description="Sampling temperature between 0 and 2", ) max_num_results: int | None = Field( default=None, ge=1, - description="Maximum number of results to return", + description="Maximum number of candidate results to return", + ) + + +class STTLLMParams(SQLModel): + model: str + instructions: str + input_language: str | None = None + output_language: str | None = None + response_format: Literal["text"] | None = Field( + None, + description="Can take multiple response_format like text, json, verbose_json.", ) + temperature: float | None = Field( + default=0.2, + ge=0.0, + le=2.0, + ) + + +class TTSLLMParams(SQLModel): + model: str + voice: str + language: str + response_format: Literal["mp3", "wav", "ogg"] | None = "wav" + speed: float | None = Field(None, ge=0.25, le=4.0) + provider_specific: dict[str, Any] | None = Field( + default=None, + description="Provider-specific parameters (e.g., {'gemini': {'director_notes': '...', 'pitch': 1.5}})", + ) + + +KaapiLLMParams = Union[TextLLMParams, STTLLMParams, TTSLLMParams] + + +# Input type models for discriminated union +class TextInput(SQLModel): + type: Literal["text"] = "text" + content: str = Field(..., min_length=1, description="Text content") + + +class AudioBase64Input(SQLModel): + type: Literal["audio_base64"] = "audio_base64" + data: str = Field(..., min_length=1, description="Base64-encoded audio data") + mime_type: str = Field( + default="audio/wav", + description="MIME type of the audio (e.g., audio/wav, audio/mp3, audio/ogg)", + ) + + +class AudioUrlInput(SQLModel): + type: Literal["audio_url"] = "audio_url" + url: HttpUrl = Field(..., description="URL to fetch audio from") + + +# Discriminated union for query input types +QueryInput = Annotated[ + Union[TextInput, AudioBase64Input, AudioUrlInput], + Field(discriminator="type"), +] class ConversationConfig(SQLModel): @@ -71,16 +124,30 @@ def validate_conversation_logic(self): class QueryParams(SQLModel): """Query-specific parameters for each LLM call.""" - input: str = Field( + input: str | QueryInput = Field( ..., - min_length=1, - description="User input question/query/prompt, used to generate a response.", + description=( + "User input - either a plain string (text) or a structured input object. " + "Accepts: string, {type: 'text', content: '...'}, " + "{type: 'audio_base64', data: '...', mime_type: '...'}, " + "or {type: 'audio_url', url: '...'}." + ), ) conversation: ConversationConfig | None = Field( default=None, description="Conversation control configuration for context handling.", ) + @model_validator(mode="before") + @classmethod + def normalize_input(cls, data: Any) -> Any: + """Normalize plain string input to TextInput for consistency.""" + if isinstance(data, dict) and "input" in data: + input_val = data["input"] + if isinstance(input_val, str): + data["input"] = {"type": "text", "content": input_val} + return data + class NativeCompletionConfig(SQLModel): """ @@ -89,14 +156,17 @@ class NativeCompletionConfig(SQLModel): Supports any LLM provider's native API format. 
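+
+    An illustrative payload (params pass through to the provider verbatim;
+    the required "type" field tells Kaapi whether this is a text, STT, or
+    TTS call):
+
+        {
+            "provider": "openai-native",
+            "type": "text",
+            "params": {"model": "gpt-4o", "temperature": 0.7}
+        }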
""" - provider: Literal["openai-native"] = Field( - default="openai-native", + provider: Literal["openai-native", "google-native"] = Field( + ..., description="Native provider type (e.g., openai-native)", ) params: dict[str, Any] = Field( ..., description="Provider-specific parameters (schema varies by provider), should exactly match the provider's endpoint params structure", ) + type: Literal["text", "stt", "tts"] = Field( + ..., description="Completion config type. Params schema varies by type" + ) class KaapiCompletionConfig(SQLModel): @@ -106,12 +176,31 @@ class KaapiCompletionConfig(SQLModel): Supports multiple providers: OpenAI, Claude, Gemini, etc. """ - provider: Literal["openai"] = Field(..., description="LLM provider (openai)") - params: KaapiLLMParams = Field( + provider: Literal["openai", "google"] = Field( + ..., description="LLM provider (openai)" + ) + + type: Literal["text", "stt", "tts"] = Field( + ..., description="Completion config type. Params schema varies by type" + ) + params: dict[str, Any] = Field( ..., description="Kaapi-standardized parameters mapped to provider-specific API", ) + # validate all these 3 config types + @model_validator(mode="after") + def validate_params(self): + param_models = { + "text": TextLLMParams, + "stt": STTLLMParams, + "tts": TTSLLMParams, + } + model_class = param_models[self.type] + validated = model_class.model_validate(self.params) + self.params = validated.model_dump(exclude_none=True) + return self + # Discriminated union for completion configs based on provider field CompletionConfig = Annotated[ @@ -223,3 +312,172 @@ class LLMCallRequest(SQLModel): "The exact dictionary provided here will be returned in the response metadata field." ), ) + + +class LlmCall(SQLModel, table=True): + """ + Database model for tracking LLM API call requests and responses. + + Stores both request inputs and response outputs for traceability, + supporting multimodal inputs (text, audio, image) and various completion types. 
+ """ + + __tablename__ = "llm_call" + __table_args__ = ( + Index( + "idx_llm_call_job_id", + "job_id", + postgresql_where=text("deleted_at IS NULL"), + ), + Index( + "idx_llm_call_conversation_id", + "conversation_id", + postgresql_where=text("conversation_id IS NOT NULL AND deleted_at IS NULL"), + ), + ) + + id: UUID = Field( + default_factory=uuid4, + primary_key=True, + sa_column_kwargs={"comment": "Unique identifier for the LLM call record"}, + ) + + job_id: UUID = Field( + foreign_key="job.id", + nullable=False, + ondelete="CASCADE", + sa_column_kwargs={ + "comment": "Reference to the parent job (status tracked in job table)" + }, + ) + + project_id: int = Field( + foreign_key="project.id", + nullable=False, + ondelete="CASCADE", + sa_column_kwargs={ + "comment": "Reference to the project this LLM call belongs to" + }, + ) + + organization_id: int = Field( + foreign_key="organization.id", + nullable=False, + ondelete="CASCADE", + sa_column_kwargs={ + "comment": "Reference to the organization this LLM call belongs to" + }, + ) + + # Request fields + input: str = Field( + ..., + sa_column_kwargs={ + "comment": "User input - text string, binary data, or file path for multimodal" + }, + ) + + input_type: Literal["text", "audio", "image"] = Field( + ..., + sa_column=sa.Column( + sa.String, + nullable=False, + comment="Input type: text, audio, image", + ), + ) + + output_type: Literal["text", "audio", "image"] | None = Field( + default=None, + sa_column=sa.Column( + sa.String, + nullable=True, + comment="Expected output type: text, audio, image", + ), + ) + + # Provider and model info + provider: str = Field( + ..., + sa_column=sa.Column( + sa.String, + nullable=False, + comment="AI provider as sent by user (e.g openai, -native, google)", + ), + ) + + model: str = Field( + ..., + sa_column_kwargs={ + "comment": "Specific model used e.g. 
'gpt-4o', 'gemini-2.5-pro'" + }, + ) + + # Response fields + provider_response_id: str | None = Field( + default=None, + sa_column_kwargs={ + "comment": "Original response ID from the provider (e.g., OpenAI's response ID)" + }, + ) + + content: dict[str, Any] | None = Field( + default=None, + sa_column=sa.Column( + JSONB, + nullable=True, + comment="Response content: {text: '...'}, {audio_bytes: '...'}, or {image: '...'}", + ), + ) + + usage: dict[str, Any] | None = Field( + default=None, + sa_column=sa.Column( + JSONB, + nullable=True, + comment="Token usage: {input_tokens, output_tokens, reasoning_tokens}", + ), + ) + + # Conversation tracking + conversation_id: str | None = Field( + default=None, + sa_column_kwargs={ + "comment": "Identifier linking this response to its conversation thread" + }, + ) + + auto_create: bool | None = Field( + default=None, + sa_column_kwargs={ + "comment": "Whether to auto-create conversation if conversation_id doesn't exist (OpenAI specific)" + }, + ) + + # Configuration - stores either {config_id, config_version} or {config_blob} + config: dict[str, Any] | None = Field( + default=None, + sa_column=sa.Column( + JSONB, + nullable=True, + comment="Configuration: {config_id, config_version} for stored config OR {config_blob} for ad-hoc config", + ), + ) + + # Timestamps + created_at: datetime = Field( + default_factory=now, + nullable=False, + sa_column_kwargs={"comment": "Timestamp when the LLM call was created"}, + ) + + updated_at: datetime = Field( + default_factory=now, + nullable=False, + sa_column_kwargs={"comment": "Timestamp when the LLM call was last updated"}, + ) + + deleted_at: datetime | None = Field( + default=None, + nullable=True, + sa_column_kwargs={"comment": "Timestamp when the record was soft-deleted"}, + ) diff --git a/backend/app/models/llm/response.py b/backend/app/models/llm/response.py index 34c9b9d9b..44d01981f 100644 --- a/backend/app/models/llm/response.py +++ b/backend/app/models/llm/response.py @@ -3,6 +3,9 @@ This module contains structured response models for LLM API calls. """ + +import base64 +from pydantic import model_validator from sqlmodel import SQLModel, Field @@ -10,12 +13,43 @@ class Usage(SQLModel): input_tokens: int output_tokens: int total_tokens: int + reasoning_tokens: int | None = None class LLMOutput(SQLModel): """Standardized output format for LLM responses.""" - text: str = Field(..., description="Primary text content of the LLM response.") + text: str | None = Field( + default=None, description="Text content (for text/STT responses)." + ) + audio_bytes: bytes | None = Field( + default=None, + exclude=True, + description="Binary audio data (for TTS responses, internal only).", + ) + audio_base64: str | None = Field( + default=None, description="Base64-encoded audio for API responses." + ) + audio_format: str | None = Field( + default=None, description="Audio format (mp3, wav, ogg)." 
+ ) + + @model_validator(mode="after") + def validate_and_serialize_output(self): + """Validate output has content and serialize audio to base64.""" + # Ensure at least one output type is present + if not self.text and not self.audio_bytes and not self.audio_base64: + raise ValueError( + "LLMOutput must have either text, audio_bytes, or audio_base64" + ) + + # Convert audio_bytes to base64 for JSON serialization + if self.audio_bytes and not self.audio_base64: + self.audio_base64 = base64.b64encode(self.audio_bytes).decode("utf-8") + # Clear bytes to prevent serialization issues + self.audio_bytes = None + + return self class LLMResponse(SQLModel): diff --git a/backend/app/services/doctransform/zerox_transformer.py b/backend/app/services/doctransform/zerox_transformer.py index 321a6ba65..08df12b01 100644 --- a/backend/app/services/doctransform/zerox_transformer.py +++ b/backend/app/services/doctransform/zerox_transformer.py @@ -38,7 +38,7 @@ def transform(self, input_path: Path, output_path: Path) -> Path: f"ZeroxTransformer timed out for {input_path} (model={self.model})" ) raise RuntimeError( - f"ZeroxTransformer PDF extraction timed out after {10*60} seconds for {input_path}" + f"ZeroxTransformer PDF extraction timed out after {10 * 60} seconds for {input_path}" ) except Exception as e: logger.error( diff --git a/backend/app/services/llm/__init__.py b/backend/app/services/llm/__init__.py index 730a53fee..5ba7fa6ea 100644 --- a/backend/app/services/llm/__init__.py +++ b/backend/app/services/llm/__init__.py @@ -1,8 +1,5 @@ # Providers -from app.services.llm.providers import ( - BaseProvider, - OpenAIProvider, -) +from app.services.llm.providers import BaseProvider, OpenAIProvider, GoogleAIProvider from app.services.llm.providers import ( LLMProvider, get_llm_provider, diff --git a/backend/app/services/llm/input_resolver.py b/backend/app/services/llm/input_resolver.py new file mode 100644 index 000000000..d6070f0b9 --- /dev/null +++ b/backend/app/services/llm/input_resolver.py @@ -0,0 +1,134 @@ +import base64 +import logging +import tempfile +from pathlib import Path + +import requests + +from app.models.llm.request import ( + TextInput, + AudioBase64Input, + AudioUrlInput, + QueryInput, +) +from app.utils import validate_callback_url + + +logger = logging.getLogger(__name__) + + +def get_file_extension(mime_type: str) -> str: + """Map MIME type to file extension.""" + mime_to_ext = { + "audio/wav": ".wav", + "audio/wave": ".wav", + "audio/x-wav": ".wav", + "audio/mp3": ".mp3", + "audio/mpeg": ".mp3", + "audio/ogg": ".ogg", + "audio/flac": ".flac", + "audio/webm": ".webm", + "audio/mp4": ".mp4", + "audio/m4a": ".m4a", + } + return mime_to_ext.get(mime_type, ".audio") + + +# important!! +def resolve_input(query_input: QueryInput) -> tuple[str, str | None]: + """Resolve discriminated union input to content string. 
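+
+    Illustrative behavior (paths are placeholders):
+
+        resolve_input(TextInput(content="hi"))
+        # -> ("hi", None)
+        resolve_input(AudioUrlInput(url="https://example.com/a.wav"))
+        # -> ("/tmp/audio_xxxx.wav", None); the caller must clean up via cleanup_temp_file()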
+ + Args: + query_input: The input from QueryParams (TextInput, AudioBase64Input, or AudioUrlInput) + + Returns: + (content_string, None) on success - for text returns content, for audio returns temp file path + ("", error_message) on failure + """ + try: + if isinstance(query_input, TextInput): + return query_input.content, None + + elif isinstance(query_input, AudioBase64Input): + return resolve_audio_base64(query_input.data, query_input.mime_type) + + elif isinstance(query_input, AudioUrlInput): + return resolve_audio_url(str(query_input.url)) + + else: + return "", f"Unknown input type: {type(query_input)}" + + except Exception as e: + logger.error(f"[resolve_input] Failed to resolve input: {e}", exc_info=True) + return "", f"Failed to resolve input: {str(e)}" + + +def resolve_audio_base64(data: str, mime_type: str) -> tuple[str, str | None]: + """Decode base64 audio and write to temp file. Returns (file_path, error).""" + try: + audio_bytes = base64.b64decode(data) + except Exception as e: + return "", f"Invalid base64 audio data: {str(e)}" + + ext = get_file_extension(mime_type) + try: + with tempfile.NamedTemporaryFile( + suffix=ext, delete=False, prefix="audio_" + ) as tmp: + tmp.write(audio_bytes) + temp_path = tmp.name + + logger.info(f"[resolve_audio_base64] Wrote audio to temp file: {temp_path}") + return temp_path, None + except Exception as e: + return "", f"Failed to write audio to temp file: {str(e)}" + + +def resolve_audio_url(url: str) -> tuple[str, str | None]: + """Fetch audio from URL and write to temp file. Returns (file_path, error). + + Implements SSRF protection by: + - Validating URL is HTTPS-only + - Blocking private/link-local IP addresses + - Blocking cloud metadata endpoints + - Disabling redirects + """ + # Validate URL to prevent SSRF attacks + try: + validate_callback_url(url) + except ValueError as e: + logger.error(f"[resolve_audio_url] Invalid audio URL: {e}") + return "", f"Invalid audio URL: {str(e)}" + + try: + response = requests.get(url, timeout=60, allow_redirects=False) + response.raise_for_status() + except requests.Timeout: + return "", f"Timeout fetching audio from URL: {url}" + except requests.HTTPError as e: + return "", f"HTTP error fetching audio: {e.response.status_code}" + except Exception as e: + return "", f"Failed to fetch audio from URL: {str(e)}" + + content_type = response.headers.get("content-type", "audio/wav") + ext = get_file_extension(content_type.split(";")[0].strip()) + + try: + with tempfile.NamedTemporaryFile( + suffix=ext, delete=False, prefix="audio_" + ) as tmp: + tmp.write(response.content) + temp_path = tmp.name + + logger.info(f"[resolve_audio_url] Wrote audio to temp file: {temp_path}") + return temp_path, None + except Exception as e: + return "", f"Failed to write fetched audio to temp file: {str(e)}" + + +def cleanup_temp_file(file_path: str) -> None: + """Clean up a temporary file if it exists.""" + try: + Path(file_path).unlink(missing_ok=True) + except Exception as e: + logger.warning(f"[cleanup_temp_file] Failed to delete temp file: {e}") diff --git a/backend/app/services/llm/jobs.py b/backend/app/services/llm/jobs.py index f4700b51b..881e883bc 100644 --- a/backend/app/services/llm/jobs.py +++ b/backend/app/services/llm/jobs.py @@ -11,10 +11,13 @@ from app.crud.config import ConfigVersionCrud from app.crud.credentials import get_provider_credential from app.crud.jobs import JobCrud -from app.models import JobStatus, JobType, JobUpdate, LLMCallRequest +from app.crud.llm import create_llm_call, 
update_llm_call_response
+from app.models import JobStatus, JobType, JobUpdate, LLMCallRequest, Job
 from app.models.llm.request import ConfigBlob, LLMCallConfig, KaapiCompletionConfig
 from app.services.llm.providers.registry import get_llm_provider
 from app.services.llm.mappers import transform_kaapi_config_to_native
+from app.services.llm.input_resolver import resolve_input, cleanup_temp_file
+
 from app.utils import APIResponse, send_callback
 
 logger = logging.getLogger(__name__)
@@ -28,6 +31,14 @@ def start_job(
     job_crud = JobCrud(session=db)
     job = job_crud.create(job_type=JobType.LLM_API, trace_id=trace_id)
 
+    # Commit so the job row is persisted before the Celery task starts
+    # (commit() implies a flush)
+    db.commit()
+
+    logger.info(
+        f"[start_job] Created job | job_id={job.id}, status={job.status}, project_id={project_id}"
+    )
+
     try:
         task_id = start_high_priority_job(
             function_path="app.services.llm.jobs.execute_job",
@@ -136,6 +147,7 @@
     config = request.config
     callback_response = None
     config_blob: ConfigBlob | None = None
+    llm_call_id: UUID | None = None  # Track the LLM call record
 
     logger.info(
         f"[execute_job] Starting LLM job execution | job_id={job_id}, task_id={task_id}, "
@@ -145,6 +157,26 @@
     with Session(engine) as session:
         # Update job status to PROCESSING
         job_crud = JobCrud(session=session)
+
+        # Defensive check: confirm the job row is visible to this session
+        job = session.get(Job, job_id)
+        if not job:
+            logger.error(f"[execute_job] Job not found | job_id={job_id}")
+        else:
+            logger.info(
+                f"[execute_job] Found job | job_id={job_id}, status={job.status}"
+            )
+
         job_crud.update(
             job_id=job_id, job_update=JobUpdate(status=JobStatus.PROCESSING)
         )
@@ -170,16 +202,26 @@
     else:
         config_blob = config.blob
 
     try:
         # Transform Kaapi config to native config if needed (before getting provider)
         completion_config = config_blob.completion
+
+        # Provider exactly as the user sent it (openai, google, or a -native variant)
+        original_provider = config_blob.completion.provider
+
         if isinstance(completion_config, KaapiCompletionConfig):
             completion_config, warnings = transform_kaapi_config_to_native(
                 completion_config
             )
+
             if request.request_metadata is None:
                 request.request_metadata = {}
             request.request_metadata.setdefault("warnings", []).extend(warnings)
     except Exception as e:
         callback_response = APIResponse.failure_response(
             error=f"Error processing configuration: {str(e)}",
         )
         return handle_job_error(job_id, request.callback_url, callback_response)
 
+    # Create LLM call record before execution
+    try:
+        # Rebuild ConfigBlob with transformed native config
+        resolved_config_blob = ConfigBlob(completion=completion_config)
+
+        llm_call = create_llm_call(
+            session,
+            request=request,
+            job_id=job_id,
+            project_id=project_id,
+            organization_id=organization_id,
+            resolved_config=resolved_config_blob,
+            original_provider=original_provider,
+        )
+        llm_call_id = llm_call.id
+        logger.info(
+            f"[execute_job] Created LLM call record | llm_call_id={llm_call_id}, job_id={job_id}"
+        )
+    except Exception as e:
+        logger.error(
+            f"[execute_job] Failed to create LLM call record: {str(e)} | job_id={job_id}",
+            exc_info=True,
+        )
+        callback_response = 
APIResponse.failure_response( + error=f"Failed to create LLM call record: {str(e)}", + metadata=request.request_metadata, + ) + return handle_job_error(job_id, request.callback_url, callback_response) + try: provider_instance = get_llm_provider( session=session, - provider_type=completion_config.provider, # Now always native provider type + provider_type=completion_config.provider, # Now always native provider type i.e openai-native, google-native regardless project_id=project_id, organization_id=organization_id, ) @@ -213,17 +284,32 @@ def execute_job( if request.query.conversation and request.query.conversation.id: conversation_id = request.query.conversation.id + # Resolve input (handles text, audio_base64, audio_url) + resolved_input, resolve_error = resolve_input(request.query.input) + if resolve_error: + callback_response = APIResponse.failure_response( + error=resolve_error, + metadata=request.request_metadata, + ) + return handle_job_error(job_id, request.callback_url, callback_response) + # Apply Langfuse observability decorator to provider execute method decorated_execute = observe_llm_execution( credentials=langfuse_credentials, session_id=conversation_id, )(provider_instance.execute) - response, error = decorated_execute( - completion_config=completion_config, - query=request.query, - include_provider_raw_response=request.include_provider_raw_response, - ) + try: + response, error = decorated_execute( + completion_config=completion_config, + query=request.query, + resolved_input=resolved_input, + include_provider_raw_response=request.include_provider_raw_response, + ) + finally: + # Clean up temp files for audio inputs + if resolved_input and resolved_input != request.query.input: + cleanup_temp_file(resolved_input) if response: callback_response = APIResponse.success_response( @@ -238,6 +324,27 @@ def execute_job( with Session(engine) as session: job_crud = JobCrud(session=session) + # Update LLM call record with response data + if llm_call_id: + try: + update_llm_call_response( + session, + llm_call_id=llm_call_id, + provider_response_id=response.response.provider_response_id, + content=response.response.output.model_dump(), + usage=response.usage.model_dump(), + conversation_id=response.response.conversation_id, + ) + logger.info( + f"[execute_job] Updated LLM call record | llm_call_id={llm_call_id}" + ) + except Exception as e: + logger.error( + f"[execute_job] Failed to update LLM call record: {str(e)} | llm_call_id={llm_call_id}", + exc_info=True, + ) + # Don't fail the job if updating the record fails + job_crud.update( job_id=job_id, job_update=JobUpdate(status=JobStatus.SUCCESS) ) diff --git a/backend/app/services/llm/mappers.py b/backend/app/services/llm/mappers.py index 9e076aa9a..4b982b601 100644 --- a/backend/app/services/llm/mappers.py +++ b/backend/app/services/llm/mappers.py @@ -1,17 +1,17 @@ """Parameter mappers for converting Kaapi-abstracted parameters to provider-specific formats.""" import litellm -from app.models.llm import KaapiLLMParams, KaapiCompletionConfig, NativeCompletionConfig +from app.models.llm import KaapiCompletionConfig, NativeCompletionConfig -def map_kaapi_to_openai_params(kaapi_params: KaapiLLMParams) -> tuple[dict, list[str]]: +def map_kaapi_to_openai_params(kaapi_params: dict) -> tuple[dict, list[str]]: """Map Kaapi-abstracted parameters to OpenAI API parameters. This mapper transforms standardized Kaapi parameters into OpenAI-specific parameter format, enabling provider-agnostic interface design. 
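+
+    A sketch of the resulting mapping (assumes a non-reasoning model such as
+    gpt-4o, so temperature passes through and no warnings are emitted):
+
+        params, warnings = map_kaapi_to_openai_params(
+            {"model": "gpt-4o", "temperature": 0.3, "instructions": "Be brief."}
+        )
+        # params -> {"temperature": 0.3, "model": "gpt-4o", "instructions": "Be brief."}
+        # warnings -> []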
Args: - kaapi_params: KaapiLLMParams instance with standardized parameters + kaapi_params: Dictionary with standardized Kaapi parameters Supported Mapping: - model → model @@ -29,65 +29,132 @@ def map_kaapi_to_openai_params(kaapi_params: KaapiLLMParams) -> tuple[dict, list openai_params = {} warnings = [] - support_reasoning = litellm.supports_reasoning( - model="openai/" + f"{kaapi_params.model}" - ) + model = kaapi_params.get("model") + reasoning = kaapi_params.get("reasoning") + temperature = kaapi_params.get("temperature") + instructions = kaapi_params.get("instructions") + knowledge_base_ids = kaapi_params.get("knowledge_base_ids") + max_num_results = kaapi_params.get("max_num_results") + + support_reasoning = litellm.supports_reasoning(model=f"openai/{model}") # Handle reasoning vs temperature mutual exclusivity if support_reasoning: - if kaapi_params.reasoning is not None: - openai_params["reasoning"] = {"effort": kaapi_params.reasoning} + if reasoning is not None: + openai_params["reasoning"] = {"effort": reasoning} - if kaapi_params.temperature is not None: + if temperature is not None: warnings.append( "Parameter 'temperature' was suppressed because the selected model " "supports reasoning, and temperature is ignored when reasoning is enabled." ) else: - if kaapi_params.reasoning is not None: + if reasoning is not None: warnings.append( "Parameter 'reasoning' was suppressed because the selected model " "does not support reasoning." ) - if kaapi_params.temperature is not None: - openai_params["temperature"] = kaapi_params.temperature + if temperature is not None: + openai_params["temperature"] = temperature - if kaapi_params.model: - openai_params["model"] = kaapi_params.model + if model: + openai_params["model"] = model - if kaapi_params.instructions: - openai_params["instructions"] = kaapi_params.instructions + if instructions: + openai_params["instructions"] = instructions - if kaapi_params.knowledge_base_ids: + if knowledge_base_ids: openai_params["tools"] = [ { "type": "file_search", - "vector_store_ids": kaapi_params.knowledge_base_ids, - "max_num_results": kaapi_params.max_num_results or 20, + "vector_store_ids": knowledge_base_ids, + "max_num_results": max_num_results or 20, } ] return openai_params, warnings +def map_kaapi_to_google_params(kaapi_params: dict) -> tuple[dict, list[str]]: + """Map Kaapi-abstracted parameters to Google AI (Gemini) API parameters. + + This mapper transforms standardized Kaapi parameters into Google-specific + parameter format for the Gemini API. + + Args: + kaapi_params: Dictionary with standardized Kaapi parameters + + Supported Mapping: + - model → model + - instructions → instructions (for STT prompts, if available) + - temperature -> temperature parameter (0-2) + + Returns: + Tuple of: + - Dictionary of Google AI API parameters ready to be passed to the API + - List of warnings describing suppressed or ignored parameters + """ + google_params = {} + warnings = [] + + # Model is present in all param types + google_params["model"] = kaapi_params.get("model") + + # Instructions for STT prompts + instructions = kaapi_params.get("instructions") + if instructions: + google_params["instructions"] = instructions + + temperature = kaapi_params.get("temperature") + + if temperature is not None: + google_params["temperature"] = temperature + + # Warn about unsupported parameters + if kaapi_params.get("knowledge_base_ids"): + warnings.append( + "Parameter 'knowledge_base_ids' is not supported by Google AI and was ignored." 
+ ) + + if kaapi_params.get("reasoning") is not None: + warnings.append( + "Parameter 'reasoning' is not applicable for Google AI and was ignored." + ) + + return google_params, warnings + + def transform_kaapi_config_to_native( kaapi_config: KaapiCompletionConfig, ) -> tuple[NativeCompletionConfig, list[str]]: """Transform Kaapi completion config to native provider config with mapped parameters. - Currently supports OpenAI. Future: Claude, Gemini mappers. + Supports OpenAI and Google AI providers. Args: kaapi_config: KaapiCompletionConfig with abstracted parameters Returns: - NativeCompletionConfig with provider-native parameters ready for API + Tuple of: + - NativeCompletionConfig with provider-native parameters ready for API + - List of warnings for suppressed/ignored parameters """ if kaapi_config.provider == "openai": mapped_params, warnings = map_kaapi_to_openai_params(kaapi_config.params) return ( - NativeCompletionConfig(provider="openai-native", params=mapped_params), + NativeCompletionConfig( + provider="openai-native", params=mapped_params, type=kaapi_config.type + ), + warnings, + ) + + if kaapi_config.provider == "google": + mapped_params, warnings = map_kaapi_to_google_params(kaapi_config.params) + return ( + NativeCompletionConfig( + provider="google-native", params=mapped_params, type=kaapi_config.type + ), warnings, ) diff --git a/backend/app/services/llm/providers/PLAN.md b/backend/app/services/llm/providers/PLAN.md new file mode 100644 index 000000000..309a2ed04 --- /dev/null +++ b/backend/app/services/llm/providers/PLAN.md @@ -0,0 +1,88 @@ +This specification sheet is designed for implementing **Gemini 2.5 Pro TTS** via the `google-genai` Python SDK (AI Studio / API Key method). + +--- + +### **1. Core Identity & Endpoints** + +* **Model ID:** `gemini-2.5-pro-tts` (High-fidelity) or `gemini-2.5-flash-tts` (Low-latency). +* **Base URL:** `https://generativelanguage.googleapis.com/v1beta` +* **Auth Type:** API Key (`x-goog-api-key` header). + +### **2. Technical Specification Table** + +| Component | SDK Property / Path | Type | Constraints | +| --- | --- | --- | --- | +| **Response Modality** | `response_modalities` | `list[str]` | Must be **`["AUDIO"]`** | +| **Voice Selection** | `speech_config.voice_config.prebuilt_voice_config.voice_name` | `string` | e.g., `Aoede`, `Kore`, `Fenrir` (See Section 3) | +| **Language** | `speech_config.language_code` | `string` | BCP-47 code (e.g., `en-US`, `hi-IN`) | +| **Speed (Rate)** | `audio_config.speaking_rate` | `float` | Range: `0.25` to `4.0` (Default: `1.0`) | +| **Pitch** | `audio_config.pitch` | `float` | Range: `-20.0` to `20.0` | +| **Volume Gain** | `audio_config.volume_gain_db` | `float` | Range: `-96.0` to `16.0` | +| **Output Format** | `audio_config.audio_encoding` | `enum` | `MP3`, `LINEAR16` (WAV), `OGG_OPUS` | +| **Director Notes** | `system_instruction` | `string` | Natural language (e.g., "Speak sadly", "Professional") | + +--- + +### **3. Voice Catalog (Common Personas)** + +Gemini 2.5 voices are "Instruction-Aware." Use these IDs in the `voice_name` field: + +* **`Aoede`**: Neutral, Breezy (Best for general narration). +* **`Kore`**: Firm, Professional (Best for corporate/assistants). +* **`Fenrir`**: Excitable, High-energy (Best for gaming/ads). +* **`Leda`**: Youthful, Bright. +* **`Charon`**: Informative, Mature. + +--- + +### **4. 
Implementation Pattern (Python SDK)**
+
+```python
+from google import genai
+from google.genai import types
+
+client = genai.Client(api_key="GEMINI_API_KEY")
+
+# 1:1 Mapping to your JSON schema requirements
+config = types.GenerateContentConfig(
+    response_modalities=["AUDIO"],
+    # Maps your 'director_notes'
+    system_instruction="Speak with a professional, calm tone. Pause for 1 second between sentences.",
+    speech_config=types.SpeechConfig(
+        voice_config=types.VoiceConfig(
+            prebuilt_voice_config=types.PrebuiltVoiceConfig(
+                voice_name="Kore"  # Mapping your 'voice'
+            )
+        ),
+        language_code="en-US"
+    ),
+    audio_config=types.AudioConfig(
+        audio_encoding="MP3",  # Mapping your 'response_format'
+        speaking_rate=1.0  # Mapping your 'speed'
+    )
+)
+
+response = client.models.generate_content(
+    model="gemini-2.5-pro-tts",
+    contents="Hello world. This is a technical test of the Gemini TTS pipeline.",
+    config=config
+)
+
+# Extract binary data
+audio_bytes = response.candidates[0].content.parts[0].inline_data.data
+
+with open("output.mp3", "wb") as f:
+    f.write(audio_bytes)
+```
+
+### **5. Important Usage Notes**
+
+1. **Instruction Priority:** If you set `speaking_rate=2.0` and also put "Speak very slowly" in the `system_instruction`, the model may produce erratic results. Use **natural language** for "tone" and **programmatic fields** for "fixed pacing."
+2. **Streaming:** The `google-genai` SDK supports streaming audio bytes for real-time applications via the `models.generate_content_stream` method, but `audio_encoding` must be `LINEAR16` or `PCM` for minimum latency.
+3. **SynthID:** Note that all output audio is watermarked with Google's SynthID for safety tracking.
+4. You are required to go through the implementation of the `_execute_stt` function inside `app/services/llm/providers/gai.py`, as well as the relevant models inside `app/models/llm`.
+5. Make sure to follow the existing Celery task queue structure, Pydantic model idioms, and database schema.
+6. The generated audio is not saved in the database, so store only some metadata about it for now.
+7. Make sure to follow the TTS config types (`KaapiCompletionConfig` and the native LLM configs, plus the relevant models for configs and versions).
+8. Do not write over-abstracted code. Favor readability over pristine Java-like code.
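+### **6. Reference STT Sketch (for context)**
+
+Since item 4 above asks the implementer to study `_execute_stt`, here is a minimal sketch of the analogous speech-to-text flow with the same SDK: upload the audio file, then pass the instruction and the file handle together as `contents`. This is an illustrative sketch, not the production implementation — the model ID `gemini-2.5-flash` and the local file `sample.wav` are assumptions for the example.
+
+```python
+from google import genai
+
+client = genai.Client(api_key="GEMINI_API_KEY")
+
+# Upload the audio, then send instruction + file handle in one request
+audio_file = client.files.upload(file="sample.wav")
+response = client.models.generate_content(
+    model="gemini-2.5-flash",
+    contents=["Transcribe the audio. Only return transcribed text.", audio_file],
+)
+
+print(response.text)            # transcribed text
+print(response.usage_metadata)  # token counts, mirrored into Usage in gai.py
+```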
diff --git a/backend/app/services/llm/providers/__init__.py b/backend/app/services/llm/providers/__init__.py index 7b95ee3f6..e8474553f 100644 --- a/backend/app/services/llm/providers/__init__.py +++ b/backend/app/services/llm/providers/__init__.py @@ -1,5 +1,6 @@ from app.services.llm.providers.base import BaseProvider -from app.services.llm.providers.openai import OpenAIProvider +from app.services.llm.providers.oai import OpenAIProvider +from app.services.llm.providers.gai import GoogleAIProvider from app.services.llm.providers.registry import ( LLMProvider, get_llm_provider, diff --git a/backend/app/services/llm/providers/base.py b/backend/app/services/llm/providers/base.py index 827f25910..d8f7cafe7 100644 --- a/backend/app/services/llm/providers/base.py +++ b/backend/app/services/llm/providers/base.py @@ -31,11 +31,20 @@ def __init__(self, client: Any): """ self.client = client + @staticmethod + @abstractmethod + def create_client(credentials: dict[str, Any]) -> Any: + """ + Static method to instantiate a client instance of the provider + """ + raise NotImplementedError("Providers must implement create_client method") + @abstractmethod def execute( self, completion_config: NativeCompletionConfig, query: QueryParams, + resolved_input: str, include_provider_raw_response: bool = False, ) -> tuple[LLMCallResponse | None, str | None]: """Execute LLM API call. @@ -45,6 +54,7 @@ def execute( Args: completion_config: LLM completion configuration, pass params as-is to provider API query: Query parameters including input and conversation_id + resolved_input: The resolved input content (text string or file path for audio) include_provider_raw_response: Whether to include the raw LLM provider response in the output Returns: diff --git a/backend/app/services/llm/providers/gai.py b/backend/app/services/llm/providers/gai.py new file mode 100644 index 000000000..77ae931b4 --- /dev/null +++ b/backend/app/services/llm/providers/gai.py @@ -0,0 +1,364 @@ +import logging + +from google import genai +from google.genai.types import GenerateContentResponse +from typing import Any + +from app.models.llm import ( + NativeCompletionConfig, + LLMCallResponse, + QueryParams, + LLMOutput, + LLMResponse, + Usage, +) +from app.services.llm.providers.base import BaseProvider + + +logger = logging.getLogger(__name__) + + +class GoogleAIProvider(BaseProvider): + def __init__(self, client: genai.Client): + """Initialize Google AI provider with client. + + Args: + client: Google AI client instance + """ + super().__init__(client) + self.client = client + + @staticmethod + def create_client(credentials: dict[str, Any]) -> Any: + if "api_key" not in credentials: + raise ValueError("API Key for Google Gemini Not Set") + return genai.Client(api_key=credentials["api_key"]) + + def _execute_stt( + self, + completion_config: NativeCompletionConfig, + resolved_input: str, + include_provider_raw_response: bool = False, + ) -> tuple[LLMCallResponse | None, str | None]: + """Execute speech-to-text completion using Google AI. 
+
+        Args:
+            completion_config: Configuration for the completion request
+            resolved_input: File path to the audio input
+            include_provider_raw_response: Whether to include raw provider response
+
+        Returns:
+            Tuple of (LLMCallResponse, error_message)
+        """
+        provider = completion_config.provider
+        generation_params = completion_config.params
+
+        # Validate input is a file path string
+        if not isinstance(resolved_input, str):
+            return None, f"{provider} STT requires file path as string"
+
+        model = generation_params.get("model")
+        if not model:
+            return None, "Missing 'model' in native params"
+
+        instructions = generation_params.get("instructions", "")
+        input_language = generation_params.get("input_language") or "auto"
+        output_language = generation_params.get("output_language", "")
+
+        # Build transcription/translation instruction
+        if input_language == "auto":
+            lang_instruction = (
+                "Detect the spoken language automatically and transcribe the audio"
+            )
+        else:
+            lang_instruction = f"Transcribe the audio from {input_language} in the native script of {input_language}"
+
+        if output_language and output_language != input_language:
+            lang_instruction += f" and translate to {output_language} in the native script of {output_language}"
+
+        forced_transcription_text = "Only return transcribed text and no other text."
+        # Merge user instructions with language instructions
+        if instructions:
+            merged_instruction = (
+                f"{instructions}. {lang_instruction}. {forced_transcription_text}"
+            )
+        else:
+            merged_instruction = f"{lang_instruction}. {forced_transcription_text}"
+
+        # Upload file and generate content
+        gemini_file = self.client.files.upload(file=resolved_input)
+
+        contents = []
+        if merged_instruction:
+            contents.append(merged_instruction)
+        contents.append(gemini_file)
+
+        response: GenerateContentResponse = self.client.models.generate_content(
+            model=model, contents=contents
+        )
+
+        # Validate response has required fields
+        if not response.response_id:
+            return None, "Google AI response missing response_id"
+
+        if not response.text:
+            return None, "Google AI response missing text content"
+
+        # Extract usage metadata with null checks
+        if response.usage_metadata:
+            input_tokens = response.usage_metadata.prompt_token_count or 0
+            output_tokens = response.usage_metadata.candidates_token_count or 0
+            total_tokens = response.usage_metadata.total_token_count or 0
+            reasoning_tokens = response.usage_metadata.thoughts_token_count or 0
+        else:
+            logger.warning(
+                f"[GoogleAIProvider._execute_stt] Response missing usage_metadata, using zeros"
+            )
+            input_tokens = 0
+            output_tokens = 0
+            total_tokens = 0
+            reasoning_tokens = 0
+
+        # Build response
+        llm_response = LLMCallResponse(
+            response=LLMResponse(
+                provider_response_id=response.response_id,
+                model=response.model_version or model,
+                provider=provider,
+                output=LLMOutput(text=response.text),
+            ),
+            usage=Usage(
+                input_tokens=input_tokens,
+                output_tokens=output_tokens,
+                total_tokens=total_tokens,
+                reasoning_tokens=reasoning_tokens,
+            ),
+        )
+
+        if include_provider_raw_response:
+            llm_response.provider_raw_response = response.model_dump()
+
+        logger.info(
+            f"[GoogleAIProvider._execute_stt] Successfully generated STT response: {response.response_id}"
+        )
+
+        return llm_response, None
+
+    def _execute_tts(
+        self,
+        completion_config: NativeCompletionConfig,
+        resolved_input: str,
+        include_provider_raw_response: bool = False,
+    ) -> tuple[LLMCallResponse | None, str | None]:
+        """Execute text-to-speech completion using Google AI.
+ + Args: + completion_config: Configuration for the completion request + resolved_input: Text string to synthesize + include_provider_raw_response: Whether to include raw provider response + + Returns: + Tuple of (LLMCallResponse, error_message) + """ + provider = completion_config.provider + generation_params = completion_config.params + + # Validate input is a text string + if not isinstance(resolved_input, str): + return None, f"{provider} TTS requires text string as input" + + if not resolved_input.strip(): + return None, "Text input cannot be empty" + + # Extract required params + model = generation_params.get("model") + if not model: + return None, "Missing 'model' in native params" + + voice = generation_params.get("voice") + if not voice: + return None, "Missing 'voice' in native params" + + language = generation_params.get("language") + if not language: + return None, "Missing 'language' in native params" + + # Extract optional params + speed = generation_params.get("speed", 1.0) + response_format = generation_params.get("response_format", "wav") + + # Extract Gemini-specific params from provider_specific.gemini + provider_specific = generation_params.get("provider_specific", {}) + gemini_params = provider_specific.get("gemini", {}) + + director_notes = gemini_params.get("director_notes") + pitch = gemini_params.get("pitch") + volume_gain_db = gemini_params.get("volume_gain_db") + + # Build Gemini TTS config + from google.genai import types + + # Note: Current google-genai SDK doesn't support AudioConfig or parameters + # like speaking_rate, pitch, volume_gain_db. These may be added in future SDK versions. + # For now, we only use the available TTS parameters. + config_kwargs = { + "response_modalities": ["AUDIO"], + "speech_config": types.SpeechConfig( + voice_config=types.VoiceConfig( + prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name=voice) + ), + language_code=language, + ), + } + + if director_notes: + config_kwargs["system_instruction"] = director_notes + + # Log warning if unsupported parameters were provided + if speed and speed != 1.0: + logger.warning( + f"[GoogleAIProvider._execute_tts] speed parameter ({speed}) is not supported by current SDK version" + ) + if pitch is not None: + logger.warning( + f"[GoogleAIProvider._execute_tts] pitch parameter ({pitch}) is not supported by current SDK version" + ) + if volume_gain_db is not None: + logger.warning( + f"[GoogleAIProvider._execute_tts] volume_gain_db parameter ({volume_gain_db}) is not supported by current SDK version" + ) + if response_format and response_format != "wav": + logger.warning( + f"[GoogleAIProvider._execute_tts] response_format ({response_format}) selection is not supported by current SDK version, using default" + ) + + config = types.GenerateContentConfig(**config_kwargs) + + # Execute TTS + response: GenerateContentResponse = self.client.models.generate_content( + model=model, contents=resolved_input, config=config + ) + + # Validate response + if not response.response_id: + return None, "Google AI response missing response_id" + + # Extract audio bytes + try: + audio_bytes = response.candidates[0].content.parts[0].inline_data.data + except (IndexError, AttributeError) as e: + return None, f"Failed to extract audio from response: {str(e)}" + + if not audio_bytes: + return None, "Google AI response missing audio data" + + # Post-process audio format conversion if needed + # Gemini TTS natively outputs 24kHz 16-bit PCM (WAV format) + actual_format = "wav" # Native Gemini TTS output format + + 
if response_format and response_format != "wav": + # Need to convert from WAV to requested format + from app.core.audio_utils import convert_wav_to_mp3, convert_wav_to_ogg + + logger.info( + f"[GoogleAIProvider._execute_tts] Converting audio from WAV to {response_format}" + ) + + if response_format == "mp3": + converted_bytes, convert_error = convert_wav_to_mp3(audio_bytes) + if convert_error: + return None, f"Failed to convert audio to MP3: {convert_error}" + audio_bytes = converted_bytes + actual_format = "mp3" + + elif response_format == "ogg": + converted_bytes, convert_error = convert_wav_to_ogg(audio_bytes) + if convert_error: + return None, f"Failed to convert audio to OGG: {convert_error}" + audio_bytes = converted_bytes + actual_format = "ogg" + + logger.info( + f"[GoogleAIProvider._execute_tts] Audio conversion successful: {actual_format.upper()} ({len(audio_bytes)} bytes)" + ) + + # Extract usage metadata + if response.usage_metadata: + input_tokens = response.usage_metadata.prompt_token_count or 0 + output_tokens = response.usage_metadata.candidates_token_count or 0 + total_tokens = response.usage_metadata.total_token_count or 0 + reasoning_tokens = response.usage_metadata.thoughts_token_count or 0 + else: + logger.warning( + f"[GoogleAIProvider._execute_tts] Response missing usage_metadata, using zeros" + ) + input_tokens = 0 + output_tokens = 0 + total_tokens = 0 + reasoning_tokens = 0 + + # Build response + llm_response = LLMCallResponse( + response=LLMResponse( + provider_response_id=response.response_id, + model=response.model_version or model, + provider=provider, + output=LLMOutput(audio_bytes=audio_bytes, audio_format=actual_format), + ), + usage=Usage( + input_tokens=input_tokens, + output_tokens=output_tokens, + total_tokens=total_tokens, + reasoning_tokens=reasoning_tokens, + ), + ) + + if include_provider_raw_response: + llm_response.provider_raw_response = response.model_dump() + + logger.info( + f"[GoogleAIProvider._execute_tts] Successfully generated TTS response: " + f"{response.response_id}, audio_size={len(audio_bytes)} bytes" + ) + + return llm_response, None + + def execute( + self, + completion_config: NativeCompletionConfig, + query: QueryParams, # Not used by Google AI provider (no conversation support yet) + resolved_input: str, + include_provider_raw_response: bool = False, + ) -> tuple[LLMCallResponse | None, str | None]: + try: + completion_type = completion_config.type + + if completion_type == "stt": + return self._execute_stt( + completion_config=completion_config, + resolved_input=resolved_input, + include_provider_raw_response=include_provider_raw_response, + ) + elif completion_type == "tts": + return self._execute_tts( + completion_config=completion_config, + resolved_input=resolved_input, + include_provider_raw_response=include_provider_raw_response, + ) + else: + return ( + None, + f"Unsupported completion type '{completion_type}' for Google AI provider", + ) + + except TypeError as e: + # handle unexpected arguments gracefully + error_message = f"Invalid or unexpected parameter in Config: {str(e)}" + return None, error_message + + except Exception as e: + error_message = "Unexpected error occurred" + logger.error( + f"[GoogleAIProvider.execute] {error_message}: {str(e)}", exc_info=True + ) + return None, error_message diff --git a/backend/app/services/llm/providers/openai.py b/backend/app/services/llm/providers/oai.py similarity index 90% rename from backend/app/services/llm/providers/openai.py rename to 
backend/app/services/llm/providers/oai.py index 34e35e17e..71ff66565 100644 --- a/backend/app/services/llm/providers/openai.py +++ b/backend/app/services/llm/providers/oai.py @@ -4,6 +4,7 @@ from openai import OpenAI from openai.types.responses.response import Response +from typing import Any from app.models.llm import ( NativeCompletionConfig, LLMCallResponse, @@ -28,10 +29,17 @@ def __init__(self, client: OpenAI): super().__init__(client) self.client = client + @staticmethod + def create_client(credentials: dict[str, Any]) -> Any: + if "api_key" not in credentials: + raise ValueError("OpenAI credentials not configured for this project.") + return OpenAI(api_key=credentials["api_key"]) + def execute( self, completion_config: NativeCompletionConfig, query: QueryParams, + resolved_input: str, include_provider_raw_response: bool = False, ) -> tuple[LLMCallResponse | None, str | None]: response: Response | None = None @@ -41,7 +49,7 @@ def execute( params = { **completion_config.params, } - params["input"] = query.input + params["input"] = resolved_input conversation_cfg = query.conversation diff --git a/backend/app/services/llm/providers/registry.py b/backend/app/services/llm/providers/registry.py index f5d17971f..5a0b1ad76 100644 --- a/backend/app/services/llm/providers/registry.py +++ b/backend/app/services/llm/providers/registry.py @@ -1,12 +1,10 @@ import logging - from sqlmodel import Session -from openai import OpenAI from app.crud import get_provider_credential from app.services.llm.providers.base import BaseProvider -from app.services.llm.providers.openai import OpenAIProvider - +from app.services.llm.providers.oai import OpenAIProvider +from app.services.llm.providers.gai import GoogleAIProvider logger = logging.getLogger(__name__) @@ -16,23 +14,23 @@ class LLMProvider: OPENAI = "openai" # Future constants for native providers: # CLAUDE_NATIVE = "claude-native" - # GEMINI_NATIVE = "gemini-native" + GOOGLE_NATIVE = "google-native" _registry: dict[str, type[BaseProvider]] = { OPENAI_NATIVE: OpenAIProvider, OPENAI: OpenAIProvider, # Future native providers: # CLAUDE_NATIVE: ClaudeProvider, - # GEMINI_NATIVE: GeminiProvider, + GOOGLE_NATIVE: GoogleAIProvider, } @classmethod - def get(cls, name: str) -> type[BaseProvider]: + def get_provider_class(cls, provider_type: str) -> type[BaseProvider]: """Return the provider class for a given name.""" - provider = cls._registry.get(name) + provider = cls._registry.get(provider_type) if not provider: raise ValueError( - f"Provider '{name}' is not supported. " + f"Provider '{provider_type}' is not supported. " f"Supported providers: {', '.join(cls._registry.keys())}" ) return provider @@ -46,7 +44,7 @@ def supported_providers(cls) -> list[str]: def get_llm_provider( session: Session, provider_type: str, project_id: int, organization_id: int ) -> BaseProvider: - provider_class = LLMProvider.get(provider_type) + provider_class = LLMProvider.get_provider_class(provider_type) # e.g., "openai-native" → "openai", "claude-native" → "claude" credential_provider = provider_type.replace("-native", "") @@ -63,14 +61,98 @@ def get_llm_provider( f"Credentials for provider '{credential_provider}' not configured for this project." 
) - if provider_type == LLMProvider.OPENAI_NATIVE: - if "api_key" not in credentials: - raise ValueError("OpenAI credentials not configured for this project.") - client = OpenAI(api_key=credentials["api_key"]) + try: + client = provider_class.create_client(credentials=credentials) + return provider_class(client=client) + except ValueError: + # Re-raise ValueError for credential/configuration errors + raise + except Exception as e: + logger.error(f"Failed to initialize {provider_type} client: {e}", exc_info=True) + raise RuntimeError(f"Could not connect to {provider_type} services.") + + +# ad hoc testing code +if __name__ == "__main__": + import os + import base64 + from pathlib import Path + from dotenv import load_dotenv + from app.models.llm import NativeCompletionConfig, QueryParams + + load_dotenv() + + # 1. Simulate environment/credentials + GEMINI_KEY = os.getenv("GEMINI_API_KEY") + if not GEMINI_KEY: + print("Set GEMINI_API_KEY environment variable first.") + exit(1) + + # This dictionary mimics what get_provider_credential would return from the DB + mock_credentials = {"api_key": GEMINI_KEY} + + # 2. Idiomatic Initialization via Registry + provider_type = "google-native" + + print(f"Initializing provider: {provider_type}...") + + # This block mimics the core logic of your get_llm_provider function + ProviderClass = LLMProvider.get_provider_class(provider_type) + client = ProviderClass.create_client(credentials=mock_credentials) + instance = ProviderClass(client=client) + + # 3. Setup TTS Config and Query + test_config = NativeCompletionConfig( + provider="google-native", + type="tts", + params={ + "model": "gemini-2.5-pro-preview-tts", + "voice": "Kore", + "language": "hi-IN", + "response_format": "mp3", + }, + ) + + test_text = ( + "हाँ आपको एक बार डॉक्यूमेंट जमा करने के बाद पाँच से सात दिन का टाइम लगता है" + ) + test_query = QueryParams(input=test_text) + + # 4. Execution + print(f"Executing TTS for text: '{test_text}'...") + result, error = instance.execute( + completion_config=test_config, query=test_query, resolved_input=test_text + ) + + if error: + print(f"❌ Error: {error}") else: - logger.error( - f"[get_llm_provider] Unsupported provider type requested: {provider_type}" - ) - raise ValueError(f"Provider '{provider_type}' is not supported.") + print(f"✅ TTS Success!") + print(f" Provider Response ID: {result.response.provider_response_id}") + print(f" Model: {result.response.model}") + print(f" Usage: {result.usage.total_tokens} tokens") + + # 5. 
Save audio to file + output_dir = Path("speech_output") + output_dir.mkdir(exist_ok=True) + + # Get audio data (already base64 encoded by LLMOutput validator) + if result.response.output.audio_base64: + audio_bytes = base64.b64decode(result.response.output.audio_base64) + + # Use the actual format from the response + audio_format = result.response.output.audio_format or "wav" + output_file = ( + output_dir + / f"tts_test_{result.response.provider_response_id[:8]}.{audio_format}" + ) + + with open(output_file, "wb") as f: + f.write(audio_bytes) - return provider_class(client=client) + print(f" Audio saved to: {output_file}") + print(f" Audio size: {len(audio_bytes)} bytes") + print(f" Audio format: {audio_format.upper()}") + print(f"\n🎵 Play the audio with: afplay {output_file}") + else: + print(" ⚠️ No audio data in response") diff --git a/backend/app/tests/api/routes/configs/test_config.py b/backend/app/tests/api/routes/configs/test_config.py index 6953f7387..5ff36b252 100644 --- a/backend/app/tests/api/routes/configs/test_config.py +++ b/backend/app/tests/api/routes/configs/test_config.py @@ -19,7 +19,8 @@ def test_create_config_success( "description": "A test LLM configuration", "config_blob": { "completion": { - "provider": "openai-native", + "provider": "openai", + "type": "text", "params": { "model": "gpt-4", "temperature": 0.8, @@ -45,7 +46,17 @@ def test_create_config_success( assert "id" in data["data"] assert "version" in data["data"] assert data["data"]["version"]["version"] == 1 - assert data["data"]["version"]["config_blob"] == config_data["config_blob"] + # Kaapi config params are normalized - invalid fields like max_tokens are stripped + assert data["data"]["version"]["config_blob"]["completion"]["provider"] == "openai" + assert data["data"]["version"]["config_blob"]["completion"]["type"] == "text" + assert ( + data["data"]["version"]["config_blob"]["completion"]["params"]["model"] + == "gpt-4" + ) + assert ( + data["data"]["version"]["config_blob"]["completion"]["params"]["temperature"] + == 0.8 + ) def test_create_config_empty_blob_fails( @@ -88,6 +99,7 @@ def test_create_config_duplicate_name_fails( "config_blob": { "completion": { "provider": "openai", + "type": "text", "params": {"model": "gpt-4"}, } }, diff --git a/backend/app/tests/api/routes/configs/test_version.py b/backend/app/tests/api/routes/configs/test_version.py index 592233511..b5a4ad414 100644 --- a/backend/app/tests/api/routes/configs/test_version.py +++ b/backend/app/tests/api/routes/configs/test_version.py @@ -19,17 +19,17 @@ def test_create_version_success( client: TestClient, user_api_key: TestAuthContext, ) -> None: - """Test creating a new version for a config successfully.""" + """Test creating a new version with partial config update.""" config = create_test_config( db=db, project_id=user_api_key.project_id, name="test-config", ) + # Only send the fields we want to update (partial update) version_data = { "config_blob": { "completion": { - "provider": "openai-native", "params": { "model": "gpt-4-turbo", "temperature": 0.9, @@ -52,34 +52,16 @@ def test_create_version_success( assert ( data["data"]["version"] == 2 ) # First version created with config, this is second - assert data["data"]["config_blob"] == version_data["config_blob"] assert data["data"]["commit_message"] == version_data["commit_message"] assert data["data"]["config_id"] == str(config.id) + # Verify params were updated + config_blob = data["data"]["config_blob"] + assert config_blob["completion"]["params"]["model"] == "gpt-4-turbo" + 
assert config_blob["completion"]["params"]["temperature"] == 0.9 -def test_create_version_empty_blob_fails( - db: Session, - client: TestClient, - user_api_key: TestAuthContext, -) -> None: - """Test that creating a version with empty config_blob fails validation.""" - config = create_test_config( - db=db, - project_id=user_api_key.project_id, - name="test-config", - ) - - version_data = { - "config_blob": {}, - "commit_message": "Empty blob", - } - - response = client.post( - f"{settings.API_V1_STR}/configs/{config.id}/versions", - headers={"X-API-KEY": user_api_key.key}, - json=version_data, - ) - assert response.status_code == 422 + # Verify type was inherited from existing config + assert config_blob["completion"]["type"] == "text" def test_create_version_nonexistent_config( @@ -303,6 +285,7 @@ def test_get_version_by_number( config_blob=ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-4-turbo", "temperature": 0.5}, ) ), @@ -483,3 +466,413 @@ def test_versions_isolated_by_project( headers={"X-API-KEY": user_api_key.key}, ) assert response.status_code == 404 + + +def test_create_version_cannot_change_type_from_text_to_stt( + db: Session, + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + """Test that config type cannot be changed from 'text' to 'stt' in a new version.""" + from app.models.llm.request import KaapiCompletionConfig, TextLLMParams + + # Create initial config with type='text' + config_blob = ConfigBlob( + completion=KaapiCompletionConfig( + provider="openai", + type="text", + params={"model": "gpt-4", "temperature": 0.7}, + ) + ) + config = create_test_config( + db=db, + project_id=user_api_key.project_id, + name="text-config", + config_blob=config_blob, + ) + + # Try to create a new version with type='stt' + version_data = { + "config_blob": { + "completion": { + "provider": "openai", + "type": "stt", + "params": { + "model": "whisper-1", + "instructions": "Transcribe audio", + "temperature": 0.2, + }, + } + }, + "commit_message": "Attempting to change type to stt", + } + + response = client.post( + f"{settings.API_V1_STR}/configs/{config.id}/versions", + headers={"X-API-KEY": user_api_key.key}, + json=version_data, + ) + assert response.status_code == 400 + error_detail = response.json().get("error", "") + assert "cannot change config type" in error_detail.lower() + assert "text" in error_detail + assert "stt" in error_detail + + +def test_create_version_cannot_change_type_from_stt_to_tts( + db: Session, + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + """Test that config type cannot be changed from 'stt' to 'tts' in a new version.""" + from app.models.llm.request import KaapiCompletionConfig + + # Create initial config with type='stt' + config_blob = ConfigBlob( + completion=KaapiCompletionConfig( + provider="openai", + type="stt", + params={ + "model": "whisper-1", + "instructions": "Transcribe audio", + "temperature": 0.2, + }, + ) + ) + config = create_test_config( + db=db, + project_id=user_api_key.project_id, + name="stt-config", + config_blob=config_blob, + ) + + # Try to create a new version with type='tts' + version_data = { + "config_blob": { + "completion": { + "provider": "openai", + "type": "tts", + "params": { + "model": "tts-1", + "voice": "alloy", + "language": "en", + }, + } + }, + "commit_message": "Attempting to change type to tts", + } + + response = client.post( + f"{settings.API_V1_STR}/configs/{config.id}/versions", + headers={"X-API-KEY": 
user_api_key.key}, + json=version_data, + ) + assert response.status_code == 400 + + +def test_create_version_cannot_change_type_from_tts_to_text( + db: Session, + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + """Test that config type cannot be changed from 'tts' to 'text' in a new version.""" + from app.models.llm.request import KaapiCompletionConfig + + # Create initial config with type='tts' + config_blob = ConfigBlob( + completion=KaapiCompletionConfig( + provider="openai", + type="tts", + params={ + "model": "tts-1", + "voice": "alloy", + "language": "en", + }, + ) + ) + config = create_test_config( + db=db, + project_id=user_api_key.project_id, + name="tts-config", + config_blob=config_blob, + ) + + # Try to create a new version with type='text' + version_data = { + "config_blob": { + "completion": { + "provider": "openai", + "type": "text", + "params": { + "model": "gpt-4", + "temperature": 0.7, + }, + } + }, + "commit_message": "Attempting to change type to text", + } + + response = client.post( + f"{settings.API_V1_STR}/configs/{config.id}/versions", + headers={"X-API-KEY": user_api_key.key}, + json=version_data, + ) + assert response.status_code == 400 + + +def test_create_version_same_type_succeeds( + db: Session, + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + """Test that creating a new version with the same type succeeds.""" + from app.models.llm.request import KaapiCompletionConfig + + # Create initial config with type='text' + config_blob = ConfigBlob( + completion=KaapiCompletionConfig( + provider="openai", + type="text", + params={ + "model": "gpt-4", + "temperature": 0.7, + }, + ) + ) + config = create_test_config( + db=db, + project_id=user_api_key.project_id, + name="text-config", + config_blob=config_blob, + ) + + # Create a new version with the same type='text' + version_data = { + "config_blob": { + "completion": { + "provider": "openai", + "type": "text", + "params": { + "model": "gpt-4-turbo", + "temperature": 0.9, + }, + } + }, + "commit_message": "Updated to gpt-4-turbo with same type", + } + + response = client.post( + f"{settings.API_V1_STR}/configs/{config.id}/versions", + headers={"X-API-KEY": user_api_key.key}, + json=version_data, + ) + assert response.status_code == 201 + data = response.json() + assert data["success"] is True + assert data["data"]["version"] == 2 + assert data["data"]["config_blob"]["completion"]["type"] == "text" + + +def test_create_version_partial_update_params_only( + db: Session, + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + """Test partial update - only updating params, inheriting provider and type.""" + from app.models.llm.request import KaapiCompletionConfig + + # Create initial config + config_blob = ConfigBlob( + completion=KaapiCompletionConfig( + provider="openai", + type="text", + params={ + "model": "gpt-4", + "temperature": 0.7, + }, + ) + ) + config = create_test_config( + db=db, + project_id=user_api_key.project_id, + name="partial-update-test", + config_blob=config_blob, + ) + + # Only send params update - provider and type will be inherited + version_data = { + "config_blob": { + "completion": { + "params": { + "model": "gpt-4-turbo", + "temperature": 0.9, + }, + } + }, + "commit_message": "Only updating model and temperature", + } + + response = client.post( + f"{settings.API_V1_STR}/configs/{config.id}/versions", + headers={"X-API-KEY": user_api_key.key}, + json=version_data, + ) + assert response.status_code == 201 + data = response.json() + assert 
data["success"] is True + assert data["data"]["version"] == 2 + + config_blob_result = data["data"]["config_blob"] + # Provider and type should be inherited + assert config_blob_result["completion"]["provider"] == "openai" + assert config_blob_result["completion"]["type"] == "text" + # Params should be updated + assert config_blob_result["completion"]["params"]["model"] == "gpt-4-turbo" + assert config_blob_result["completion"]["params"]["temperature"] == 0.9 + + +def test_create_config_with_kaapi_provider_success( + db: Session, + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + """Test creating a config with Kaapi provider (openai) works correctly.""" + config_data = { + "name": "kaapi-text-config", + "description": "A Kaapi configuration for text completion", + "config_blob": { + "completion": { + "provider": "openai", + "type": "text", + "params": { + "model": "gpt-4", + "temperature": 0.7, + }, + } + }, + "commit_message": "Initial Kaapi configuration", + } + + response = client.post( + f"{settings.API_V1_STR}/configs/", + headers={"X-API-KEY": user_api_key.key}, + json=config_data, + ) + assert response.status_code == 201 + data = response.json() + assert data["success"] is True + assert data["data"]["name"] == config_data["name"] + assert data["data"]["version"]["config_blob"]["completion"]["provider"] == "openai" + assert data["data"]["version"]["config_blob"]["completion"]["type"] == "text" + + +def test_create_version_with_kaapi_stt_provider_success( + db: Session, + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + """Test creating STT config and version with Kaapi provider works correctly.""" + from app.models.llm.request import KaapiCompletionConfig + + # Create initial STT config with Kaapi provider + config_blob = ConfigBlob( + completion=KaapiCompletionConfig( + provider="openai", + type="stt", + params={ + "model": "whisper-1", + "instructions": "Transcribe audio accurately", + "temperature": 0.2, + }, + ) + ) + config = create_test_config( + db=db, + project_id=user_api_key.project_id, + name="kaapi-stt-config", + config_blob=config_blob, + ) + + # Create a new version with the same type='stt' + version_data = { + "config_blob": { + "completion": { + "provider": "openai", + "type": "stt", + "params": { + "model": "whisper-1", + "instructions": "Transcribe with high accuracy", + "temperature": 0.1, + }, + } + }, + "commit_message": "Updated STT instructions", + } + + response = client.post( + f"{settings.API_V1_STR}/configs/{config.id}/versions", + headers={"X-API-KEY": user_api_key.key}, + json=version_data, + ) + assert response.status_code == 201 + data = response.json() + assert data["success"] is True + assert data["data"]["version"] == 2 + assert data["data"]["config_blob"]["completion"]["provider"] == "openai" + assert data["data"]["config_blob"]["completion"]["type"] == "stt" + + +def test_create_version_with_kaapi_tts_provider_success( + db: Session, + client: TestClient, + user_api_key: TestAuthContext, +) -> None: + """Test creating TTS config and version with Kaapi provider works correctly.""" + from app.models.llm.request import KaapiCompletionConfig + + # Create initial TTS config with Kaapi provider + config_blob = ConfigBlob( + completion=KaapiCompletionConfig( + provider="openai", + type="tts", + params={ + "model": "tts-1", + "voice": "alloy", + "language": "en", + }, + ) + ) + config = create_test_config( + db=db, + project_id=user_api_key.project_id, + name="kaapi-tts-config", + config_blob=config_blob, + ) + + # 
Create a new version with the same type='tts' + version_data = { + "config_blob": { + "completion": { + "provider": "openai", + "type": "tts", + "params": { + "model": "tts-1-hd", + "voice": "nova", + "language": "en", + }, + } + }, + "commit_message": "Updated TTS to HD model with nova voice", + } + + response = client.post( + f"{settings.API_V1_STR}/configs/{config.id}/versions", + headers={"X-API-KEY": user_api_key.key}, + json=version_data, + ) + assert response.status_code == 201 + data = response.json() + assert data["success"] is True + assert data["data"]["version"] == 2 + assert data["data"]["config_blob"]["completion"]["provider"] == "openai" + assert data["data"]["config_blob"]["completion"]["type"] == "tts" diff --git a/backend/app/tests/api/routes/test_evaluation.py b/backend/app/tests/api/routes/test_evaluation.py index bf21fc3f0..36222e4d4 100644 --- a/backend/app/tests/api/routes/test_evaluation.py +++ b/backend/app/tests/api/routes/test_evaluation.py @@ -9,7 +9,7 @@ from app.crud.evaluations.batch import build_evaluation_jsonl from app.models import EvaluationDataset, EvaluationRun -from app.models.llm.request import KaapiLLMParams +from app.models.llm.request import TextLLMParams from app.tests.utils.auth import TestAuthContext from app.tests.utils.test_data import create_test_config, create_test_evaluation_dataset @@ -607,7 +607,7 @@ def test_build_batch_jsonl_basic(self) -> None: } ] - config = KaapiLLMParams( + config = TextLLMParams( model="gpt-4o", temperature=0.2, instructions="You are a helpful assistant", @@ -638,7 +638,7 @@ def test_build_batch_jsonl_with_tools(self) -> None: } ] - config = KaapiLLMParams( + config = TextLLMParams( model="gpt-4o-mini", instructions="Search documents", knowledge_base_ids=["vs_abc123"], @@ -662,7 +662,7 @@ def test_build_batch_jsonl_minimal_config(self) -> None: } ] - config = KaapiLLMParams(model="gpt-4o") # Only model provided + config = TextLLMParams(model="gpt-4o") # Only model provided jsonl_data = build_evaluation_jsonl(dataset_items, config) @@ -694,7 +694,7 @@ def test_build_batch_jsonl_skips_empty_questions(self) -> None: }, ] - config = KaapiLLMParams(model="gpt-4o", instructions="Test") + config = TextLLMParams(model="gpt-4o", instructions="Test") jsonl_data = build_evaluation_jsonl(dataset_items, config) @@ -714,7 +714,7 @@ def test_build_batch_jsonl_multiple_items(self) -> None: for i in range(5) ] - config = KaapiLLMParams( + config = TextLLMParams( model="gpt-4o", instructions="Answer questions", ) diff --git a/backend/app/tests/api/routes/test_llm.py b/backend/app/tests/api/routes/test_llm.py index 9313750a0..245ccf738 100644 --- a/backend/app/tests/api/routes/test_llm.py +++ b/backend/app/tests/api/routes/test_llm.py @@ -6,9 +6,7 @@ from app.models.llm.request import ( QueryParams, LLMCallConfig, - CompletionConfig, ConfigBlob, - KaapiLLMParams, KaapiCompletionConfig, NativeCompletionConfig, ) @@ -27,6 +25,7 @@ def test_llm_call_success( blob=ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={ "model": "gpt-4", "temperature": 0.7, @@ -65,11 +64,12 @@ def test_llm_call_with_kaapi_config( blob=ConfigBlob( completion=KaapiCompletionConfig( provider="openai", - params=KaapiLLMParams( - model="gpt-4o", - instructions="You are a physics expert", - temperature=0.5, - ), + type="text", + params={ + "model": "gpt-4o", + "instructions": "You are a physics expert", + "temperature": 0.5, + }, ) ) ), @@ -100,6 +100,7 @@ def test_llm_call_with_native_config( blob=ConfigBlob( 
completion=NativeCompletionConfig( provider="openai-native", + type="text", params={ "model": "gpt-4", "temperature": 0.9, diff --git a/backend/app/tests/crud/config/test_config.py b/backend/app/tests/crud/config/test_config.py index 0267c0585..6fc9c7f19 100644 --- a/backend/app/tests/crud/config/test_config.py +++ b/backend/app/tests/crud/config/test_config.py @@ -21,6 +21,7 @@ def example_config_blob(): return ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={ "model": "gpt-4", "temperature": 0.8, diff --git a/backend/app/tests/crud/config/test_version.py b/backend/app/tests/crud/config/test_version.py index 8c6fa8eaa..0d7812151 100644 --- a/backend/app/tests/crud/config/test_version.py +++ b/backend/app/tests/crud/config/test_version.py @@ -19,6 +19,7 @@ def example_config_blob(): return ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={ "model": "gpt-4", "temperature": 0.8, diff --git a/backend/app/tests/crud/test_credentials.py b/backend/app/tests/crud/test_credentials.py index 9e1bec372..ca1b1648f 100644 --- a/backend/app/tests/crud/test_credentials.py +++ b/backend/app/tests/crud/test_credentials.py @@ -259,7 +259,7 @@ def test_langfuse_credential_validation(db: Session) -> None: invalid_credentials = { "langfuse": { "public_key": "test-public-key", - "secret_key": "test-secret-key" + "secret_key": "test-secret-key", # Missing host } } diff --git a/backend/app/tests/crud/test_llm.py b/backend/app/tests/crud/test_llm.py new file mode 100644 index 000000000..2251755fd --- /dev/null +++ b/backend/app/tests/crud/test_llm.py @@ -0,0 +1,413 @@ +from uuid import uuid4 + +import pytest +from sqlmodel import Session, select + +from app.crud import JobCrud +from app.crud.llm import ( + create_llm_call, + get_llm_call_by_id, + get_llm_calls_by_job_id, + update_llm_call_response, +) +from app.models import JobType, Project, Organization +from app.models.llm import ( + ConfigBlob, + LLMCallRequest, + LlmCall, + QueryParams, +) +from app.models.llm.request import ( + KaapiCompletionConfig, + LLMCallConfig, +) + + +@pytest.fixture +def test_project(db: Session) -> Project: + """Get the first available test project.""" + project = db.exec(select(Project).limit(1)).first() + assert project is not None, "No test project found in seed data" + return project + + +@pytest.fixture +def test_organization(db: Session, test_project: Project) -> Organization: + """Get the organization for the test project.""" + org = db.get(Organization, test_project.organization_id) + assert org is not None, "No organization found for test project" + return org + + +@pytest.fixture +def test_job(db: Session): + """Create a test job for LLM call tests.""" + crud = JobCrud(db) + return crud.create(job_type=JobType.LLM_API, trace_id="test-llm-trace") + + +@pytest.fixture +def text_config_blob() -> ConfigBlob: + """Create a text completion config blob.""" + return ConfigBlob( + completion=KaapiCompletionConfig( + provider="openai", + params={ + "model": "gpt-4o", + "instructions": "You are a helpful assistant", + "temperature": 0.7, + }, + type="text", + ) + ) + + +@pytest.fixture +def stt_config_blob() -> ConfigBlob: + """Create a speech-to-text config blob.""" + return ConfigBlob( + completion=KaapiCompletionConfig( + provider="openai", + params={ + "model": "whisper-1", + "instructions": "Transcribe", + "input_language": "en", + }, + type="stt", + ) + ) + + +@pytest.fixture +def tts_config_blob() -> ConfigBlob: + """Create a 
text-to-speech config blob.""" + return ConfigBlob( + completion=KaapiCompletionConfig( + provider="openai", + params={ + "model": "tts-1", + "voice": "alloy", + "language": "en", + }, + type="tts", + ) + ) + + +def test_create_llm_call_text( + db: Session, + test_job, + test_project: Project, + test_organization: Organization, + text_config_blob: ConfigBlob, +) -> None: + """Test creating a text completion LLM call.""" + request = LLMCallRequest( + query=QueryParams(input="Hello, how are you?"), + config=LLMCallConfig(blob=text_config_blob), + ) + + llm_call = create_llm_call( + db, + request=request, + job_id=test_job.id, + project_id=test_project.id, + organization_id=test_organization.id, + resolved_config=text_config_blob, + original_provider="openai", + ) + + assert llm_call.id is not None + assert llm_call.job_id == test_job.id + assert llm_call.project_id == test_project.id + assert llm_call.organization_id == test_organization.id + assert llm_call.input == "Hello, how are you?" + assert llm_call.input_type == "text" + assert llm_call.output_type == "text" + assert llm_call.provider == "openai" + assert llm_call.model == "gpt-4o" + assert llm_call.config is not None + assert "config_blob" in llm_call.config + + +def test_create_llm_call_stt( + db: Session, + test_job, + test_project: Project, + test_organization: Organization, + stt_config_blob: ConfigBlob, +) -> None: + """Test creating a speech-to-text LLM call.""" + request = LLMCallRequest( + query=QueryParams(input="/path/to/audio.wav"), + config=LLMCallConfig(blob=stt_config_blob), + ) + + llm_call = create_llm_call( + db, + request=request, + job_id=test_job.id, + project_id=test_project.id, + organization_id=test_organization.id, + resolved_config=stt_config_blob, + original_provider="openai", + ) + + assert llm_call.input_type == "audio" + assert llm_call.output_type == "text" + assert llm_call.model == "whisper-1" + + +def test_create_llm_call_tts( + db: Session, + test_job, + test_project: Project, + test_organization: Organization, + tts_config_blob: ConfigBlob, +) -> None: + """Test creating a text-to-speech LLM call.""" + request = LLMCallRequest( + query=QueryParams(input="Hello world"), + config=LLMCallConfig(blob=tts_config_blob), + ) + + llm_call = create_llm_call( + db, + request=request, + job_id=test_job.id, + project_id=test_project.id, + organization_id=test_organization.id, + resolved_config=tts_config_blob, + original_provider="openai", + ) + + assert llm_call.input_type == "text" + assert llm_call.output_type == "audio" + assert llm_call.model == "tts-1" + + +def test_create_llm_call_with_stored_config( + db: Session, + test_job, + test_project: Project, + test_organization: Organization, + text_config_blob: ConfigBlob, +) -> None: + """Test creating an LLM call with a stored config reference.""" + config_id = uuid4() + request = LLMCallRequest( + query=QueryParams(input="Test input"), + config=LLMCallConfig(id=config_id, version=1), + ) + + llm_call = create_llm_call( + db, + request=request, + job_id=test_job.id, + project_id=test_project.id, + organization_id=test_organization.id, + resolved_config=text_config_blob, + original_provider="openai", + ) + + assert llm_call.config is not None + assert "config_id" in llm_call.config + assert llm_call.config["config_id"] == str(config_id) + assert llm_call.config["config_version"] == 1 + + +def test_get_llm_call_by_id( + db: Session, + test_job, + test_project: Project, + test_organization: Organization, + text_config_blob: ConfigBlob, +) -> None: + """Test 
fetching an LLM call by ID.""" + request = LLMCallRequest( + query=QueryParams(input="Test input"), + config=LLMCallConfig(blob=text_config_blob), + ) + + created = create_llm_call( + db, + request=request, + job_id=test_job.id, + project_id=test_project.id, + organization_id=test_organization.id, + resolved_config=text_config_blob, + original_provider="openai", + ) + + fetched = get_llm_call_by_id(db, created.id) + assert fetched is not None + assert fetched.id == created.id + assert fetched.input == "Test input" + + +def test_get_llm_call_by_id_with_project_scope( + db: Session, + test_job, + test_project: Project, + test_organization: Organization, + text_config_blob: ConfigBlob, +) -> None: + """Test fetching an LLM call with project scoping.""" + request = LLMCallRequest( + query=QueryParams(input="Test input"), + config=LLMCallConfig(blob=text_config_blob), + ) + + created = create_llm_call( + db, + request=request, + job_id=test_job.id, + project_id=test_project.id, + organization_id=test_organization.id, + resolved_config=text_config_blob, + original_provider="openai", + ) + + # Should find with correct project + fetched = get_llm_call_by_id(db, created.id, project_id=test_project.id) + assert fetched is not None + + # Should not find with wrong project + fetched_wrong = get_llm_call_by_id(db, created.id, project_id=99999) + assert fetched_wrong is None + + +def test_get_llm_call_by_id_not_found(db: Session) -> None: + """Test fetching a non-existent LLM call.""" + fake_id = uuid4() + result = get_llm_call_by_id(db, fake_id) + assert result is None + + +def test_get_llm_calls_by_job_id( + db: Session, + test_job, + test_project: Project, + test_organization: Organization, + text_config_blob: ConfigBlob, +) -> None: + """Test fetching all LLM calls for a job.""" + # Create multiple LLM calls for the same job + for i in range(3): + request = LLMCallRequest( + query=QueryParams(input=f"Test input {i}"), + config=LLMCallConfig(blob=text_config_blob), + ) + create_llm_call( + db, + request=request, + job_id=test_job.id, + project_id=test_project.id, + organization_id=test_organization.id, + resolved_config=text_config_blob, + original_provider="openai", + ) + + llm_calls = get_llm_calls_by_job_id(db, test_job.id) + assert len(llm_calls) == 3 + + +def test_get_llm_calls_by_job_id_empty(db: Session) -> None: + """Test fetching LLM calls for a job with no calls.""" + fake_job_id = uuid4() + llm_calls = get_llm_calls_by_job_id(db, fake_job_id) + assert llm_calls == [] + + +def test_update_llm_call_response( + db: Session, + test_job, + test_project: Project, + test_organization: Organization, + text_config_blob: ConfigBlob, +) -> None: + """Test updating an LLM call with response data.""" + request = LLMCallRequest( + query=QueryParams(input="Test input"), + config=LLMCallConfig(blob=text_config_blob), + ) + + created = create_llm_call( + db, + request=request, + job_id=test_job.id, + project_id=test_project.id, + organization_id=test_organization.id, + resolved_config=text_config_blob, + original_provider="openai", + ) + + # Update with response data + content = {"text": "This is the response"} + usage = { + "input_tokens": 10, + "output_tokens": 20, + "total_tokens": 30, + "reasoning_tokens": None, + } + + updated = update_llm_call_response( + db, + llm_call_id=created.id, + provider_response_id="resp_123456", + content=content, + usage=usage, + conversation_id="conv_abc", + ) + + assert updated.provider_response_id == "resp_123456" + assert updated.content == content + assert 
updated.usage == usage + assert updated.conversation_id == "conv_abc" + + +def test_update_llm_call_response_partial( + db: Session, + test_job, + test_project: Project, + test_organization: Organization, + text_config_blob: ConfigBlob, +) -> None: + """Test partial update of an LLM call response.""" + request = LLMCallRequest( + query=QueryParams(input="Test input"), + config=LLMCallConfig(blob=text_config_blob), + ) + + created = create_llm_call( + db, + request=request, + job_id=test_job.id, + project_id=test_project.id, + organization_id=test_organization.id, + resolved_config=text_config_blob, + original_provider="openai", + ) + + # Only update provider_response_id + updated = update_llm_call_response( + db, + llm_call_id=created.id, + provider_response_id="resp_partial", + ) + + assert updated.provider_response_id == "resp_partial" + assert updated.content is None # Should remain None + assert updated.usage is None # Should remain None + + +def test_update_llm_call_response_not_found(db: Session) -> None: + """Test updating a non-existent LLM call.""" + fake_id = uuid4() + + with pytest.raises(ValueError, match=str(fake_id)): + update_llm_call_response( + db, + llm_call_id=fake_id, + provider_response_id="resp_123", + ) diff --git a/backend/app/tests/scripts/test_backend_pre_start.py b/backend/app/tests/scripts/test_backend_pre_start.py index 9b134c3cb..44f810cb6 100644 --- a/backend/app/tests/scripts/test_backend_pre_start.py +++ b/backend/app/tests/scripts/test_backend_pre_start.py @@ -8,8 +8,9 @@ def test_init_success(): mock_session.exec.return_value = None fake_select = MagicMock() - with patch("app.backend_pre_start.Session", return_value=mock_session), patch( - "app.backend_pre_start.select", return_value=fake_select + with ( + patch("app.backend_pre_start.Session", return_value=mock_session), + patch("app.backend_pre_start.select", return_value=fake_select), ): try: init(MagicMock()) diff --git a/backend/app/tests/scripts/test_test_pre_start.py b/backend/app/tests/scripts/test_test_pre_start.py index d7f686940..728e6b6c2 100644 --- a/backend/app/tests/scripts/test_test_pre_start.py +++ b/backend/app/tests/scripts/test_test_pre_start.py @@ -8,8 +8,9 @@ def test_init_success(): mock_session.exec.return_value = None fake_select = MagicMock() - with patch("app.tests_pre_start.Session", return_value=mock_session), patch( - "app.tests_pre_start.select", return_value=fake_select + with ( + patch("app.tests_pre_start.Session", return_value=mock_session), + patch("app.tests_pre_start.select", return_value=fake_select), ): try: init(MagicMock()) diff --git a/backend/app/tests/seed_data/seed_data.py b/backend/app/tests/seed_data/seed_data.py index 33e71a502..0935bbfaf 100644 --- a/backend/app/tests/seed_data/seed_data.py +++ b/backend/app/tests/seed_data/seed_data.py @@ -18,6 +18,8 @@ Credential, Assistant, Document, + Config, + ConfigVersion, ) @@ -348,6 +350,7 @@ def clear_database(session: Session) -> None: session.exec(delete(Assistant)) session.exec(delete(Document)) session.exec(delete(APIKey)) + # ConfigVersion and Config are cascade-deleted when Project is deleted session.exec(delete(Project)) session.exec(delete(Organization)) session.exec(delete(User)) diff --git a/backend/app/tests/services/doctransformer/test_job/conftest.py b/backend/app/tests/services/doctransformer/test_job/conftest.py index 8787db17a..e4f898992 100644 --- a/backend/app/tests/services/doctransformer/test_job/conftest.py +++ b/backend/app/tests/services/doctransformer/test_job/conftest.py @@ -1,6 +1,7 @@ 
""" Pytest fixtures for document transformation service tests. """ + import os from typing import Any, Callable, Generator, Tuple from unittest.mock import patch diff --git a/backend/app/tests/services/doctransformer/test_job/test_execute_job.py b/backend/app/tests/services/doctransformer/test_job/test_execute_job.py index 97aef7c0e..d4508bd99 100644 --- a/backend/app/tests/services/doctransformer/test_job/test_execute_job.py +++ b/backend/app/tests/services/doctransformer/test_job/test_execute_job.py @@ -48,11 +48,12 @@ def test_execute_job_success( job_crud = DocTransformationJobCrud(session=db, project_id=project.id) job = job_crud.create(DocTransformJobCreate(source_document_id=document.id)) - with patch( - "app.services.doctransform.job.Session" - ) as mock_session_class, patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -98,11 +99,12 @@ def test_execute_job_with_nonexistent_job( self.setup_aws_s3() nonexistent_job_id = uuid4() - with patch( - "app.services.doctransform.job.Session" - ) as mock_session_class, patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -138,11 +140,12 @@ def test_execute_job_with_missing_source_document( job_crud = DocTransformationJobCrud(session=db, project_id=project.id) job = job_crud.create(DocTransformJobCreate(source_document_id=document.id)) - with patch( - "app.services.doctransform.job.Session" - ) as mock_session_class, patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -183,13 +186,13 @@ def test_execute_job_with_transformer_error( job = job_crud.create(DocTransformJobCreate(source_document_id=document.id)) # Mock convert_document to raise TransformationError - with patch( - "app.services.doctransform.job.Session" - ) as mock_session_class, patch( - "app.services.doctransform.job.convert_document" - ) as mock_convert, patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch("app.services.doctransform.job.convert_document") as mock_convert, + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -227,11 +230,12 @@ def test_execute_job_status_transitions( job = job_crud.create(DocTransformJobCreate(source_document_id=document.id)) initial_status = job.status - with patch( - "app.services.doctransform.job.Session" - ) as 
mock_session_class, patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -277,11 +281,12 @@ def test_execute_job_with_different_content_types( job_crud = DocTransformationJobCrud(session=db, project_id=project.id) job = job_crud.create(DocTransformJobCreate(source_document_id=document.id)) - with patch( - "app.services.doctransform.job.Session" - ) as mock_session_class, patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None diff --git a/backend/app/tests/services/doctransformer/test_job/test_execute_job_errors.py b/backend/app/tests/services/doctransformer/test_job/test_execute_job_errors.py index 24da19cbf..344c9133d 100644 --- a/backend/app/tests/services/doctransformer/test_job/test_execute_job_errors.py +++ b/backend/app/tests/services/doctransformer/test_job/test_execute_job_errors.py @@ -41,13 +41,15 @@ def test_execute_job_with_storage_error( job = job_crud.create(DocTransformJobCreate(source_document_id=document.id)) # Mock storage.put to raise an error - with patch( - "app.services.doctransform.job.Session" - ) as mock_session_class, patch( - "app.services.doctransform.job.get_cloud_storage" - ) as mock_storage_class, patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.job.get_cloud_storage" + ) as mock_storage_class, + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -95,14 +97,16 @@ def test_execute_job_retry_mechanism( # Create a side effect that fails once then succeeds (fast retry will only try 2 times) failing_convert_document = create_failing_convert_document(fail_count=1) - with patch( - "app.services.doctransform.job.Session" - ) as mock_session_class, patch( - "app.services.doctransform.job.convert_document", - side_effect=failing_convert_document, - ), patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.job.convert_document", + side_effect=failing_convert_document, + ), + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -144,14 +148,16 @@ def test_execute_job_exhausted_retries( create_persistent_failing_convert_document("Persistent error") ) - with patch( - "app.services.doctransform.job.Session" - ) as mock_session_class, patch( - "app.services.doctransform.job.convert_document", - side_effect=persistent_failing_convert_document, - 
), patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.job.convert_document", + side_effect=persistent_failing_convert_document, + ), + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -190,11 +196,12 @@ def test_execute_job_database_error_during_completion( job_crud = DocTransformationJobCrud(session=db, project_id=project.id) job = job_crud.create(DocTransformJobCreate(source_document_id=document.id)) - with patch( - "app.services.doctransform.job.Session" - ) as mock_session_class, patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None diff --git a/backend/app/tests/services/doctransformer/test_job/test_integration.py b/backend/app/tests/services/doctransformer/test_job/test_integration.py index 51a9a3e5c..51cc83899 100644 --- a/backend/app/tests/services/doctransformer/test_job/test_integration.py +++ b/backend/app/tests/services/doctransformer/test_job/test_integration.py @@ -36,12 +36,16 @@ def test_execute_job_end_to_end_workflow( job_crud = DocTransformationJobCrud(session=db, project_id=project.id) job = job_crud.create(DocTransformJobCreate(source_document_id=document.id)) - with patch( - "app.services.doctransform.job.start_low_priority_job", - return_value="fake-task-id", - ), patch("app.services.doctransform.job.Session") as mock_session_class, patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch( + "app.services.doctransform.job.start_low_priority_job", + return_value="fake-task-id", + ), + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -94,11 +98,12 @@ def test_execute_job_concurrent_jobs( jobs.append(job) for job in jobs: - with patch( - "app.services.doctransform.job.Session" - ) as mock_session_class, patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -138,11 +143,12 @@ def test_multiple_format_transformations( jobs.append((job, target_format)) for job, target_format in jobs: - with patch( - "app.services.doctransform.job.Session" - ) as mock_session_class, patch( - "app.services.doctransform.registry.TRANSFORMERS", - {"test": MockTestTransformer}, + with ( + patch("app.services.doctransform.job.Session") as mock_session_class, + patch( + "app.services.doctransform.registry.TRANSFORMERS", + {"test": MockTestTransformer}, + ), ): 
mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None diff --git a/backend/app/tests/services/doctransformer/test_job/utils.py b/backend/app/tests/services/doctransformer/test_job/utils.py index 277c5208c..da0da8106 100644 --- a/backend/app/tests/services/doctransformer/test_job/utils.py +++ b/backend/app/tests/services/doctransformer/test_job/utils.py @@ -4,6 +4,7 @@ This module contains DocTransformTestBase with common AWS S3 setup and utilities. All fixtures are automatically available from conftest.py in the same directory. """ + from pathlib import Path from urllib.parse import urlparse diff --git a/backend/app/tests/services/llm/providers/test_gai.py b/backend/app/tests/services/llm/providers/test_gai.py new file mode 100644 index 000000000..5a4a63df5 --- /dev/null +++ b/backend/app/tests/services/llm/providers/test_gai.py @@ -0,0 +1,572 @@ +""" +Tests for the Google AI provider (STT and TTS). +""" + +import pytest +from unittest.mock import MagicMock +from types import SimpleNamespace + +from app.models.llm import ( + NativeCompletionConfig, + QueryParams, +) +from app.services.llm.providers.gai import GoogleAIProvider + + +def mock_google_response( + text: str = "Transcribed text", + model: str = "gemini-2.5-pro", + response_id: str = "resp_123", +) -> SimpleNamespace: + """Create a mock Google AI response object.""" + usage = SimpleNamespace( + prompt_token_count=50, + candidates_token_count=100, + total_token_count=150, + thoughts_token_count=0, + ) + + response = SimpleNamespace( + response_id=response_id, + model_version=model, + text=text, + usage_metadata=usage, + model_dump=lambda: { + "response_id": response_id, + "model_version": model, + "text": text, + "usage_metadata": { + "prompt_token_count": 50, + "candidates_token_count": 100, + "total_token_count": 150, + "thoughts_token_count": 0, + }, + }, + ) + return response + + +class TestGoogleAIProviderSTT: + """Test cases for GoogleAIProvider STT functionality.""" + + @pytest.fixture + def mock_client(self): + """Create a mock Google AI client.""" + client = MagicMock() + # Mock file upload + mock_file = MagicMock() + mock_file.name = "test_audio.wav" + client.files.upload.return_value = mock_file + return client + + @pytest.fixture + def provider(self, mock_client): + """Create a GoogleAIProvider instance with mock client.""" + return GoogleAIProvider(client=mock_client) + + @pytest.fixture + def stt_config(self): + """Create a basic STT completion config.""" + return NativeCompletionConfig( + provider="google-native", + type="stt", + params={ + "model": "gemini-2.5-pro", + }, + ) + + @pytest.fixture + def query_params(self): + """Create basic query parameters.""" + return QueryParams(input="Test audio input") + + def test_stt_success_with_auto_language( + self, provider, mock_client, stt_config, query_params + ): + """Test successful STT execution with auto language detection.""" + mock_response = mock_google_response(text="Hello world") + mock_client.models.generate_content.return_value = mock_response + + result, error = provider.execute(stt_config, query_params, "/path/to/audio.wav") + + assert error is None + assert result is not None + assert result.response.output.text == "Hello world" + assert result.response.model == "gemini-2.5-pro" + assert result.response.provider == "google-native" + assert result.usage.input_tokens == 50 + assert result.usage.output_tokens == 100 + assert result.usage.total_tokens == 150 + + # Verify file upload and content 
generation + mock_client.files.upload.assert_called_once_with(file="/path/to/audio.wav") + mock_client.models.generate_content.assert_called_once() + + # Verify instruction contains auto-detect + call_args = mock_client.models.generate_content.call_args + assert "Detect the spoken language automatically" in call_args[1]["contents"][0] + + def test_stt_with_specific_input_language( + self, provider, mock_client, stt_config, query_params + ): + """Test STT with specific input language.""" + stt_config.params["input_language"] = "English" + + mock_response = mock_google_response(text="Transcribed English text") + mock_client.models.generate_content.return_value = mock_response + + result, error = provider.execute(stt_config, query_params, "/path/to/audio.wav") + + assert error is None + assert result is not None + + # Verify instruction contains specific language + call_args = mock_client.models.generate_content.call_args + assert "Transcribe the audio from English" in call_args[1]["contents"][0] + + def test_stt_with_translation( + self, provider, mock_client, stt_config, query_params + ): + """Test STT with translation to different output language.""" + stt_config.params["input_language"] = "Spanish" + stt_config.params["output_language"] = "English" + + mock_response = mock_google_response(text="Translated text") + mock_client.models.generate_content.return_value = mock_response + + result, error = provider.execute(stt_config, query_params, "/path/to/audio.wav") + + assert error is None + assert result is not None + + # Verify instruction contains translation + call_args = mock_client.models.generate_content.call_args + instruction = call_args[1]["contents"][0] + assert "Transcribe the audio from Spanish" in instruction + assert "translate to English" in instruction + + def test_stt_with_custom_instructions( + self, provider, mock_client, stt_config, query_params + ): + """Test STT with custom instructions.""" + stt_config.params["instructions"] = "Include timestamps" + + mock_response = mock_google_response(text="Transcribed with timestamps") + mock_client.models.generate_content.return_value = mock_response + + result, error = provider.execute(stt_config, query_params, "/path/to/audio.wav") + + assert error is None + assert result is not None + + # Verify custom instructions are included + call_args = mock_client.models.generate_content.call_args + instruction = call_args[1]["contents"][0] + assert "Include timestamps" in instruction + + def test_stt_with_include_provider_raw_response( + self, provider, mock_client, stt_config, query_params + ): + """Test STT with include_provider_raw_response=True.""" + mock_response = mock_google_response(text="Raw response test") + mock_client.models.generate_content.return_value = mock_response + + result, error = provider.execute( + stt_config, + query_params, + "/path/to/audio.wav", + include_provider_raw_response=True, + ) + + assert error is None + assert result is not None + assert result.provider_raw_response is not None + assert isinstance(result.provider_raw_response, dict) + assert result.provider_raw_response["text"] == "Raw response test" + + def test_stt_missing_model_parameter(self, provider, mock_client, query_params): + """Test error handling when model parameter is missing.""" + stt_config = NativeCompletionConfig( + provider="google-native", + type="stt", + params={}, # Missing model + ) + + result, error = provider.execute(stt_config, query_params, "/path/to/audio.wav") + + assert result is None + assert error is not None + assert 
"Missing 'model' in native params" in error + + def test_stt_with_type_error(self, provider, mock_client, stt_config, query_params): + """Test handling of TypeError (invalid parameters).""" + mock_client.models.generate_content.side_effect = TypeError( + "unexpected keyword argument 'invalid_param'" + ) + + result, error = provider.execute(stt_config, query_params, "/path/to/audio.wav") + + assert result is None + assert error is not None + assert "Invalid or unexpected parameter in Config" in error + + def test_stt_with_generic_exception( + self, provider, mock_client, stt_config, query_params + ): + """Test handling of unexpected exceptions.""" + mock_client.files.upload.side_effect = Exception("File upload failed") + + result, error = provider.execute(stt_config, query_params, "/path/to/audio.wav") + + assert result is None + assert error is not None + assert "Unexpected error occurred" in error + + def test_stt_with_invalid_input_type( + self, provider, mock_client, stt_config, query_params + ): + """Test STT execution with invalid input type (non-string).""" + # Pass a dict instead of a string path + invalid_input = {"invalid": "data"} + + result, error = provider.execute(stt_config, query_params, invalid_input) + + assert result is None + assert error is not None + assert "STT requires file path as string" in error + + def test_stt_with_valid_file_path( + self, provider, mock_client, stt_config, query_params + ): + """Test STT execution with valid file path string.""" + mock_response = mock_google_response(text="Valid transcription") + mock_client.models.generate_content.return_value = mock_response + + result, error = provider.execute(stt_config, query_params, "/path/to/audio.wav") + + assert error is None + assert result is not None + assert result.response.output.text == "Valid transcription" + + +def mock_google_tts_response( + audio_data: bytes = b"fake_audio_data", + model: str = "gemini-2.5-pro-tts", + response_id: str = "resp_tts_123", +) -> SimpleNamespace: + """Create a mock Google AI TTS response object.""" + usage = SimpleNamespace( + prompt_token_count=20, + candidates_token_count=0, + total_token_count=20, + thoughts_token_count=0, + ) + + inline_data = SimpleNamespace(data=audio_data) + part = SimpleNamespace(inline_data=inline_data) + content = SimpleNamespace(parts=[part]) + candidate = SimpleNamespace(content=content) + + response = SimpleNamespace( + response_id=response_id, + model_version=model, + candidates=[candidate], + usage_metadata=usage, + model_dump=lambda: { + "response_id": response_id, + "model_version": model, + "usage_metadata": { + "prompt_token_count": 20, + "candidates_token_count": 0, + "total_token_count": 20, + "thoughts_token_count": 0, + }, + }, + ) + return response + + +class TestGoogleAIProviderTTS: + """Test cases for GoogleAIProvider TTS functionality.""" + + @pytest.fixture + def mock_client(self): + """Create a mock Google AI client.""" + return MagicMock() + + @pytest.fixture + def provider(self, mock_client): + """Create a GoogleAIProvider instance with mock client.""" + return GoogleAIProvider(client=mock_client) + + @pytest.fixture + def tts_config(self): + """Create a basic TTS completion config.""" + return NativeCompletionConfig( + provider="google-native", + type="tts", + params={ + "model": "gemini-2.5-pro-tts", + "voice": "Aoede", + "language": "en-US", + "response_format": "mp3", + "speed": 1.0, + }, + ) + + @pytest.fixture + def query_params(self): + """Create basic query parameters.""" + return QueryParams(input="Hello 
world. This is a test.") + + def test_tts_success_basic(self, provider, mock_client, tts_config, query_params): + """Test successful TTS execution with basic parameters.""" + mock_response = mock_google_tts_response(audio_data=b"audio_bytes_123") + mock_client.models.generate_content.return_value = mock_response + + result, error = provider.execute( + tts_config, query_params, "Hello world. This is a test." + ) + + assert error is None + assert result is not None + assert result.response.output.audio_base64 is not None + assert result.response.output.audio_format == "mp3" + assert result.response.model == "gemini-2.5-pro-tts" + assert result.response.provider == "google-native" + assert result.usage.input_tokens == 20 + assert result.usage.total_tokens == 20 + + # Verify API call + mock_client.models.generate_content.assert_called_once() + call_args = mock_client.models.generate_content.call_args + assert call_args[1]["model"] == "gemini-2.5-pro-tts" + assert call_args[1]["contents"] == "Hello world. This is a test." + + def test_tts_with_gemini_specific_params( + self, provider, mock_client, tts_config, query_params + ): + """Test TTS with Gemini-specific provider params (director notes).""" + tts_config.params["provider_specific"] = { + "gemini": { + "director_notes": "Speak professionally and calmly", + "pitch": 2.0, # Not supported yet, should log warning + "volume_gain_db": -5.0, # Not supported yet, should log warning + } + } + + mock_response = mock_google_tts_response() + mock_client.models.generate_content.return_value = mock_response + + result, error = provider.execute( + tts_config, query_params, "Test with director notes" + ) + + assert error is None + assert result is not None + + # Verify config was built with director notes (supported) + call_args = mock_client.models.generate_content.call_args + config = call_args[1]["config"] + assert config.system_instruction == "Speak professionally and calmly" + # Note: pitch and volume_gain_db are not supported in current SDK + # They should generate warnings but not cause errors + + def test_tts_format_mapping(self, provider, mock_client, query_params): + """Test that response format parameter is accepted (but not yet supported by SDK).""" + test_cases = ["mp3", "wav", "ogg"] + + for kaapi_format in test_cases: + mock_client.reset_mock() + mock_response = mock_google_tts_response() + mock_client.models.generate_content.return_value = mock_response + + config = NativeCompletionConfig( + provider="google-native", + type="tts", + params={ + "model": "gemini-2.5-pro-tts", + "voice": "Kore", + "language": "en-US", + "response_format": kaapi_format, + }, + ) + + result, error = provider.execute(config, query_params, "Test audio format") + + # Should succeed but log warning (format selection not yet supported) + assert error is None + assert result is not None + + def test_tts_with_different_voices(self, provider, mock_client, query_params): + """Test TTS with different voice options.""" + voices = ["Aoede", "Kore", "Fenrir", "Leda", "Charon"] + + for voice in voices: + mock_client.reset_mock() + mock_response = mock_google_tts_response() + mock_client.models.generate_content.return_value = mock_response + + config = NativeCompletionConfig( + provider="google-native", + type="tts", + params={ + "model": "gemini-2.5-pro-tts", + "voice": voice, + "language": "en-US", + }, + ) + + result, error = provider.execute(config, query_params, "Test voice") + + assert error is None + call_args = mock_client.models.generate_content.call_args + assert ( 
+ call_args[1][ + "config" + ].speech_config.voice_config.prebuilt_voice_config.voice_name + == voice + ) + + def test_tts_with_speed_variations(self, provider, mock_client, query_params): + """Test TTS with different speed settings (not yet supported by SDK).""" + speeds = [0.25, 0.5, 1.0, 2.0, 4.0] + + for speed in speeds: + mock_client.reset_mock() + mock_response = mock_google_tts_response() + mock_client.models.generate_content.return_value = mock_response + + config = NativeCompletionConfig( + provider="google-native", + type="tts", + params={ + "model": "gemini-2.5-pro-tts", + "voice": "Aoede", + "language": "en-US", + "speed": speed, + }, + ) + + result, error = provider.execute(config, query_params, "Test speed") + + # Should succeed but log warning for non-default speeds + # (speed parameter not yet supported by current SDK) + assert error is None + assert result is not None + + def test_tts_missing_model(self, provider, mock_client, query_params): + """Test error handling when model parameter is missing.""" + config = NativeCompletionConfig( + provider="google-native", + type="tts", + params={ + "voice": "Aoede", + "language": "en-US", + }, + ) + + result, error = provider.execute(config, query_params, "Test text") + + assert result is None + assert error is not None + assert "Missing 'model' in native params" in error + + def test_tts_missing_voice(self, provider, mock_client, query_params): + """Test error handling when voice parameter is missing.""" + config = NativeCompletionConfig( + provider="google-native", + type="tts", + params={ + "model": "gemini-2.5-pro-tts", + "language": "en-US", + }, + ) + + result, error = provider.execute(config, query_params, "Test text") + + assert result is None + assert error is not None + assert "Missing 'voice' in native params" in error + + def test_tts_missing_language(self, provider, mock_client, query_params): + """Test error handling when language parameter is missing.""" + config = NativeCompletionConfig( + provider="google-native", + type="tts", + params={ + "model": "gemini-2.5-pro-tts", + "voice": "Aoede", + }, + ) + + result, error = provider.execute(config, query_params, "Test text") + + assert result is None + assert error is not None + assert "Missing 'language' in native params" in error + + def test_tts_empty_text_input( + self, provider, mock_client, tts_config, query_params + ): + """Test error handling for empty text input.""" + result, error = provider.execute(tts_config, query_params, "") + + assert result is None + assert error is not None + assert "Text input cannot be empty" in error + + def test_tts_invalid_input_type( + self, provider, mock_client, tts_config, query_params + ): + """Test error handling for non-string input.""" + result, error = provider.execute(tts_config, query_params, {"invalid": "data"}) + + assert result is None + assert error is not None + assert "TTS requires text string as input" in error + + def test_tts_with_include_provider_raw_response( + self, provider, mock_client, tts_config, query_params + ): + """Test TTS with include_provider_raw_response=True.""" + mock_response = mock_google_tts_response() + mock_client.models.generate_content.return_value = mock_response + + result, error = provider.execute( + tts_config, + query_params, + "Test raw response", + include_provider_raw_response=True, + ) + + assert error is None + assert result is not None + assert result.provider_raw_response is not None + assert isinstance(result.provider_raw_response, dict) + + def test_tts_api_error(self, 
provider, mock_client, tts_config, query_params): + """Test handling of API errors during TTS generation.""" + mock_client.models.generate_content.side_effect = Exception( + "API rate limit exceeded" + ) + + result, error = provider.execute(tts_config, query_params, "Test text") + + assert result is None + assert error is not None + assert "Unexpected error occurred" in error + + def test_tts_missing_audio_data( + self, provider, mock_client, tts_config, query_params + ): + """Test error handling when response is missing audio data.""" + # Create response with no audio data + mock_response = mock_google_tts_response(audio_data=None) + mock_client.models.generate_content.return_value = mock_response + + result, error = provider.execute(tts_config, query_params, "Test text") + + assert result is None + assert error is not None + assert "missing audio data" in error.lower() diff --git a/backend/app/tests/services/llm/providers/test_openai.py b/backend/app/tests/services/llm/providers/test_openai.py index 4dfb671e6..05707830e 100644 --- a/backend/app/tests/services/llm/providers/test_openai.py +++ b/backend/app/tests/services/llm/providers/test_openai.py @@ -1,6 +1,7 @@ """ Tests for the OpenAI provider. """ + import pytest from unittest.mock import MagicMock, patch @@ -11,8 +12,9 @@ QueryParams, ) from app.models.llm.request import ConversationConfig -from app.services.llm.providers.openai import OpenAIProvider -from app.tests.utils.llm_provider import mock_openai_response + +from app.services.llm.providers.oai import OpenAIProvider +from app.tests.utils.openai import mock_openai_response class TestOpenAIProvider: @@ -33,6 +35,7 @@ def completion_config(self): """Create a basic completion config.""" return NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-4"}, ) @@ -53,7 +56,7 @@ def test_execute_success_without_conversation( mock_response = mock_openai_response(text="Test response", model="gpt-4") mock_client.responses.create.return_value = mock_response - result, error = provider.execute(completion_config, query_params) + result, error = provider.execute(completion_config, query_params, "Test query") assert error is None assert result is not None @@ -82,7 +85,7 @@ def test_execute_with_existing_conversation_id( ) mock_client.responses.create.return_value = mock_response - result, error = provider.execute(completion_config, query_params) + result, error = provider.execute(completion_config, query_params, "Test query") assert error is None assert result is not None @@ -93,7 +96,11 @@ def test_execute_with_existing_conversation_id( assert call_args[1]["conversation"] == {"id": conversation_id} def test_execute_with_auto_create_conversation( - self, provider, mock_client, completion_config, query_params + self, + provider, + mock_client, + completion_config, + query_params, ): """Test execution with auto-create conversation.""" new_conversation_id = "conv_auto_456" @@ -110,7 +117,7 @@ def test_execute_with_auto_create_conversation( ) mock_client.responses.create.return_value = mock_response - result, error = provider.execute(completion_config, query_params) + result, error = provider.execute(completion_config, query_params, "Test query") assert error is None assert result is not None @@ -133,7 +140,10 @@ def test_execute_with_include_provider_raw_response( mock_client.responses.create.return_value = mock_response result, error = provider.execute( - completion_config, query_params, include_provider_raw_response=True + completion_config, + query_params, + 
"Test query", + include_provider_raw_response=True, ) assert error is None @@ -150,7 +160,7 @@ def test_execute_with_type_error( "unexpected keyword argument 'invalid_param'" ) - result, error = provider.execute(completion_config, query_params) + result, error = provider.execute(completion_config, query_params, "Test query") assert result is None assert error is not None @@ -170,7 +180,9 @@ def test_execute_with_openai_api_error( with patch("app.utils.handle_openai_error") as mock_handler: mock_handler.return_value = "API request failed: rate limit exceeded" - result, error = provider.execute(completion_config, query_params) + result, error = provider.execute( + completion_config, query_params, "Test query" + ) assert result is None assert error is not None @@ -183,7 +195,7 @@ def test_execute_with_generic_exception( """Test handling of unexpected exceptions.""" mock_client.responses.create.side_effect = Exception("Timeout occurred") - result, error = provider.execute(completion_config, query_params) + result, error = provider.execute(completion_config, query_params, "Test query") assert result is None assert error is not None @@ -198,7 +210,7 @@ def test_execute_with_conversation_config_without_id_or_auto_create( mock_response = mock_openai_response(text="Test response", model="gpt-4") mock_client.responses.create.return_value = mock_response - result, error = provider.execute(completion_config, query_params) + result, error = provider.execute(completion_config, query_params, "Test query") assert error is None assert result is not None @@ -216,7 +228,7 @@ def test_execute_merges_params_correctly( mock_response = mock_openai_response(text="Test response", model="gpt-4") mock_client.responses.create.return_value = mock_response - result, error = provider.execute(completion_config, query_params) + result, error = provider.execute(completion_config, query_params, "Test query") assert error is None assert result is not None @@ -235,13 +247,14 @@ def test_execute_with_conversation_parameter_removed_when_no_config( # Create a config with conversation in params (should be removed) completion_config = NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-4", "conversation": {"id": "old_conv"}}, ) mock_response = mock_openai_response(text="Test response", model="gpt-4") mock_client.responses.create.return_value = mock_response - result, error = provider.execute(completion_config, query_params) + result, error = provider.execute(completion_config, query_params, "Test query") assert error is None assert result is not None diff --git a/backend/app/tests/services/llm/providers/test_registry.py b/backend/app/tests/services/llm/providers/test_registry.py index c05222747..b3daa44c4 100644 --- a/backend/app/tests/services/llm/providers/test_registry.py +++ b/backend/app/tests/services/llm/providers/test_registry.py @@ -1,6 +1,7 @@ """ Tests for the LLM provider registry. 
""" + import pytest from unittest.mock import patch @@ -8,7 +9,7 @@ from openai import OpenAI from app.services.llm.providers.base import BaseProvider -from app.services.llm.providers.openai import OpenAIProvider +from app.services.llm.providers.oai import OpenAIProvider from app.services.llm.providers.registry import ( LLMProvider, get_llm_provider, diff --git a/backend/app/tests/services/llm/test_jobs.py b/backend/app/tests/services/llm/test_jobs.py index 0aa3ad1f0..b28062642 100644 --- a/backend/app/tests/services/llm/test_jobs.py +++ b/backend/app/tests/services/llm/test_jobs.py @@ -16,7 +16,7 @@ LLMResponse, LLMOutput, Usage, - KaapiLLMParams, + # KaapiLLMParams, KaapiCompletionConfig, ) from app.models.llm.request import ConfigBlob, LLMCallConfig @@ -41,6 +41,7 @@ def llm_call_request(self): blob=ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-4"}, ) ) @@ -121,9 +122,10 @@ def test_handle_job_error(self, db: Session): callback_url = "https://example.com/callback" callback_response = APIResponse.failure_response(error="Test error occurred") - with patch("app.services.llm.jobs.Session") as mock_session_class, patch( - "app.services.llm.jobs.send_callback" - ) as mock_send_callback: + with ( + patch("app.services.llm.jobs.Session") as mock_session_class, + patch("app.services.llm.jobs.send_callback") as mock_send_callback, + ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -158,9 +160,10 @@ def test_handle_job_error_without_callback_url(self, db: Session): callback_response = APIResponse.failure_response(error="Test error occurred") - with patch("app.services.llm.jobs.Session") as mock_session_class, patch( - "app.services.llm.jobs.send_callback" - ) as mock_send_callback: + with ( + patch("app.services.llm.jobs.Session") as mock_session_class, + patch("app.services.llm.jobs.send_callback") as mock_send_callback, + ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -189,9 +192,10 @@ def test_handle_job_error_callback_failure_still_updates_job(self, db: Session): error="Test error with callback failure" ) - with patch("app.services.llm.jobs.Session") as mock_session_class, patch( - "app.services.llm.jobs.send_callback" - ) as mock_send_callback: + with ( + patch("app.services.llm.jobs.Session") as mock_session_class, + patch("app.services.llm.jobs.send_callback") as mock_send_callback, + ): mock_session_class.return_value.__enter__.return_value = db mock_session_class.return_value.__exit__.return_value = None @@ -225,6 +229,7 @@ def request_data(self): "config": { "blob": { "completion": { + "type": "text", "provider": "openai-native", "params": {"model": "gpt-4"}, } @@ -400,6 +405,7 @@ def test_stored_config_success(self, db, job_for_execution, mock_llm_response): config_blob = ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-4", "temperature": 0.7}, ) ) @@ -449,6 +455,7 @@ def test_stored_config_with_callback( config_blob = ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-3.5-turbo", "temperature": 0.5}, ) ) @@ -497,6 +504,7 @@ def test_stored_config_version_not_found(self, db, job_for_execution): config_blob = ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-4"}, ) ) @@ -532,11 +540,12 @@ def 
test_kaapi_config_success(self, db, job_for_execution, mock_llm_response): config_blob = ConfigBlob( completion=KaapiCompletionConfig( provider="openai", - params=KaapiLLMParams( - model="gpt-4", - temperature=0.7, - instructions="You are a helpful assistant", - ), + type="text", + params={ + "model": "gpt-4", + "temperature": 0.7, + "instructions": "You are a helpful assistant", + }, ) ) config = create_test_config(db, project_id=project.id, config_blob=config_blob) @@ -578,10 +587,12 @@ def test_kaapi_config_with_callback(self, db, job_for_execution, mock_llm_respon config_blob = ConfigBlob( completion=KaapiCompletionConfig( provider="openai", - params=KaapiLLMParams( - model="gpt-3.5-turbo", - temperature=0.5, - ), + type="text", + params={ + "model": "gpt-3.5-turbo", + "temperature": 0.7, + "instructions": "You are a helpful assistant", + }, ) ) config = create_test_config(db, project_id=project.id, config_blob=config_blob) @@ -628,10 +639,11 @@ def test_kaapi_config_warnings_passed_through_metadata( config_blob = ConfigBlob( completion=KaapiCompletionConfig( provider="openai", - params=KaapiLLMParams( - model="o1", # Reasoning model - temperature=0.7, # This will be suppressed with warning - ), + type="text", + params={ + "model": "o1", # Reasoning model + "temperature": 0.7, # This will be suppressed with warning + }, ) ) config = create_test_config(db, project_id=project.id, config_blob=config_blob) @@ -677,10 +689,11 @@ def test_kaapi_config_warnings_merged_with_existing_metadata( config_blob = ConfigBlob( completion=KaapiCompletionConfig( provider="openai", - params=KaapiLLMParams( - model="gpt-4", # Non-reasoning model - reasoning="high", # This will be suppressed with warning - ), + type="text", + params={ + "model": "gpt-4", # Non-reasoning model + "reasoning": "high", # This will be suppressed with warning + }, ) ) config = create_test_config(db, project_id=project.id, config_blob=config_blob) @@ -730,6 +743,7 @@ def test_resolve_config_blob_success(self, db: Session): config_blob = ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-4", "temperature": 0.8}, ) ) @@ -756,6 +770,7 @@ def test_resolve_config_blob_version_not_found(self, db: Session): config_blob = ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-4"}, ) ) @@ -781,6 +796,7 @@ def test_resolve_config_blob_invalid_blob_data(self, db: Session): config_blob = ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-4"}, ) ) @@ -818,6 +834,7 @@ def test_resolve_config_blob_with_multiple_versions(self, db: Session): config_blob_v1 = ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-3.5-turbo", "temperature": 0.5}, ) ) @@ -833,6 +850,7 @@ def test_resolve_config_blob_with_multiple_versions(self, db: Session): config_blob_v2 = ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-4", "temperature": 0.9}, ) ) @@ -872,11 +890,12 @@ def test_resolve_kaapi_config_blob_success(self, db: Session): config_blob = ConfigBlob( completion=KaapiCompletionConfig( provider="openai", - params=KaapiLLMParams( - model="gpt-4", - temperature=0.8, - instructions="You are a helpful assistant", - ), + type="text", + params={ + "model": "gpt-4", + "temperature": 0.8, + "instructions": "You are a helpful assistant", + }, ) ) config = create_test_config(db, 
project_id=project.id, config_blob=config_blob) @@ -893,10 +912,10 @@ def test_resolve_kaapi_config_blob_success(self, db: Session): assert resolved_blob is not None assert isinstance(resolved_blob.completion, KaapiCompletionConfig) assert resolved_blob.completion.provider == "openai" - assert resolved_blob.completion.params.model == "gpt-4" - assert resolved_blob.completion.params.temperature == 0.8 + assert resolved_blob.completion.params["model"] == "gpt-4" + assert resolved_blob.completion.params["temperature"] == 0.8 assert ( - resolved_blob.completion.params.instructions + resolved_blob.completion.params["instructions"] == "You are a helpful assistant" ) @@ -908,6 +927,7 @@ def test_resolve_both_native_and_kaapi_configs(self, db: Session): native_blob = ConfigBlob( completion=NativeCompletionConfig( provider="openai-native", + type="text", params={"model": "gpt-3.5-turbo", "temperature": 0.5}, ) ) @@ -919,10 +939,11 @@ def test_resolve_both_native_and_kaapi_configs(self, db: Session): kaapi_blob = ConfigBlob( completion=KaapiCompletionConfig( provider="openai", - params=KaapiLLMParams( - model="gpt-4", - temperature=0.7, - ), + type="text", + params={ + "model": "gpt-4", + "temperature": 0.7, + }, ) ) kaapi_config = create_test_config( diff --git a/backend/app/tests/services/llm/test_mappers.py b/backend/app/tests/services/llm/test_mappers.py index c020753d2..2ecbcd7b2 100644 --- a/backend/app/tests/services/llm/test_mappers.py +++ b/backend/app/tests/services/llm/test_mappers.py @@ -3,11 +3,19 @@ Tests the transformation of Kaapi-abstracted parameters to provider-native formats. """ + import pytest -from app.models.llm import KaapiLLMParams, KaapiCompletionConfig, NativeCompletionConfig +from app.models.llm.request import ( + TextLLMParams, + STTLLMParams, + TTSLLMParams, + KaapiCompletionConfig, + NativeCompletionConfig, +) from app.services.llm.mappers import ( map_kaapi_to_openai_params, + map_kaapi_to_google_params, transform_kaapi_config_to_native, ) @@ -17,21 +25,25 @@ class TestMapKaapiToOpenAIParams: def test_basic_model_mapping(self): """Test basic model parameter mapping.""" - kaapi_params = KaapiLLMParams(model="gpt-4o") + kaapi_params = TextLLMParams(model="gpt-4o") - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result == {"model": "gpt-4o"} assert warnings == [] def test_instructions_mapping(self): """Test instructions parameter mapping.""" - kaapi_params = KaapiLLMParams( + kaapi_params = TextLLMParams( model="gpt-4", instructions="You are a helpful assistant.", ) - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result["model"] == "gpt-4" assert result["instructions"] == "You are a helpful assistant." 
@@ -39,12 +51,14 @@ def test_instructions_mapping(self): def test_temperature_mapping(self): """Test temperature parameter mapping for non-reasoning models.""" - kaapi_params = KaapiLLMParams( + kaapi_params = TextLLMParams( model="gpt-4", temperature=0.7, ) - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result["model"] == "gpt-4" assert result["temperature"] == 0.7 @@ -52,24 +66,28 @@ def test_temperature_mapping(self): def test_temperature_zero_mapping(self): """Test that temperature=0 is correctly mapped (edge case).""" - kaapi_params = KaapiLLMParams( + kaapi_params = TextLLMParams( model="gpt-4", temperature=0.0, ) - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result["temperature"] == 0.0 assert warnings == [] def test_reasoning_mapping_for_reasoning_models(self): """Test reasoning parameter mapping to OpenAI format for reasoning-capable models.""" - kaapi_params = KaapiLLMParams( + kaapi_params = TextLLMParams( model="o1", reasoning="high", ) - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result["model"] == "o1" assert result["reasoning"] == {"effort": "high"} @@ -77,12 +95,14 @@ def test_reasoning_mapping_for_reasoning_models(self): def test_knowledge_base_ids_mapping(self): """Test knowledge_base_ids mapping to OpenAI tools format.""" - kaapi_params = KaapiLLMParams( + kaapi_params = TextLLMParams( model="gpt-4", knowledge_base_ids=["vs_abc123", "vs_def456"], ) - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result["model"] == "gpt-4" assert "tools" in result @@ -94,20 +114,22 @@ def test_knowledge_base_ids_mapping(self): def test_knowledge_base_with_max_num_results(self): """Test knowledge_base_ids with custom max_num_results.""" - kaapi_params = KaapiLLMParams( + kaapi_params = TextLLMParams( model="gpt-4", knowledge_base_ids=["vs_abc123"], max_num_results=50, ) - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result["tools"][0]["max_num_results"] == 50 assert warnings == [] def test_complete_parameter_mapping(self): """Test mapping all compatible parameters together.""" - kaapi_params = KaapiLLMParams( + kaapi_params = TextLLMParams( model="gpt-4o", instructions="You are an expert assistant.", temperature=0.8, @@ -115,7 +137,9 @@ def test_complete_parameter_mapping(self): max_num_results=30, ) - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result["model"] == "gpt-4o" assert result["instructions"] == "You are an expert assistant." 
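The suppression tests in the hunks below pin down a gating rule between temperature and reasoning. A rough sketch of that rule; REASONING_MODELS and the exact warning wording are illustrative assumptions, not the shipped implementation:

# Hypothetical sketch of the temperature/reasoning gating asserted below.
REASONING_MODELS = {"o1"}  # assumed registry of reasoning-capable models

def _apply_reasoning_rules(params: dict, native: dict, warnings: list[str]) -> None:
    model = params["model"]
    if model in REASONING_MODELS:
        if "reasoning" in params:
            # Kaapi's flat reasoning level maps to OpenAI's nested form.
            native["reasoning"] = {"effort": params["reasoning"]}
        if "temperature" in params:
            # Reasoning models reject sampling temperature; drop it with a warning.
            warnings.append(
                f"'temperature' is not supported for reasoning model '{model}'; suppressed"
            )
    else:
        if "temperature" in params:
            native["temperature"] = params["temperature"]
        if "reasoning" in params:
            warnings.append(
                f"'reasoning' is not supported for model '{model}'; suppressed"
            )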
@@ -127,12 +151,14 @@ def test_complete_parameter_mapping(self): def test_reasoning_suppressed_for_non_reasoning_models(self): """Test that reasoning is suppressed with warning for non-reasoning models.""" - kaapi_params = KaapiLLMParams( + kaapi_params = TextLLMParams( model="gpt-4", reasoning="high", ) - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result["model"] == "gpt-4" assert "reasoning" not in result @@ -142,13 +168,15 @@ def test_reasoning_suppressed_for_non_reasoning_models(self): def test_temperature_suppressed_for_reasoning_models(self): """Test that temperature is suppressed with warning for reasoning models when reasoning is set.""" - kaapi_params = KaapiLLMParams( + kaapi_params = TextLLMParams( model="o1", temperature=0.7, reasoning="high", ) - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result["model"] == "o1" assert result["reasoning"] == {"effort": "high"} @@ -159,12 +187,14 @@ def test_temperature_suppressed_for_reasoning_models(self): def test_temperature_without_reasoning_for_reasoning_models(self): """Test that temperature is suppressed for reasoning models even without explicit reasoning parameter.""" - kaapi_params = KaapiLLMParams( + kaapi_params = TextLLMParams( model="o1", temperature=0.7, ) - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result["model"] == "o1" assert "temperature" not in result @@ -175,21 +205,25 @@ def test_temperature_without_reasoning_for_reasoning_models(self): def test_minimal_params(self): """Test mapping with minimal parameters (only model).""" - kaapi_params = KaapiLLMParams(model="gpt-4") + kaapi_params = TextLLMParams(model="gpt-4") - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result == {"model": "gpt-4"} assert warnings == [] def test_only_knowledge_base_ids(self): """Test mapping with only knowledge_base_ids and model.""" - kaapi_params = KaapiLLMParams( + kaapi_params = TextLLMParams( model="gpt-4", knowledge_base_ids=["vs_xyz"], ) - result, warnings = map_kaapi_to_openai_params(kaapi_params) + result, warnings = map_kaapi_to_openai_params( + kaapi_params.model_dump(exclude_none=True) + ) assert result["model"] == "gpt-4" assert "tools" in result @@ -197,6 +231,106 @@ def test_only_knowledge_base_ids(self): assert warnings == [] +class TestMapKaapiToGoogleParams: + """Test cases for map_kaapi_to_google_params function.""" + + def test_basic_model_mapping(self): + """Test basic model parameter mapping.""" + kaapi_params = TextLLMParams(model="gemini-2.5-pro") + + result, warnings = map_kaapi_to_google_params( + kaapi_params.model_dump(exclude_none=True) + ) + + assert result == {"model": "gemini-2.5-pro"} + assert warnings == [] + + def test_instructions_mapping(self): + """Test instructions parameter mapping.""" + kaapi_params = STTLLMParams( + model="gemini-2.5-pro", + instructions="Transcribe this audio accurately.", + ) + + result, warnings = map_kaapi_to_google_params( + kaapi_params.model_dump(exclude_none=True) + ) + + assert result["model"] == "gemini-2.5-pro" + assert result["instructions"] == "Transcribe this audio accurately." 
+ assert warnings == [] + + def test_temperature_mapping(self): + """Test temperature parameter mapping.""" + kaapi_params = TextLLMParams( + model="gemini-2.5-pro", + temperature=0.7, + ) + + result, warnings = map_kaapi_to_google_params( + kaapi_params.model_dump(exclude_none=True) + ) + + assert result["model"] == "gemini-2.5-pro" + assert result["temperature"] == 0.7 + assert warnings == [] + + def test_knowledge_base_ids_warning(self): + """Test that knowledge_base_ids are not supported and generate warning.""" + kaapi_params = TextLLMParams( + model="gemini-2.5-pro", + knowledge_base_ids=["vs_abc123"], + ) + + result, warnings = map_kaapi_to_google_params( + kaapi_params.model_dump(exclude_none=True) + ) + + assert result["model"] == "gemini-2.5-pro" + assert "knowledge_base_ids" not in result + assert len(warnings) == 1 + assert "knowledge_base_ids" in warnings[0].lower() + assert "not supported" in warnings[0] + + def test_reasoning_warning(self): + """Test that reasoning parameter is not supported and generates warning.""" + kaapi_params = TextLLMParams( + model="gemini-2.5-pro", + reasoning="high", + ) + + result, warnings = map_kaapi_to_google_params( + kaapi_params.model_dump(exclude_none=True) + ) + + assert result["model"] == "gemini-2.5-pro" + assert "reasoning" not in result + assert len(warnings) == 1 + assert "reasoning" in warnings[0].lower() + assert "not applicable" in warnings[0] + + def test_multiple_unsupported_params(self): + """Test that multiple unsupported parameters generate multiple warnings.""" + kaapi_params = TextLLMParams( + model="gemini-2.5-pro", + reasoning="medium", + knowledge_base_ids=["vs_123"], + ) + + result, warnings = map_kaapi_to_google_params( + kaapi_params.model_dump(exclude_none=True) + ) + + assert result["model"] == "gemini-2.5-pro" + assert "reasoning" not in result + assert "knowledge_base_ids" not in result + assert len(warnings) == 2 + # Check both warnings are present + warning_text = " ".join(warnings).lower() + assert "reasoning" in warning_text + assert "knowledge_base_ids" in warning_text + + class TestTransformKaapiConfigToNative: """Test cases for transform_kaapi_config_to_native function.""" @@ -204,10 +338,11 @@ def test_transform_openai_config(self): """Test transformation of Kaapi OpenAI config to native format.""" kaapi_config = KaapiCompletionConfig( provider="openai", - params=KaapiLLMParams( - model="gpt-4", - temperature=0.7, - ), + type="text", + params={ + "model": "gpt-4", + "temperature": 0.7, + }, ) result, warnings = transform_kaapi_config_to_native(kaapi_config) @@ -222,13 +357,14 @@ def test_transform_with_all_params(self): """Test transformation with all Kaapi parameters.""" kaapi_config = KaapiCompletionConfig( provider="openai", - params=KaapiLLMParams( - model="gpt-4o", - instructions="System prompt here", - temperature=0.5, - knowledge_base_ids=["vs_abc"], - max_num_results=25, - ), + type="text", + params={ + "model": "gpt-4o", + "instructions": "System prompt here", + "temperature": 0.5, + "knowledge_base_ids": ["vs_abc"], + "max_num_results": 25, + }, ) result, warnings = transform_kaapi_config_to_native(kaapi_config) @@ -245,10 +381,11 @@ def test_transform_with_reasoning(self): """Test transformation with reasoning parameter for reasoning-capable models.""" kaapi_config = KaapiCompletionConfig( provider="openai", - params=KaapiLLMParams( - model="o1", - reasoning="medium", - ), + type="text", + params={ + "model": "o1", + "reasoning": "medium", + }, ) result, warnings = 
@@ -262,11 +399,12 @@ def test_transform_with_both_temperature_and_reasoning(self):
         """Test that transformation handles temperature + reasoning intelligently for reasoning models."""
         kaapi_config = KaapiCompletionConfig(
             provider="openai",
-            params=KaapiLLMParams(
-                model="o1",
-                temperature=0.7,
-                reasoning="high",
-            ),
+            type="text",
+            params={
+                "model": "o1",
+                "temperature": 0.7,
+                "reasoning": "high",
+            },
         )
 
         result, warnings = transform_kaapi_config_to_native(kaapi_config)
@@ -288,7 +426,7 @@ def test_unsupported_provider_raises_error(self):
         mock_config = MagicMock()
         mock_config.provider = "unsupported-provider"
-        mock_config.params = KaapiLLMParams(model="some-model")
+        mock_config.params = {"model": "some-model"}
 
         with pytest.raises(ValueError) as exc_info:
             transform_kaapi_config_to_native(mock_config)
@@ -299,11 +437,12 @@ def test_transform_preserves_param_structure(self):
         """Test that transformation correctly structures nested parameters."""
         kaapi_config = KaapiCompletionConfig(
             provider="openai",
-            params=KaapiLLMParams(
-                model="gpt-4",
-                knowledge_base_ids=["vs_1", "vs_2", "vs_3"],
-                max_num_results=15,
-            ),
+            type="text",
+            params={
+                "model": "gpt-4",
+                "knowledge_base_ids": ["vs_1", "vs_2", "vs_3"],
+                "max_num_results": 15,
+            },
         )
 
         result, warnings = transform_kaapi_config_to_native(kaapi_config)
@@ -314,3 +453,44 @@
         assert isinstance(result.params["tools"][0]["vector_store_ids"], list)
         assert len(result.params["tools"][0]["vector_store_ids"]) == 3
         assert warnings == []
+
+    def test_transform_google_config(self):
+        """Test transformation of Kaapi Google AI config to native format."""
+        kaapi_config = KaapiCompletionConfig(
+            provider="google",
+            type="stt",
+            params={
+                "model": "gemini-2.5-pro",
+                "instructions": "Transcribe accurately",
+                "temperature": 0.2,
+            },
+        )
+
+        result, warnings = transform_kaapi_config_to_native(kaapi_config)
+
+        assert isinstance(result, NativeCompletionConfig)
+        assert result.provider == "google-native"
+        assert result.params["model"] == "gemini-2.5-pro"
+        assert result.params["instructions"] == "Transcribe accurately"
+        assert result.params["temperature"] == 0.2
+        assert warnings == []
+
+    def test_transform_google_with_unsupported_params(self):
+        """Test that Google transformation warns about unsupported parameters."""
+        kaapi_config = KaapiCompletionConfig(
+            provider="google",
+            type="text",
+            params={
+                "model": "gemini-2.5-pro",
+                "knowledge_base_ids": ["vs_123"],
+                "reasoning": "high",
+            },
+        )
+
+        result, warnings = transform_kaapi_config_to_native(kaapi_config)
+
+        assert result.provider == "google-native"
+        assert result.params["model"] == "gemini-2.5-pro"
+        assert "knowledge_base_ids" not in result.params
+        assert "reasoning" not in result.params
+        assert len(warnings) == 2
diff --git a/backend/app/tests/utils/test_data.py b/backend/app/tests/utils/test_data.py
index 8745195d7..dab2c2c44 100644
--- a/backend/app/tests/utils/test_data.py
+++ b/backend/app/tests/utils/test_data.py
@@ -269,10 +269,11 @@ def create_test_config(
         config_blob = ConfigBlob(
             completion=KaapiCompletionConfig(
                 provider="openai",
-                params=KaapiLLMParams(
-                    model="gpt-4",
-                    temperature=0.7,
-                ),
+                type="text",
+                params={
+                    "model": "gpt-4",
+                    "temperature": 0.7,
+                },
             )
         )
     else:
@@ -280,6 +281,7 @@ def create_test_config(
         config_blob = ConfigBlob(
             completion=NativeCompletionConfig(
                 provider="openai-native",
+                type="text",
                 params={
                     "model": "gpt-4",
                     "temperature": 0.7,
@@ -311,19 +313,77 @@ def create_test_version(
     """
     Creates and returns a test version for an existing configuration.
 
+    If config_blob is not provided, fetches the latest version and creates
+    a new version with the same type, provider, and similar params.
+
     Persists the version to the database.
     """
     if config_blob is None:
-        config_blob = ConfigBlob(
-            completion=NativeCompletionConfig(
-                provider="openai-native",
-                params={
-                    "model": "gpt-4",
-                    "temperature": 0.8,
-                    "max_tokens": 1500,
-                },
+        # Fetch the latest version to maintain type consistency
+        from sqlmodel import select, and_
+        from app.models import ConfigVersion
+
+        stmt = (
+            select(ConfigVersion)
+            .where(
+                and_(
+                    ConfigVersion.config_id == config_id,
+                    ConfigVersion.deleted_at.is_(None),
+                )
             )
+            .order_by(ConfigVersion.version.desc())
+            .limit(1)
         )
+        latest_version = db.exec(stmt).first()
+
+        if latest_version:
+            # Extract the type and provider from the latest version
+            completion_config = latest_version.config_blob.get("completion", {})
+            config_type = completion_config.get("type")
+            provider = completion_config.get("provider", "openai-native")
+
+            # Create a new config_blob maintaining the same type and provider
+            if provider in ["openai-native", "google-native"]:
+                config_blob = ConfigBlob(
+                    completion=NativeCompletionConfig(
+                        provider=provider,
+                        type=config_type,
+                        params={
+                            "model": completion_config.get("params", {}).get(
+                                "model", "gpt-4"
+                            ),
+                            "temperature": 0.8,
+                            "max_tokens": 1500,
+                        },
+                    )
+                )
+            else:
+                # For Kaapi providers (openai, google)
+                config_blob = ConfigBlob(
+                    completion=KaapiCompletionConfig(
+                        provider=provider,
+                        type=config_type,
+                        params={
+                            "model": completion_config.get("params", {}).get(
+                                "model", "gpt-4"
+                            ),
+                            "temperature": 0.8,
+                        },
+                    )
+                )
+        else:
+            # Fallback if no previous version exists (shouldn't happen in normal flow)
+            config_blob = ConfigBlob(
+                completion=NativeCompletionConfig(
+                    provider="openai-native",
+                    type="text",
+                    params={
+                        "model": "gpt-4",
+                        "temperature": 0.8,
+                        "max_tokens": 1500,
+                    },
+                )
+            )
 
     version_create = ConfigVersionCreate(
         config_blob=config_blob,
diff --git a/backend/pyproject.toml b/backend/pyproject.toml
index 6030fc0a1..ce51387d4 100644
--- a/backend/pyproject.toml
+++ b/backend/pyproject.toml
@@ -36,6 +36,7 @@ dependencies = [
     "celery>=5.3.0,<6.0.0",
     "redis>=5.0.0,<6.0.0",
     "flower>=2.0.1",
+    "google-genai>=1.59.0",
 ]
 
 [tool.uv]
diff --git a/backend/uv.lock b/backend/uv.lock
index fb79c631c..472d21025 100644
--- a/backend/uv.lock
+++ b/backend/uv.lock
@@ -137,16 +137,16 @@ wheels = [
 
 [[package]]
 name = "alembic"
-version = "1.18.1"
+version = "1.18.2"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "mako" },
     { name = "sqlalchemy" },
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/49/cc/aca263693b2ece99fa99a09b6d092acb89973eb2bb575faef1777e04f8b4/alembic-1.18.1.tar.gz", hash = "sha256:83ac6b81359596816fb3b893099841a0862f2117b2963258e965d70dc62fb866", size = 2044319, upload-time = "2026-01-14T18:53:14.907Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/a7/93/07f5ba5d8e4f4049e864faa9d822bbbbfb6f3223a4ffb1376768ab9ee4b8/alembic-1.18.2.tar.gz", hash = "sha256:1c3ddb635f26efbc80b1b90c5652548202022d4e760f6a78d6d85959280e3684", size = 2048272, upload-time = "2026-01-28T21:23:30.914Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/83/36/cd9cb6101e81e39076b2fbe303bfa3c85ca34e55142b0324fcbf22c5c6e2/alembic-1.18.1-py3-none-any.whl", hash =
"sha256:f1c3b0920b87134e851c25f1f7f236d8a332c34b75416802d06971df5d1b7810", size = 260973, upload-time = "2026-01-14T18:53:17.533Z" }, + { url = "https://files.pythonhosted.org/packages/1a/60/ced4277ccf61f91eb03c4ac9f63b9567eb814f9ab1cd7835f00fbd5d0c14/alembic-1.18.2-py3-none-any.whl", hash = "sha256:18a5f6448af4864cc308aadf33eb37c0116da9a60fd9bb3f31ccb1b522b4a9b9", size = 261953, upload-time = "2026-01-28T21:23:32.508Z" }, ] [[package]] @@ -206,6 +206,7 @@ dependencies = [ { name = "emails" }, { name = "fastapi", extra = ["standard"] }, { name = "flower" }, + { name = "google-genai" }, { name = "httpx" }, { name = "jinja2" }, { name = "langfuse" }, @@ -252,6 +253,7 @@ requires-dist = [ { name = "emails", specifier = ">=0.6,<1.0" }, { name = "fastapi", extras = ["standard"], specifier = ">=0.116.0" }, { name = "flower", specifier = ">=2.0.1" }, + { name = "google-genai", specifier = ">=1.59.0" }, { name = "httpx", specifier = ">=0.25.1,<1.0.0" }, { name = "jinja2", specifier = ">=3.1.4,<4.0.0" }, { name = "langfuse", specifier = "==2.60.3" }, @@ -348,39 +350,39 @@ wheels = [ [[package]] name = "boto3" -version = "1.42.35" +version = "1.42.37" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore" }, { name = "jmespath" }, { name = "s3transfer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e8/a4/e70cc79e8f91836c06021c35507c843e5bc39a2020a85a6a27a492b50f78/boto3-1.42.35.tar.gz", hash = "sha256:edbfbfbadd419e65888166dd044786d4b731cf60abeb2301b73e775e154d7c5e", size = 112928, upload-time = "2026-01-26T20:35:37.524Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/ef/0d6ceb88ae2b3638b956190a431e4a8a3697d5769d4bbbede8efcccacaea/boto3-1.42.37.tar.gz", hash = "sha256:d8b6c52c86f3bf04f71a5a53e7fb4d1527592afebffa5170cf3ef7d70966e610", size = 112830, upload-time = "2026-01-28T20:38:43.339Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/57/26/75b6301514c74c398207462086af6cfe2a875fd8700a6e508559bb1ed21a/boto3-1.42.35-py3-none-any.whl", hash = "sha256:4251bbac90e4a190680439973d9e9ed851e50292c10cd063c8bf0c365410ffe1", size = 140606, upload-time = "2026-01-26T20:35:35.398Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a4/cd334f74498acc6ad42a69c48e8c495f6f721d8abe13f8ef0d4b862fb1c0/boto3-1.42.37-py3-none-any.whl", hash = "sha256:e1e38fd178ffc66cfbe9cb6838b8c460000c3eb741e5f40f57eb730780ef0ed4", size = 140604, upload-time = "2026-01-28T20:38:42.135Z" }, ] [[package]] name = "botocore" -version = "1.42.35" +version = "1.42.37" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jmespath" }, { name = "python-dateutil" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e2/3d/339edff36a3c6617900ec9d7a1203ffe4e06ffee1e5bd71126e31cd59e30/botocore-1.42.35.tar.gz", hash = "sha256:40a6e0f16afe9e5d42e956f0b6d909869793fadb21780e409063601fc3d094b8", size = 14903745, upload-time = "2026-01-26T20:35:25.85Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d5/4d/94292e7686e64d2ede8dae7102bbb11a1474e407c830de4192f2518e6cff/botocore-1.42.37.tar.gz", hash = "sha256:3ec58eb98b0857f67a2ae6aa3ded51597e7335f7640be654e0e86da4f173b5b2", size = 14914621, upload-time = "2026-01-28T20:38:34.586Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/74/b6/68f0aec79462852f367128dd8892e47176da46a787386d1730ec5bbbfb01/botocore-1.42.35-py3-none-any.whl", hash = "sha256:b89f527987691abbd1374c4116cc2711471ce48e6da502db17e92b17b2af8d47", size = 14581567, upload-time = 
"2026-01-26T20:35:23.346Z" }, + { url = "https://files.pythonhosted.org/packages/72/30/54042dd3ad8161964f8f47aa418785079bd8d2f17053c40d65bafb9f6eed/botocore-1.42.37-py3-none-any.whl", hash = "sha256:f13bb8b560a10714d96fb7b0c7f17828dfa6e6606a1ead8c01c6ebb8765acbd8", size = 14589390, upload-time = "2026-01-28T20:38:31.306Z" }, ] [[package]] name = "cachetools" -version = "6.2.5" +version = "6.2.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/86/e7/18ea2907d2ca91e9c0697596b8e60cd485b091152eb4109fad1e468e457d/cachetools-6.2.5.tar.gz", hash = "sha256:6d8bfbba1ba94412fb9d9196c4da7a87e9d4928fffc5e93542965dca4740c77f", size = 32168, upload-time = "2026-01-25T14:57:40.349Z" } +sdist = { url = "https://files.pythonhosted.org/packages/39/91/d9ae9a66b01102a18cd16db0cf4cd54187ffe10f0865cc80071a4104fbb3/cachetools-6.2.6.tar.gz", hash = "sha256:16c33e1f276b9a9c0b49ab5782d901e3ad3de0dd6da9bf9bcd29ac5672f2f9e6", size = 32363, upload-time = "2026-01-27T20:32:59.956Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e3/a6/24169d70ec5264b65ba54ba49b3d10f46d6b1ad97e185c94556539b3dfc8/cachetools-6.2.5-py3-none-any.whl", hash = "sha256:db3ae5465e90befb7c74720dd9308d77a09b7cf13433570e07caa0845c30d5fe", size = 11553, upload-time = "2026-01-25T14:57:39.112Z" }, + { url = "https://files.pythonhosted.org/packages/90/45/f458fa2c388e79dd9d8b9b0c99f1d31b568f27388f2fdba7bb66bbc0c6ed/cachetools-6.2.6-py3-none-any.whl", hash = "sha256:8c9717235b3c651603fff0076db52d6acbfd1b338b8ed50256092f7ce9c85bda", size = 11668, upload-time = "2026-01-27T20:32:58.527Z" }, ] [[package]] @@ -678,58 +680,55 @@ wheels = [ [[package]] name = "cryptography" -version = "46.0.3" +version = "46.0.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9f/33/c00162f49c0e2fe8064a62cb92b93e50c74a72bc370ab92f86112b33ff62/cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1", size = 749258, upload-time = "2025-10-15T23:18:31.74Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1d/42/9c391dd801d6cf0d561b5890549d4b27bafcc53b39c31a817e69d87c625b/cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a", size = 7225004, upload-time = "2025-10-15T23:16:52.239Z" }, - { url = "https://files.pythonhosted.org/packages/1c/67/38769ca6b65f07461eb200e85fc1639b438bdc667be02cf7f2cd6a64601c/cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc", size = 4296667, upload-time = "2025-10-15T23:16:54.369Z" }, - { url = "https://files.pythonhosted.org/packages/5c/49/498c86566a1d80e978b42f0d702795f69887005548c041636df6ae1ca64c/cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d", size = 4450807, upload-time = "2025-10-15T23:16:56.414Z" }, - { url = "https://files.pythonhosted.org/packages/4b/0a/863a3604112174c8624a2ac3c038662d9e59970c7f926acdcfaed8d61142/cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb", size = 4299615, upload-time = "2025-10-15T23:16:58.442Z" }, - { url = 
"https://files.pythonhosted.org/packages/64/02/b73a533f6b64a69f3cd3872acb6ebc12aef924d8d103133bb3ea750dc703/cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849", size = 4016800, upload-time = "2025-10-15T23:17:00.378Z" }, - { url = "https://files.pythonhosted.org/packages/25/d5/16e41afbfa450cde85a3b7ec599bebefaef16b5c6ba4ec49a3532336ed72/cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8", size = 4984707, upload-time = "2025-10-15T23:17:01.98Z" }, - { url = "https://files.pythonhosted.org/packages/c9/56/e7e69b427c3878352c2fb9b450bd0e19ed552753491d39d7d0a2f5226d41/cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec", size = 4482541, upload-time = "2025-10-15T23:17:04.078Z" }, - { url = "https://files.pythonhosted.org/packages/78/f6/50736d40d97e8483172f1bb6e698895b92a223dba513b0ca6f06b2365339/cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91", size = 4299464, upload-time = "2025-10-15T23:17:05.483Z" }, - { url = "https://files.pythonhosted.org/packages/00/de/d8e26b1a855f19d9994a19c702fa2e93b0456beccbcfe437eda00e0701f2/cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e", size = 4950838, upload-time = "2025-10-15T23:17:07.425Z" }, - { url = "https://files.pythonhosted.org/packages/8f/29/798fc4ec461a1c9e9f735f2fc58741b0daae30688f41b2497dcbc9ed1355/cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926", size = 4481596, upload-time = "2025-10-15T23:17:09.343Z" }, - { url = "https://files.pythonhosted.org/packages/15/8d/03cd48b20a573adfff7652b76271078e3045b9f49387920e7f1f631d125e/cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71", size = 4426782, upload-time = "2025-10-15T23:17:11.22Z" }, - { url = "https://files.pythonhosted.org/packages/fa/b1/ebacbfe53317d55cf33165bda24c86523497a6881f339f9aae5c2e13e57b/cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac", size = 4698381, upload-time = "2025-10-15T23:17:12.829Z" }, - { url = "https://files.pythonhosted.org/packages/96/92/8a6a9525893325fc057a01f654d7efc2c64b9de90413adcf605a85744ff4/cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018", size = 3055988, upload-time = "2025-10-15T23:17:14.65Z" }, - { url = "https://files.pythonhosted.org/packages/7e/bf/80fbf45253ea585a1e492a6a17efcb93467701fa79e71550a430c5e60df0/cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb", size = 3514451, upload-time = "2025-10-15T23:17:16.142Z" }, - { url = "https://files.pythonhosted.org/packages/2e/af/9b302da4c87b0beb9db4e756386a7c6c5b8003cd0e742277888d352ae91d/cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c", size = 2928007, upload-time = "2025-10-15T23:17:18.04Z" }, - { url = 
"https://files.pythonhosted.org/packages/f5/e2/a510aa736755bffa9d2f75029c229111a1d02f8ecd5de03078f4c18d91a3/cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217", size = 7158012, upload-time = "2025-10-15T23:17:19.982Z" }, - { url = "https://files.pythonhosted.org/packages/73/dc/9aa866fbdbb95b02e7f9d086f1fccfeebf8953509b87e3f28fff927ff8a0/cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5", size = 4288728, upload-time = "2025-10-15T23:17:21.527Z" }, - { url = "https://files.pythonhosted.org/packages/c5/fd/bc1daf8230eaa075184cbbf5f8cd00ba9db4fd32d63fb83da4671b72ed8a/cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715", size = 4435078, upload-time = "2025-10-15T23:17:23.042Z" }, - { url = "https://files.pythonhosted.org/packages/82/98/d3bd5407ce4c60017f8ff9e63ffee4200ab3e23fe05b765cab805a7db008/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54", size = 4293460, upload-time = "2025-10-15T23:17:24.885Z" }, - { url = "https://files.pythonhosted.org/packages/26/e9/e23e7900983c2b8af7a08098db406cf989d7f09caea7897e347598d4cd5b/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459", size = 3995237, upload-time = "2025-10-15T23:17:26.449Z" }, - { url = "https://files.pythonhosted.org/packages/91/15/af68c509d4a138cfe299d0d7ddb14afba15233223ebd933b4bbdbc7155d3/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422", size = 4967344, upload-time = "2025-10-15T23:17:28.06Z" }, - { url = "https://files.pythonhosted.org/packages/ca/e3/8643d077c53868b681af077edf6b3cb58288b5423610f21c62aadcbe99f4/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7", size = 4466564, upload-time = "2025-10-15T23:17:29.665Z" }, - { url = "https://files.pythonhosted.org/packages/0e/43/c1e8726fa59c236ff477ff2b5dc071e54b21e5a1e51aa2cee1676f1c986f/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044", size = 4292415, upload-time = "2025-10-15T23:17:31.686Z" }, - { url = "https://files.pythonhosted.org/packages/42/f9/2f8fefdb1aee8a8e3256a0568cffc4e6d517b256a2fe97a029b3f1b9fe7e/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665", size = 4931457, upload-time = "2025-10-15T23:17:33.478Z" }, - { url = "https://files.pythonhosted.org/packages/79/30/9b54127a9a778ccd6d27c3da7563e9f2d341826075ceab89ae3b41bf5be2/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3", size = 4466074, upload-time = "2025-10-15T23:17:35.158Z" }, - { url = "https://files.pythonhosted.org/packages/ac/68/b4f4a10928e26c941b1b6a179143af9f4d27d88fe84a6a3c53592d2e76bf/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20", size = 4420569, 
upload-time = "2025-10-15T23:17:37.188Z" }, - { url = "https://files.pythonhosted.org/packages/a3/49/3746dab4c0d1979888f125226357d3262a6dd40e114ac29e3d2abdf1ec55/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de", size = 4681941, upload-time = "2025-10-15T23:17:39.236Z" }, - { url = "https://files.pythonhosted.org/packages/fd/30/27654c1dbaf7e4a3531fa1fc77986d04aefa4d6d78259a62c9dc13d7ad36/cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914", size = 3022339, upload-time = "2025-10-15T23:17:40.888Z" }, - { url = "https://files.pythonhosted.org/packages/f6/30/640f34ccd4d2a1bc88367b54b926b781b5a018d65f404d409aba76a84b1c/cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db", size = 3494315, upload-time = "2025-10-15T23:17:42.769Z" }, - { url = "https://files.pythonhosted.org/packages/ba/8b/88cc7e3bd0a8e7b861f26981f7b820e1f46aa9d26cc482d0feba0ecb4919/cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21", size = 2919331, upload-time = "2025-10-15T23:17:44.468Z" }, - { url = "https://files.pythonhosted.org/packages/fd/23/45fe7f376a7df8daf6da3556603b36f53475a99ce4faacb6ba2cf3d82021/cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936", size = 7218248, upload-time = "2025-10-15T23:17:46.294Z" }, - { url = "https://files.pythonhosted.org/packages/27/32/b68d27471372737054cbd34c84981f9edbc24fe67ca225d389799614e27f/cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683", size = 4294089, upload-time = "2025-10-15T23:17:48.269Z" }, - { url = "https://files.pythonhosted.org/packages/26/42/fa8389d4478368743e24e61eea78846a0006caffaf72ea24a15159215a14/cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d", size = 4440029, upload-time = "2025-10-15T23:17:49.837Z" }, - { url = "https://files.pythonhosted.org/packages/5f/eb/f483db0ec5ac040824f269e93dd2bd8a21ecd1027e77ad7bdf6914f2fd80/cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0", size = 4297222, upload-time = "2025-10-15T23:17:51.357Z" }, - { url = "https://files.pythonhosted.org/packages/fd/cf/da9502c4e1912cb1da3807ea3618a6829bee8207456fbbeebc361ec38ba3/cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc", size = 4012280, upload-time = "2025-10-15T23:17:52.964Z" }, - { url = "https://files.pythonhosted.org/packages/6b/8f/9adb86b93330e0df8b3dcf03eae67c33ba89958fc2e03862ef1ac2b42465/cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3", size = 4978958, upload-time = "2025-10-15T23:17:54.965Z" }, - { url = "https://files.pythonhosted.org/packages/d1/a0/5fa77988289c34bdb9f913f5606ecc9ada1adb5ae870bd0d1054a7021cc4/cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971", size = 4473714, upload-time = 
"2025-10-15T23:17:56.754Z" }, - { url = "https://files.pythonhosted.org/packages/14/e5/fc82d72a58d41c393697aa18c9abe5ae1214ff6f2a5c18ac470f92777895/cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac", size = 4296970, upload-time = "2025-10-15T23:17:58.588Z" }, - { url = "https://files.pythonhosted.org/packages/78/06/5663ed35438d0b09056973994f1aec467492b33bd31da36e468b01ec1097/cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04", size = 4940236, upload-time = "2025-10-15T23:18:00.897Z" }, - { url = "https://files.pythonhosted.org/packages/fc/59/873633f3f2dcd8a053b8dd1d38f783043b5fce589c0f6988bf55ef57e43e/cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506", size = 4472642, upload-time = "2025-10-15T23:18:02.749Z" }, - { url = "https://files.pythonhosted.org/packages/3d/39/8e71f3930e40f6877737d6f69248cf74d4e34b886a3967d32f919cc50d3b/cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963", size = 4423126, upload-time = "2025-10-15T23:18:04.85Z" }, - { url = "https://files.pythonhosted.org/packages/cd/c7/f65027c2810e14c3e7268353b1681932b87e5a48e65505d8cc17c99e36ae/cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4", size = 4686573, upload-time = "2025-10-15T23:18:06.908Z" }, - { url = "https://files.pythonhosted.org/packages/0a/6e/1c8331ddf91ca4730ab3086a0f1be19c65510a33b5a441cb334e7a2d2560/cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df", size = 3036695, upload-time = "2025-10-15T23:18:08.672Z" }, - { url = "https://files.pythonhosted.org/packages/90/45/b0d691df20633eff80955a0fc7695ff9051ffce8b69741444bd9ed7bd0db/cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f", size = 3501720, upload-time = "2025-10-15T23:18:10.632Z" }, - { url = "https://files.pythonhosted.org/packages/e8/cb/2da4cc83f5edb9c3257d09e1e7ab7b23f049c7962cae8d842bbef0a9cec9/cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372", size = 2918740, upload-time = "2025-10-15T23:18:12.277Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/78/19/f748958276519adf6a0c1e79e7b8860b4830dda55ccdf29f2719b5fc499c/cryptography-46.0.4.tar.gz", hash = "sha256:bfd019f60f8abc2ed1b9be4ddc21cfef059c841d86d710bb69909a688cbb8f59", size = 749301, upload-time = "2026-01-28T00:24:37.379Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/99/157aae7949a5f30d51fcb1a9851e8ebd5c74bf99b5285d8bb4b8b9ee641e/cryptography-46.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:281526e865ed4166009e235afadf3a4c4cba6056f99336a99efba65336fd5485", size = 7173686, upload-time = "2026-01-28T00:23:07.515Z" }, + { url = "https://files.pythonhosted.org/packages/87/91/874b8910903159043b5c6a123b7e79c4559ddd1896e38967567942635778/cryptography-46.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5f14fba5bf6f4390d7ff8f086c566454bff0411f6d8aa7af79c88b6f9267aecc", size = 4275871, upload-time = "2026-01-28T00:23:09.439Z" }, + { url = 
"https://files.pythonhosted.org/packages/c0/35/690e809be77896111f5b195ede56e4b4ed0435b428c2f2b6d35046fbb5e8/cryptography-46.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47bcd19517e6389132f76e2d5303ded6cf3f78903da2158a671be8de024f4cd0", size = 4423124, upload-time = "2026-01-28T00:23:11.529Z" }, + { url = "https://files.pythonhosted.org/packages/1a/5b/a26407d4f79d61ca4bebaa9213feafdd8806dc69d3d290ce24996d3cfe43/cryptography-46.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:01df4f50f314fbe7009f54046e908d1754f19d0c6d3070df1e6268c5a4af09fa", size = 4277090, upload-time = "2026-01-28T00:23:13.123Z" }, + { url = "https://files.pythonhosted.org/packages/0c/d8/4bb7aec442a9049827aa34cee1aa83803e528fa55da9a9d45d01d1bb933e/cryptography-46.0.4-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5aa3e463596b0087b3da0dbe2b2487e9fc261d25da85754e30e3b40637d61f81", size = 4947652, upload-time = "2026-01-28T00:23:14.554Z" }, + { url = "https://files.pythonhosted.org/packages/2b/08/f83e2e0814248b844265802d081f2fac2f1cbe6cd258e72ba14ff006823a/cryptography-46.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0a9ad24359fee86f131836a9ac3bffc9329e956624a2d379b613f8f8abaf5255", size = 4455157, upload-time = "2026-01-28T00:23:16.443Z" }, + { url = "https://files.pythonhosted.org/packages/0a/05/19d849cf4096448779d2dcc9bb27d097457dac36f7273ffa875a93b5884c/cryptography-46.0.4-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:dc1272e25ef673efe72f2096e92ae39dea1a1a450dd44918b15351f72c5a168e", size = 3981078, upload-time = "2026-01-28T00:23:17.838Z" }, + { url = "https://files.pythonhosted.org/packages/e6/89/f7bac81d66ba7cde867a743ea5b37537b32b5c633c473002b26a226f703f/cryptography-46.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:de0f5f4ec8711ebc555f54735d4c673fc34b65c44283895f1a08c2b49d2fd99c", size = 4276213, upload-time = "2026-01-28T00:23:19.257Z" }, + { url = "https://files.pythonhosted.org/packages/da/9f/7133e41f24edd827020ad21b068736e792bc68eecf66d93c924ad4719fb3/cryptography-46.0.4-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:eeeb2e33d8dbcccc34d64651f00a98cb41b2dc69cef866771a5717e6734dfa32", size = 4912190, upload-time = "2026-01-28T00:23:21.244Z" }, + { url = "https://files.pythonhosted.org/packages/a6/f7/6d43cbaddf6f65b24816e4af187d211f0bc536a29961f69faedc48501d8e/cryptography-46.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3d425eacbc9aceafd2cb429e42f4e5d5633c6f873f5e567077043ef1b9bbf616", size = 4454641, upload-time = "2026-01-28T00:23:22.866Z" }, + { url = "https://files.pythonhosted.org/packages/9e/4f/ebd0473ad656a0ac912a16bd07db0f5d85184924e14fc88feecae2492834/cryptography-46.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91627ebf691d1ea3976a031b61fb7bac1ccd745afa03602275dda443e11c8de0", size = 4405159, upload-time = "2026-01-28T00:23:25.278Z" }, + { url = "https://files.pythonhosted.org/packages/d1/f7/7923886f32dc47e27adeff8246e976d77258fd2aa3efdd1754e4e323bf49/cryptography-46.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2d08bc22efd73e8854b0b7caff402d735b354862f1145d7be3b9c0f740fef6a0", size = 4666059, upload-time = "2026-01-28T00:23:26.766Z" }, + { url = "https://files.pythonhosted.org/packages/eb/a7/0fca0fd3591dffc297278a61813d7f661a14243dd60f499a7a5b48acb52a/cryptography-46.0.4-cp311-abi3-win32.whl", hash = "sha256:82a62483daf20b8134f6e92898da70d04d0ef9a75829d732ea1018678185f4f5", size = 3026378, upload-time = "2026-01-28T00:23:28.317Z" }, + { url = 
"https://files.pythonhosted.org/packages/2d/12/652c84b6f9873f0909374864a57b003686c642ea48c84d6c7e2c515e6da5/cryptography-46.0.4-cp311-abi3-win_amd64.whl", hash = "sha256:6225d3ebe26a55dbc8ead5ad1265c0403552a63336499564675b29eb3184c09b", size = 3478614, upload-time = "2026-01-28T00:23:30.275Z" }, + { url = "https://files.pythonhosted.org/packages/b9/27/542b029f293a5cce59349d799d4d8484b3b1654a7b9a0585c266e974a488/cryptography-46.0.4-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:485e2b65d25ec0d901bca7bcae0f53b00133bf3173916d8e421f6fddde103908", size = 7116417, upload-time = "2026-01-28T00:23:31.958Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f5/559c25b77f40b6bf828eabaf988efb8b0e17b573545edb503368ca0a2a03/cryptography-46.0.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:078e5f06bd2fa5aea5a324f2a09f914b1484f1d0c2a4d6a8a28c74e72f65f2da", size = 4264508, upload-time = "2026-01-28T00:23:34.264Z" }, + { url = "https://files.pythonhosted.org/packages/49/a1/551fa162d33074b660dc35c9bc3616fefa21a0e8c1edd27b92559902e408/cryptography-46.0.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dce1e4f068f03008da7fa51cc7abc6ddc5e5de3e3d1550334eaf8393982a5829", size = 4409080, upload-time = "2026-01-28T00:23:35.793Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6a/4d8d129a755f5d6df1bbee69ea2f35ebfa954fa1847690d1db2e8bca46a5/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:2067461c80271f422ee7bdbe79b9b4be54a5162e90345f86a23445a0cf3fd8a2", size = 4270039, upload-time = "2026-01-28T00:23:37.263Z" }, + { url = "https://files.pythonhosted.org/packages/4c/f5/ed3fcddd0a5e39321e595e144615399e47e7c153a1fb8c4862aec3151ff9/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:c92010b58a51196a5f41c3795190203ac52edfd5dc3ff99149b4659eba9d2085", size = 4926748, upload-time = "2026-01-28T00:23:38.884Z" }, + { url = "https://files.pythonhosted.org/packages/43/ae/9f03d5f0c0c00e85ecb34f06d3b79599f20630e4db91b8a6e56e8f83d410/cryptography-46.0.4-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:829c2b12bbc5428ab02d6b7f7e9bbfd53e33efd6672d21341f2177470171ad8b", size = 4442307, upload-time = "2026-01-28T00:23:40.56Z" }, + { url = "https://files.pythonhosted.org/packages/8b/22/e0f9f2dae8040695103369cf2283ef9ac8abe4d51f68710bec2afd232609/cryptography-46.0.4-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:62217ba44bf81b30abaeda1488686a04a702a261e26f87db51ff61d9d3510abd", size = 3959253, upload-time = "2026-01-28T00:23:42.827Z" }, + { url = "https://files.pythonhosted.org/packages/01/5b/6a43fcccc51dae4d101ac7d378a8724d1ba3de628a24e11bf2f4f43cba4d/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:9c2da296c8d3415b93e6053f5a728649a87a48ce084a9aaf51d6e46c87c7f2d2", size = 4269372, upload-time = "2026-01-28T00:23:44.655Z" }, + { url = "https://files.pythonhosted.org/packages/17/b7/0f6b8c1dd0779df2b526e78978ff00462355e31c0a6f6cff8a3e99889c90/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:9b34d8ba84454641a6bf4d6762d15847ecbd85c1316c0a7984e6e4e9f748ec2e", size = 4891908, upload-time = "2026-01-28T00:23:46.48Z" }, + { url = "https://files.pythonhosted.org/packages/83/17/259409b8349aa10535358807a472c6a695cf84f106022268d31cea2b6c97/cryptography-46.0.4-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:df4a817fa7138dd0c96c8c8c20f04b8aaa1fac3bbf610913dcad8ea82e1bfd3f", size = 4441254, upload-time = "2026-01-28T00:23:48.403Z" }, 
+ { url = "https://files.pythonhosted.org/packages/9c/fe/e4a1b0c989b00cee5ffa0764401767e2d1cf59f45530963b894129fd5dce/cryptography-46.0.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b1de0ebf7587f28f9190b9cb526e901bf448c9e6a99655d2b07fff60e8212a82", size = 4396520, upload-time = "2026-01-28T00:23:50.26Z" }, + { url = "https://files.pythonhosted.org/packages/b3/81/ba8fd9657d27076eb40d6a2f941b23429a3c3d2f56f5a921d6b936a27bc9/cryptography-46.0.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9b4d17bc7bd7cdd98e3af40b441feaea4c68225e2eb2341026c84511ad246c0c", size = 4651479, upload-time = "2026-01-28T00:23:51.674Z" }, + { url = "https://files.pythonhosted.org/packages/00/03/0de4ed43c71c31e4fe954edd50b9d28d658fef56555eba7641696370a8e2/cryptography-46.0.4-cp314-cp314t-win32.whl", hash = "sha256:c411f16275b0dea722d76544a61d6421e2cc829ad76eec79280dbdc9ddf50061", size = 3001986, upload-time = "2026-01-28T00:23:53.485Z" }, + { url = "https://files.pythonhosted.org/packages/5c/70/81830b59df7682917d7a10f833c4dab2a5574cd664e86d18139f2b421329/cryptography-46.0.4-cp314-cp314t-win_amd64.whl", hash = "sha256:728fedc529efc1439eb6107b677f7f7558adab4553ef8669f0d02d42d7b959a7", size = 3468288, upload-time = "2026-01-28T00:23:55.09Z" }, + { url = "https://files.pythonhosted.org/packages/56/f7/f648fdbb61d0d45902d3f374217451385edc7e7768d1b03ff1d0e5ffc17b/cryptography-46.0.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a9556ba711f7c23f77b151d5798f3ac44a13455cc68db7697a1096e6d0563cab", size = 7169583, upload-time = "2026-01-28T00:23:56.558Z" }, + { url = "https://files.pythonhosted.org/packages/d8/cc/8f3224cbb2a928de7298d6ed4790f5ebc48114e02bdc9559196bfb12435d/cryptography-46.0.4-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8bf75b0259e87fa70bddc0b8b4078b76e7fd512fd9afae6c1193bcf440a4dbef", size = 4275419, upload-time = "2026-01-28T00:23:58.364Z" }, + { url = "https://files.pythonhosted.org/packages/17/43/4a18faa7a872d00e4264855134ba82d23546c850a70ff209e04ee200e76f/cryptography-46.0.4-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3c268a3490df22270955966ba236d6bc4a8f9b6e4ffddb78aac535f1a5ea471d", size = 4419058, upload-time = "2026-01-28T00:23:59.867Z" }, + { url = "https://files.pythonhosted.org/packages/ee/64/6651969409821d791ba12346a124f55e1b76f66a819254ae840a965d4b9c/cryptography-46.0.4-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:812815182f6a0c1d49a37893a303b44eaac827d7f0d582cecfc81b6427f22973", size = 4278151, upload-time = "2026-01-28T00:24:01.731Z" }, + { url = "https://files.pythonhosted.org/packages/20/0b/a7fce65ee08c3c02f7a8310cc090a732344066b990ac63a9dfd0a655d321/cryptography-46.0.4-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:a90e43e3ef65e6dcf969dfe3bb40cbf5aef0d523dff95bfa24256be172a845f4", size = 4939441, upload-time = "2026-01-28T00:24:03.175Z" }, + { url = "https://files.pythonhosted.org/packages/db/a7/20c5701e2cd3e1dfd7a19d2290c522a5f435dd30957d431dcb531d0f1413/cryptography-46.0.4-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a05177ff6296644ef2876fce50518dffb5bcdf903c85250974fc8bc85d54c0af", size = 4451617, upload-time = "2026-01-28T00:24:05.403Z" }, + { url = "https://files.pythonhosted.org/packages/00/dc/3e16030ea9aa47b63af6524c354933b4fb0e352257c792c4deeb0edae367/cryptography-46.0.4-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:daa392191f626d50f1b136c9b4cf08af69ca8279d110ea24f5c2700054d2e263", size = 3977774, upload-time = "2026-01-28T00:24:06.851Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/c8/ad93f14118252717b465880368721c963975ac4b941b7ef88f3c56bf2897/cryptography-46.0.4-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e07ea39c5b048e085f15923511d8121e4a9dc45cee4e3b970ca4f0d338f23095", size = 4277008, upload-time = "2026-01-28T00:24:08.926Z" }, + { url = "https://files.pythonhosted.org/packages/00/cf/89c99698151c00a4631fbfcfcf459d308213ac29e321b0ff44ceeeac82f1/cryptography-46.0.4-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:d5a45ddc256f492ce42a4e35879c5e5528c09cd9ad12420828c972951d8e016b", size = 4903339, upload-time = "2026-01-28T00:24:12.009Z" }, + { url = "https://files.pythonhosted.org/packages/03/c3/c90a2cb358de4ac9309b26acf49b2a100957e1ff5cc1e98e6c4996576710/cryptography-46.0.4-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:6bb5157bf6a350e5b28aee23beb2d84ae6f5be390b2f8ee7ea179cda077e1019", size = 4451216, upload-time = "2026-01-28T00:24:13.975Z" }, + { url = "https://files.pythonhosted.org/packages/96/2c/8d7f4171388a10208671e181ca43cdc0e596d8259ebacbbcfbd16de593da/cryptography-46.0.4-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd5aba870a2c40f87a3af043e0dee7d9eb02d4aff88a797b48f2b43eff8c3ab4", size = 4404299, upload-time = "2026-01-28T00:24:16.169Z" }, + { url = "https://files.pythonhosted.org/packages/e9/23/cbb2036e450980f65c6e0a173b73a56ff3bccd8998965dea5cc9ddd424a5/cryptography-46.0.4-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:93d8291da8d71024379ab2cb0b5c57915300155ad42e07f76bea6ad838d7e59b", size = 4664837, upload-time = "2026-01-28T00:24:17.629Z" }, + { url = "https://files.pythonhosted.org/packages/0a/21/f7433d18fe6d5845329cbdc597e30caf983229c7a245bcf54afecc555938/cryptography-46.0.4-cp38-abi3-win32.whl", hash = "sha256:0563655cb3c6d05fb2afe693340bc050c30f9f34e15763361cf08e94749401fc", size = 3009779, upload-time = "2026-01-28T00:24:20.198Z" }, + { url = "https://files.pythonhosted.org/packages/3a/6a/bd2e7caa2facffedf172a45c1a02e551e6d7d4828658c9a245516a598d94/cryptography-46.0.4-cp38-abi3-win_amd64.whl", hash = "sha256:fa0900b9ef9c49728887d1576fd8d9e7e3ea872fa9b25ef9b64888adc434e976", size = 3466633, upload-time = "2026-01-28T00:24:21.851Z" }, ] [[package]] @@ -1108,6 +1107,46 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/01/c9/97cc5aae1648dcb851958a3ddf73ccd7dbe5650d95203ecb4d7720b4cdbf/fsspec-2026.1.0-py3-none-any.whl", hash = "sha256:cb76aa913c2285a3b49bdd5fc55b1d7c708d7208126b60f2eb8194fe1b4cbdcc", size = 201838, upload-time = "2026-01-09T15:21:34.041Z" }, ] +[[package]] +name = "google-auth" +version = "2.48.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0c/41/242044323fbd746615884b1c16639749e73665b718209946ebad7ba8a813/google_auth-2.48.0.tar.gz", hash = "sha256:4f7e706b0cd3208a3d940a19a822c37a476ddba5450156c3e6624a71f7c841ce", size = 326522, upload-time = "2026-01-26T19:22:47.157Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/1d/d6466de3a5249d35e832a52834115ca9d1d0de6abc22065f049707516d47/google_auth-2.48.0-py3-none-any.whl", hash = "sha256:2e2a537873d449434252a9632c28bfc268b0adb1e53f9fb62afc5333a975903f", size = 236499, upload-time = "2026-01-26T19:22:45.099Z" }, +] + +[package.optional-dependencies] +requests = [ + { name = "requests" }, +] + +[[package]] +name = "google-genai" +version = "1.60.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { 
name = "anyio" }, + { name = "distro" }, + { name = "google-auth", extra = ["requests"] }, + { name = "httpx" }, + { name = "pydantic" }, + { name = "requests" }, + { name = "sniffio" }, + { name = "tenacity" }, + { name = "typing-extensions" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0a/3f/a753be0dcee352b7d63bc6d1ba14a72591d63b6391dac0cdff7ac168c530/google_genai-1.60.0.tar.gz", hash = "sha256:9768061775fddfaecfefb0d6d7a6cabefb3952ebd246cd5f65247151c07d33d1", size = 487721, upload-time = "2026-01-21T22:17:30.398Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/e5/384b1f383917b5f0ae92e28f47bc27b16e3d26cd9bacb25e9f8ecab3c8fe/google_genai-1.60.0-py3-none-any.whl", hash = "sha256:967338378ffecebec19a8ed90cf8797b26818bacbefd7846a9280beb1099f7f3", size = 719431, upload-time = "2026-01-21T22:17:28.086Z" }, +] + [[package]] name = "greenlet" version = "3.3.1" @@ -1117,6 +1156,7 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f9/c8/9d76a66421d1ae24340dfae7e79c313957f6e3195c144d2c73333b5bfe34/greenlet-3.3.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:7e806ca53acf6d15a888405880766ec84721aa4181261cd11a457dfe9a7a4975", size = 276443, upload-time = "2026-01-23T15:30:10.066Z" }, { url = "https://files.pythonhosted.org/packages/81/99/401ff34bb3c032d1f10477d199724f5e5f6fbfb59816ad1455c79c1eb8e7/greenlet-3.3.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d842c94b9155f1c9b3058036c24ffb8ff78b428414a19792b2380be9cecf4f36", size = 597359, upload-time = "2026-01-23T16:00:57.394Z" }, { url = "https://files.pythonhosted.org/packages/2b/bc/4dcc0871ed557792d304f50be0f7487a14e017952ec689effe2180a6ff35/greenlet-3.3.1-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:20fedaadd422fa02695f82093f9a98bad3dab5fcda793c658b945fcde2ab27ba", size = 607805, upload-time = "2026-01-23T16:05:28.068Z" }, + { url = "https://files.pythonhosted.org/packages/3b/cd/7a7ca57588dac3389e97f7c9521cb6641fd8b6602faf1eaa4188384757df/greenlet-3.3.1-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c620051669fd04ac6b60ebc70478210119c56e2d5d5df848baec4312e260e4ca", size = 622363, upload-time = "2026-01-23T16:15:54.754Z" }, { url = "https://files.pythonhosted.org/packages/cf/05/821587cf19e2ce1f2b24945d890b164401e5085f9d09cbd969b0c193cd20/greenlet-3.3.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14194f5f4305800ff329cbf02c5fcc88f01886cadd29941b807668a45f0d2336", size = 609947, upload-time = "2026-01-23T15:32:51.004Z" }, { url = "https://files.pythonhosted.org/packages/a4/52/ee8c46ed9f8babaa93a19e577f26e3d28a519feac6350ed6f25f1afee7e9/greenlet-3.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7b2fe4150a0cf59f847a67db8c155ac36aed89080a6a639e9f16df5d6c6096f1", size = 1567487, upload-time = "2026-01-23T16:04:22.125Z" }, { url = "https://files.pythonhosted.org/packages/8f/7c/456a74f07029597626f3a6db71b273a3632aecb9afafeeca452cfa633197/greenlet-3.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:49f4ad195d45f4a66a0eb9c1ba4832bb380570d361912fa3554746830d332149", size = 1636087, upload-time = "2026-01-23T15:33:47.486Z" }, @@ -1125,6 +1165,7 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ec/ab/d26750f2b7242c2b90ea2ad71de70cfcd73a948a49513188a0fc0d6fc15a/greenlet-3.3.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:7ab327905cabb0622adca5971e488064e35115430cec2c35a50fd36e72a315b3", size = 275205, 
upload-time = "2026-01-23T15:30:24.556Z" }, { url = "https://files.pythonhosted.org/packages/10/d3/be7d19e8fad7c5a78eeefb2d896a08cd4643e1e90c605c4be3b46264998f/greenlet-3.3.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:65be2f026ca6a176f88fb935ee23c18333ccea97048076aef4db1ef5bc0713ac", size = 599284, upload-time = "2026-01-23T16:00:58.584Z" }, { url = "https://files.pythonhosted.org/packages/ae/21/fe703aaa056fdb0f17e5afd4b5c80195bbdab701208918938bd15b00d39b/greenlet-3.3.1-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7a3ae05b3d225b4155bda56b072ceb09d05e974bc74be6c3fc15463cf69f33fd", size = 610274, upload-time = "2026-01-23T16:05:29.312Z" }, + { url = "https://files.pythonhosted.org/packages/06/00/95df0b6a935103c0452dad2203f5be8377e551b8466a29650c4c5a5af6cc/greenlet-3.3.1-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:12184c61e5d64268a160226fb4818af4df02cfead8379d7f8b99a56c3a54ff3e", size = 624375, upload-time = "2026-01-23T16:15:55.915Z" }, { url = "https://files.pythonhosted.org/packages/cb/86/5c6ab23bb3c28c21ed6bebad006515cfe08b04613eb105ca0041fecca852/greenlet-3.3.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6423481193bbbe871313de5fd06a082f2649e7ce6e08015d2a76c1e9186ca5b3", size = 612904, upload-time = "2026-01-23T15:32:52.317Z" }, { url = "https://files.pythonhosted.org/packages/c2/f3/7949994264e22639e40718c2daf6f6df5169bf48fb038c008a489ec53a50/greenlet-3.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:33a956fe78bbbda82bfc95e128d61129b32d66bcf0a20a1f0c08aa4839ffa951", size = 1567316, upload-time = "2026-01-23T16:04:23.316Z" }, { url = "https://files.pythonhosted.org/packages/8d/6e/d73c94d13b6465e9f7cd6231c68abde838bb22408596c05d9059830b7872/greenlet-3.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b065d3284be43728dd280f6f9a13990b56470b81be20375a207cdc814a983f2", size = 1636549, upload-time = "2026-01-23T15:33:48.643Z" }, @@ -1133,6 +1174,7 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ae/fb/011c7c717213182caf78084a9bea51c8590b0afda98001f69d9f853a495b/greenlet-3.3.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:bd59acd8529b372775cd0fcbc5f420ae20681c5b045ce25bd453ed8455ab99b5", size = 275737, upload-time = "2026-01-23T15:32:16.889Z" }, { url = "https://files.pythonhosted.org/packages/41/2e/a3a417d620363fdbb08a48b1dd582956a46a61bf8fd27ee8164f9dfe87c2/greenlet-3.3.1-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b31c05dd84ef6871dd47120386aed35323c944d86c3d91a17c4b8d23df62f15b", size = 646422, upload-time = "2026-01-23T16:01:00.354Z" }, { url = "https://files.pythonhosted.org/packages/b4/09/c6c4a0db47defafd2d6bab8ddfe47ad19963b4e30f5bed84d75328059f8c/greenlet-3.3.1-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:02925a0bfffc41e542c70aa14c7eda3593e4d7e274bfcccca1827e6c0875902e", size = 658219, upload-time = "2026-01-23T16:05:30.956Z" }, + { url = "https://files.pythonhosted.org/packages/e2/89/b95f2ddcc5f3c2bc09c8ee8d77be312df7f9e7175703ab780f2014a0e781/greenlet-3.3.1-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3e0f3878ca3a3ff63ab4ea478585942b53df66ddde327b59ecb191b19dbbd62d", size = 671455, upload-time = "2026-01-23T16:15:57.232Z" }, { url = "https://files.pythonhosted.org/packages/80/38/9d42d60dffb04b45f03dbab9430898352dba277758640751dc5cc316c521/greenlet-3.3.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", 
hash = "sha256:34a729e2e4e4ffe9ae2408d5ecaf12f944853f40ad724929b7585bca808a9d6f", size = 660237, upload-time = "2026-01-23T15:32:53.967Z" }, { url = "https://files.pythonhosted.org/packages/96/61/373c30b7197f9e756e4c81ae90a8d55dc3598c17673f91f4d31c3c689c3f/greenlet-3.3.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:aec9ab04e82918e623415947921dea15851b152b822661cce3f8e4393c3df683", size = 1615261, upload-time = "2026-01-23T16:04:25.066Z" }, { url = "https://files.pythonhosted.org/packages/fd/d3/ca534310343f5945316f9451e953dcd89b36fe7a19de652a1dc5a0eeef3f/greenlet-3.3.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:71c767cf281a80d02b6c1bdc41c9468e1f5a494fb11bc8688c360524e273d7b1", size = 1683719, upload-time = "2026-01-23T15:33:50.61Z" }, @@ -1141,6 +1183,7 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/28/24/cbbec49bacdcc9ec652a81d3efef7b59f326697e7edf6ed775a5e08e54c2/greenlet-3.3.1-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:3e63252943c921b90abb035ebe9de832c436401d9c45f262d80e2d06cc659242", size = 282706, upload-time = "2026-01-23T15:33:05.525Z" }, { url = "https://files.pythonhosted.org/packages/86/2e/4f2b9323c144c4fe8842a4e0d92121465485c3c2c5b9e9b30a52e80f523f/greenlet-3.3.1-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:76e39058e68eb125de10c92524573924e827927df5d3891fbc97bd55764a8774", size = 651209, upload-time = "2026-01-23T16:01:01.517Z" }, { url = "https://files.pythonhosted.org/packages/d9/87/50ca60e515f5bb55a2fbc5f0c9b5b156de7d2fc51a0a69abc9d23914a237/greenlet-3.3.1-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c9f9d5e7a9310b7a2f416dd13d2e3fd8b42d803968ea580b7c0f322ccb389b97", size = 654300, upload-time = "2026-01-23T16:05:32.199Z" }, + { url = "https://files.pythonhosted.org/packages/7c/25/c51a63f3f463171e09cb586eb64db0861eb06667ab01a7968371a24c4f3b/greenlet-3.3.1-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b9721549a95db96689458a1e0ae32412ca18776ed004463df3a9299c1b257ab", size = 662574, upload-time = "2026-01-23T16:15:58.364Z" }, { url = "https://files.pythonhosted.org/packages/1d/94/74310866dfa2b73dd08659a3d18762f83985ad3281901ba0ee9a815194fb/greenlet-3.3.1-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:92497c78adf3ac703b57f1e3813c2d874f27f71a178f9ea5887855da413cd6d2", size = 653842, upload-time = "2026-01-23T15:32:55.671Z" }, { url = "https://files.pythonhosted.org/packages/97/43/8bf0ffa3d498eeee4c58c212a3905dd6146c01c8dc0b0a046481ca29b18c/greenlet-3.3.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ed6b402bc74d6557a705e197d47f9063733091ed6357b3de33619d8a8d93ac53", size = 1614917, upload-time = "2026-01-23T16:04:26.276Z" }, { url = "https://files.pythonhosted.org/packages/89/90/a3be7a5f378fc6e84abe4dcfb2ba32b07786861172e502388b4c90000d1b/greenlet-3.3.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:59913f1e5ada20fde795ba906916aea25d442abcc0593fba7e26c92b7ad76249", size = 1676092, upload-time = "2026-01-23T15:33:52.176Z" }, @@ -1524,7 +1567,7 @@ wheels = [ [[package]] name = "litellm" -version = "1.81.3" +version = "1.81.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -1540,9 +1583,9 @@ dependencies = [ { name = "tiktoken" }, { name = "tokenizers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ce/dd/d70835d5b231617761717cd5ba60342b677693093a71d5ce13ae9d254aee/litellm-1.81.3.tar.gz", hash = 
"sha256:a7688b429a88abfdd02f2a8c3158ebb5385689cfb7f9d4ac1473d018b2047e1b", size = 13612652, upload-time = "2026-01-25T02:45:58.888Z" } +sdist = { url = "https://files.pythonhosted.org/packages/38/f4/c109bc5504520baa7b96a910b619d1b1b5af6cb5c28053e53adfed83e3ab/litellm-1.81.5.tar.gz", hash = "sha256:599994651cbb64b8ee7cd3b4979275139afc6e426bdd4aa840a61121bb3b04c9", size = 13615436, upload-time = "2026-01-29T01:37:54.817Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/83/62/d3f53c665261fdd5bb2401246e005a4ea8194ad1c4d8c663318ae3d638bf/litellm-1.81.3-py3-none-any.whl", hash = "sha256:3f60fd8b727587952ad3dd18b68f5fed538d6f43d15bb0356f4c3a11bccb2b92", size = 11946995, upload-time = "2026-01-25T02:45:55.887Z" }, + { url = "https://files.pythonhosted.org/packages/74/0f/5312b944208efeec5dcbf8e0ed956f8f7c430b0c6458301d206380c90b56/litellm-1.81.5-py3-none-any.whl", hash = "sha256:206505c5a0c6503e465154b9c979772be3ede3f5bf746d15b37dca5ae54d239f", size = 11950016, upload-time = "2026-01-29T01:37:52.6Z" }, ] [[package]] @@ -1969,7 +2012,7 @@ wheels = [ [[package]] name = "openai" -version = "2.15.0" +version = "2.16.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1981,9 +2024,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/94/f4/4690ecb5d70023ce6bfcfeabfe717020f654bde59a775058ec6ac4692463/openai-2.15.0.tar.gz", hash = "sha256:42eb8cbb407d84770633f31bf727d4ffb4138711c670565a41663d9439174fba", size = 627383, upload-time = "2026-01-09T22:10:08.603Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b1/6c/e4c964fcf1d527fdf4739e7cc940c60075a4114d50d03871d5d5b1e13a88/openai-2.16.0.tar.gz", hash = "sha256:42eaa22ca0d8ded4367a77374104d7a2feafee5bd60a107c3c11b5243a11cd12", size = 629649, upload-time = "2026-01-27T23:28:02.579Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b5/df/c306f7375d42bafb379934c2df4c2fa3964656c8c782bac75ee10c102818/openai-2.15.0-py3-none-any.whl", hash = "sha256:6ae23b932cd7230f7244e52954daa6602716d6b9bf235401a107af731baea6c3", size = 1067879, upload-time = "2026-01-09T22:10:06.446Z" }, + { url = "https://files.pythonhosted.org/packages/16/83/0315bf2cfd75a2ce8a7e54188e9456c60cec6c0cf66728ed07bd9859ff26/openai-2.16.0-py3-none-any.whl", hash = "sha256:5f46643a8f42899a84e80c38838135d7038e7718333ce61396994f887b09a59b", size = 1068612, upload-time = "2026-01-27T23:28:00.356Z" }, ] [[package]] @@ -2404,6 +2447,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7f/60/22c9716033ced1ee1d800457126c4c79652a4ed635b0554c1d93742cc0a1/py_zerox-0.0.7-py3-none-any.whl", hash = "sha256:7b7d92cb6fafec91a94b63ba3c039b643fb3ee83545b15fa330ec07dd52f2058", size = 23347, upload-time = "2024-10-21T16:03:33.406Z" }, ] +[[package]] +name = "pyasn1" +version = "0.6.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/b6/6e630dff89739fcd427e3f72b3d905ce0acb85a45d4ec3e2678718a3487f/pyasn1-0.6.2.tar.gz", hash = "sha256:9b59a2b25ba7e4f8197db7686c09fb33e658b98339fadb826e9512629017833b", size = 146586, upload-time = "2026-01-16T18:04:18.534Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/b5/a96872e5184f354da9c84ae119971a0a4c221fe9b27a4d94bd43f2596727/pyasn1-0.6.2-py3-none-any.whl", hash = "sha256:1eb26d860996a18e9b6ed05e7aae0e9fc21619fcee6af91cca9bad4fbea224bf", size = 83371, upload-time = "2026-01-16T18:04:17.174Z" }, +] + +[[package]] +name = 
"pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + [[package]] name = "pycparser" version = "3.0" @@ -3013,6 +3077,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d0/02/fa464cdfbe6b26e0600b62c528b72d8608f5cc49f96b8d6e38c95d60c676/rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3", size = 226532, upload-time = "2025-11-30T20:24:14.634Z" }, ] +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + [[package]] name = "ruff" version = "0.14.14" @@ -3158,15 +3234,15 @@ wheels = [ [[package]] name = "sentry-sdk" -version = "2.50.0" +version = "2.51.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/15/8a/3c4f53d32c21012e9870913544e56bfa9e931aede080779a0f177513f534/sentry_sdk-2.50.0.tar.gz", hash = "sha256:873437a989ee1b8b25579847bae8384515bf18cfed231b06c591b735c1781fe3", size = 401233, upload-time = "2026-01-20T12:53:16.244Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/9f/094bbb6be5cf218ab6712c6528310687f3d3fe8818249fcfe1d74192f7c5/sentry_sdk-2.51.0.tar.gz", hash = "sha256:b89d64577075fd8c13088bc3609a2ce77a154e5beb8cba7cc16560b0539df4f7", size = 407447, upload-time = "2026-01-28T10:29:50.962Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4e/5b/cbc2bb9569f03c8e15d928357e7e6179e5cfab45544a3bbac8aec4caf9be/sentry_sdk-2.50.0-py2.py3-none-any.whl", hash = "sha256:0ef0ed7168657ceb5a0be081f4102d92042a125462d1d1a29277992e344e749e", size = 424961, upload-time = "2026-01-20T12:53:14.826Z" }, + { url = "https://files.pythonhosted.org/packages/a0/da/df379404d484ca9dede4ad8abead5de828cdcff35623cd44f0351cf6869c/sentry_sdk-2.51.0-py2.py3-none-any.whl", hash = "sha256:e21016d318a097c2b617bb980afd9fc737e1efc55f9b4f0cdc819982c9717d5f", size = 431426, upload-time = "2026-01-28T10:29:48.868Z" }, ] [package.optional-dependencies] @@ -3639,47 +3715,33 @@ wheels = [ [[package]] name = "websockets" -version = "16.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/04/24/4b2031d72e840ce4c1ccb255f693b15c334757fc50023e4db9537080b8c4/websockets-16.0.tar.gz", hash = "sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5", size = 179346, upload-time = "2026-01-10T09:23:47.181Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/84/7b/bac442e6b96c9d25092695578dda82403c77936104b5682307bd4deb1ad4/websockets-16.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:71c989cbf3254fbd5e84d3bff31e4da39c43f884e64f2551d14bb3c186230f00", size = 177365, upload-time = "2026-01-10T09:22:46.787Z" }, - { url = "https://files.pythonhosted.org/packages/b0/fe/136ccece61bd690d9c1f715baaeefd953bb2360134de73519d5df19d29ca/websockets-16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8b6e209ffee39ff1b6d0fa7bfef6de950c60dfb91b8fcead17da4ee539121a79", size = 175038, upload-time = "2026-01-10T09:22:47.999Z" }, - { url = "https://files.pythonhosted.org/packages/40/1e/9771421ac2286eaab95b8575b0cb701ae3663abf8b5e1f64f1fd90d0a673/websockets-16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86890e837d61574c92a97496d590968b23c2ef0aeb8a9bc9421d174cd378ae39", size = 175328, upload-time = "2026-01-10T09:22:49.809Z" }, - { url = "https://files.pythonhosted.org/packages/18/29/71729b4671f21e1eaa5d6573031ab810ad2936c8175f03f97f3ff164c802/websockets-16.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9b5aca38b67492ef518a8ab76851862488a478602229112c4b0d58d63a7a4d5c", size = 184915, upload-time = "2026-01-10T09:22:51.071Z" }, - { url = "https://files.pythonhosted.org/packages/97/bb/21c36b7dbbafc85d2d480cd65df02a1dc93bf76d97147605a8e27ff9409d/websockets-16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e0334872c0a37b606418ac52f6ab9cfd17317ac26365f7f65e203e2d0d0d359f", size = 186152, upload-time = "2026-01-10T09:22:52.224Z" }, - { url = "https://files.pythonhosted.org/packages/4a/34/9bf8df0c0cf88fa7bfe36678dc7b02970c9a7d5e065a3099292db87b1be2/websockets-16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a0b31e0b424cc6b5a04b8838bbaec1688834b2383256688cf47eb97412531da1", size = 185583, upload-time = "2026-01-10T09:22:53.443Z" }, - { url = "https://files.pythonhosted.org/packages/47/88/4dd516068e1a3d6ab3c7c183288404cd424a9a02d585efbac226cb61ff2d/websockets-16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:485c49116d0af10ac698623c513c1cc01c9446c058a4e61e3bf6c19dff7335a2", size = 184880, upload-time = "2026-01-10T09:22:55.033Z" }, - { url = "https://files.pythonhosted.org/packages/91/d6/7d4553ad4bf1c0421e1ebd4b18de5d9098383b5caa1d937b63df8d04b565/websockets-16.0-cp312-cp312-win32.whl", hash = "sha256:eaded469f5e5b7294e2bdca0ab06becb6756ea86894a47806456089298813c89", size = 178261, upload-time = "2026-01-10T09:22:56.251Z" }, - { url = "https://files.pythonhosted.org/packages/c3/f0/f3a17365441ed1c27f850a80b2bc680a0fa9505d733fe152fdf5e98c1c0b/websockets-16.0-cp312-cp312-win_amd64.whl", hash = "sha256:5569417dc80977fc8c2d43a86f78e0a5a22fee17565d78621b6bb264a115d4ea", size = 178693, upload-time = "2026-01-10T09:22:57.478Z" }, - { url = "https://files.pythonhosted.org/packages/cc/9c/baa8456050d1c1b08dd0ec7346026668cbc6f145ab4e314d707bb845bf0d/websockets-16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:878b336ac47938b474c8f982ac2f7266a540adc3fa4ad74ae96fea9823a02cc9", size = 177364, upload-time = "2026-01-10T09:22:59.333Z" }, - { url = 
"https://files.pythonhosted.org/packages/7e/0c/8811fc53e9bcff68fe7de2bcbe75116a8d959ac699a3200f4847a8925210/websockets-16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:52a0fec0e6c8d9a784c2c78276a48a2bdf099e4ccc2a4cad53b27718dbfd0230", size = 175039, upload-time = "2026-01-10T09:23:01.171Z" }, - { url = "https://files.pythonhosted.org/packages/aa/82/39a5f910cb99ec0b59e482971238c845af9220d3ab9fa76dd9162cda9d62/websockets-16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e6578ed5b6981005df1860a56e3617f14a6c307e6a71b4fff8c48fdc50f3ed2c", size = 175323, upload-time = "2026-01-10T09:23:02.341Z" }, - { url = "https://files.pythonhosted.org/packages/bd/28/0a25ee5342eb5d5f297d992a77e56892ecb65e7854c7898fb7d35e9b33bd/websockets-16.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:95724e638f0f9c350bb1c2b0a7ad0e83d9cc0c9259f3ea94e40d7b02a2179ae5", size = 184975, upload-time = "2026-01-10T09:23:03.756Z" }, - { url = "https://files.pythonhosted.org/packages/f9/66/27ea52741752f5107c2e41fda05e8395a682a1e11c4e592a809a90c6a506/websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0204dc62a89dc9d50d682412c10b3542d748260d743500a85c13cd1ee4bde82", size = 186203, upload-time = "2026-01-10T09:23:05.01Z" }, - { url = "https://files.pythonhosted.org/packages/37/e5/8e32857371406a757816a2b471939d51c463509be73fa538216ea52b792a/websockets-16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:52ac480f44d32970d66763115edea932f1c5b1312de36df06d6b219f6741eed8", size = 185653, upload-time = "2026-01-10T09:23:06.301Z" }, - { url = "https://files.pythonhosted.org/packages/9b/67/f926bac29882894669368dc73f4da900fcdf47955d0a0185d60103df5737/websockets-16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6e5a82b677f8f6f59e8dfc34ec06ca6b5b48bc4fcda346acd093694cc2c24d8f", size = 184920, upload-time = "2026-01-10T09:23:07.492Z" }, - { url = "https://files.pythonhosted.org/packages/3c/a1/3d6ccdcd125b0a42a311bcd15a7f705d688f73b2a22d8cf1c0875d35d34a/websockets-16.0-cp313-cp313-win32.whl", hash = "sha256:abf050a199613f64c886ea10f38b47770a65154dc37181bfaff70c160f45315a", size = 178255, upload-time = "2026-01-10T09:23:09.245Z" }, - { url = "https://files.pythonhosted.org/packages/6b/ae/90366304d7c2ce80f9b826096a9e9048b4bb760e44d3b873bb272cba696b/websockets-16.0-cp313-cp313-win_amd64.whl", hash = "sha256:3425ac5cf448801335d6fdc7ae1eb22072055417a96cc6b31b3861f455fbc156", size = 178689, upload-time = "2026-01-10T09:23:10.483Z" }, - { url = "https://files.pythonhosted.org/packages/f3/1d/e88022630271f5bd349ed82417136281931e558d628dd52c4d8621b4a0b2/websockets-16.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0", size = 177406, upload-time = "2026-01-10T09:23:12.178Z" }, - { url = "https://files.pythonhosted.org/packages/f2/78/e63be1bf0724eeb4616efb1ae1c9044f7c3953b7957799abb5915bffd38e/websockets-16.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904", size = 175085, upload-time = "2026-01-10T09:23:13.511Z" }, - { url = "https://files.pythonhosted.org/packages/bb/f4/d3c9220d818ee955ae390cf319a7c7a467beceb24f05ee7aaaa2414345ba/websockets-16.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4", size = 175328, upload-time = "2026-01-10T09:23:14.727Z" }, - { url = 
"https://files.pythonhosted.org/packages/63/bc/d3e208028de777087e6fb2b122051a6ff7bbcca0d6df9d9c2bf1dd869ae9/websockets-16.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e", size = 185044, upload-time = "2026-01-10T09:23:15.939Z" }, - { url = "https://files.pythonhosted.org/packages/ad/6e/9a0927ac24bd33a0a9af834d89e0abc7cfd8e13bed17a86407a66773cc0e/websockets-16.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4", size = 186279, upload-time = "2026-01-10T09:23:17.148Z" }, - { url = "https://files.pythonhosted.org/packages/b9/ca/bf1c68440d7a868180e11be653c85959502efd3a709323230314fda6e0b3/websockets-16.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1", size = 185711, upload-time = "2026-01-10T09:23:18.372Z" }, - { url = "https://files.pythonhosted.org/packages/c4/f8/fdc34643a989561f217bb477cbc47a3a07212cbda91c0e4389c43c296ebf/websockets-16.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3", size = 184982, upload-time = "2026-01-10T09:23:19.652Z" }, - { url = "https://files.pythonhosted.org/packages/dd/d1/574fa27e233764dbac9c52730d63fcf2823b16f0856b3329fc6268d6ae4f/websockets-16.0-cp314-cp314-win32.whl", hash = "sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8", size = 177915, upload-time = "2026-01-10T09:23:21.458Z" }, - { url = "https://files.pythonhosted.org/packages/8a/f1/ae6b937bf3126b5134ce1f482365fde31a357c784ac51852978768b5eff4/websockets-16.0-cp314-cp314-win_amd64.whl", hash = "sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d", size = 178381, upload-time = "2026-01-10T09:23:22.715Z" }, - { url = "https://files.pythonhosted.org/packages/06/9b/f791d1db48403e1f0a27577a6beb37afae94254a8c6f08be4a23e4930bc0/websockets-16.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244", size = 177737, upload-time = "2026-01-10T09:23:24.523Z" }, - { url = "https://files.pythonhosted.org/packages/bd/40/53ad02341fa33b3ce489023f635367a4ac98b73570102ad2cdd770dacc9a/websockets-16.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e", size = 175268, upload-time = "2026-01-10T09:23:25.781Z" }, - { url = "https://files.pythonhosted.org/packages/74/9b/6158d4e459b984f949dcbbb0c5d270154c7618e11c01029b9bbd1bb4c4f9/websockets-16.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641", size = 175486, upload-time = "2026-01-10T09:23:27.033Z" }, - { url = "https://files.pythonhosted.org/packages/e5/2d/7583b30208b639c8090206f95073646c2c9ffd66f44df967981a64f849ad/websockets-16.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8", size = 185331, upload-time = "2026-01-10T09:23:28.259Z" }, - { url = "https://files.pythonhosted.org/packages/45/b0/cce3784eb519b7b5ad680d14b9673a31ab8dcb7aad8b64d81709d2430aa8/websockets-16.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e", size = 186501, 
upload-time = "2026-01-10T09:23:29.449Z" }, - { url = "https://files.pythonhosted.org/packages/19/60/b8ebe4c7e89fb5f6cdf080623c9d92789a53636950f7abacfc33fe2b3135/websockets-16.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944", size = 186062, upload-time = "2026-01-10T09:23:31.368Z" }, - { url = "https://files.pythonhosted.org/packages/88/a8/a080593f89b0138b6cba1b28f8df5673b5506f72879322288b031337c0b8/websockets-16.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206", size = 185356, upload-time = "2026-01-10T09:23:32.627Z" }, - { url = "https://files.pythonhosted.org/packages/c2/b6/b9afed2afadddaf5ebb2afa801abf4b0868f42f8539bfe4b071b5266c9fe/websockets-16.0-cp314-cp314t-win32.whl", hash = "sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6", size = 178085, upload-time = "2026-01-10T09:23:33.816Z" }, - { url = "https://files.pythonhosted.org/packages/9f/3e/28135a24e384493fa804216b79a6a6759a38cc4ff59118787b9fb693df93/websockets-16.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd", size = 178531, upload-time = "2026-01-10T09:23:35.016Z" }, - { url = "https://files.pythonhosted.org/packages/6f/28/258ebab549c2bf3e64d2b0217b973467394a9cea8c42f70418ca2c5d0d2e/websockets-16.0-py3-none-any.whl", hash = "sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec", size = 171598, upload-time = "2026-01-10T09:23:45.395Z" }, +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" }, + { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" }, + { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152, upload-time = "2025-03-05T20:02:22.286Z" }, + { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 
182096, upload-time = "2025-03-05T20:02:24.368Z" }, + { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = "2025-03-05T20:02:26.99Z" }, + { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" }, + { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" }, + { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" }, + { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" }, + { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" }, + { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" }, + { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" }, + { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = "2025-03-05T20:02:41.926Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" }, + { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" }, + { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = "2025-03-05T20:02:50.14Z" }, + { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" }, + { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" }, + { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" }, + { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, ] [[package]]