From 7e5ff49d89e8982ae6d16e1333a49f9f5835cf45 Mon Sep 17 00:00:00 2001
From: Dinesh Reddy
Date: Mon, 23 Mar 2026 12:13:36 -0700
Subject: [PATCH] fix: exclude null fields from API payloads to match Node SDK behavior

GenerationConfig.model_dump() was sending all null fields (response_model,
prompt, json_schema, etc.) and AgentSkill null fields (skill_id, name,
source, bundle) in every request. This caused bloated payloads with ~20
unnecessary null fields vs the Node SDK which omits them.

Changes:
- GenerationConfig.model_dump(): default to exclude_none=True, always
  strip the internal response_model field
- document.generate(): only include domain/callback_url when non-None
- execute()/image.generate(): same callback_url cleanup

Made-with: Cursor
---
 vlmrun/client/predictions.py | 55 ++++++++++++++++++++----------------
 vlmrun/client/types.py       | 11 ++++----
 2 files changed, 37 insertions(+), 29 deletions(-)

diff --git a/vlmrun/client/predictions.py b/vlmrun/client/predictions.py
index 09eee42..06b577f 100644
--- a/vlmrun/client/predictions.py
+++ b/vlmrun/client/predictions.py
@@ -323,17 +323,19 @@ def generate(
             additional_kwargs["config"] = config.model_dump()
         if metadata:
             additional_kwargs["metadata"] = metadata.model_dump()
+        data: dict = {
+            "model": model,
+            "images": images_data,
+            "domain": domain,
+            "batch": batch,
+            **additional_kwargs,
+        }
+        if callback_url is not None:
+            data["callback_url"] = callback_url
         response, status_code, headers = self._requestor.request(
             method="POST",
             url="image/generate",
-            data={
-                "model": model,
-                "images": images_data,
-                "domain": domain,
-                "batch": batch,
-                "callback_url": callback_url,
-                **additional_kwargs,
-            },
+            data=data,
         )
         if not isinstance(response, dict):
             raise TypeError("Expected dict response")
@@ -453,17 +455,20 @@ def generate(
             additional_kwargs["config"] = config.model_dump()
         if metadata:
             additional_kwargs["metadata"] = metadata.model_dump()
+        data: dict = {
+            "model": model,
+            "url" if is_url else "file_id": file_or_url,
+            "batch": batch,
+            **additional_kwargs,
+        }
+        if domain is not None:
+            data["domain"] = domain
+        if callback_url is not None:
+            data["callback_url"] = callback_url
         response, status_code, headers = self._requestor.request(
             method="POST",
             url=f"{route}/generate",
-            data={
-                "model": model,
-                "url" if is_url else "file_id": file_or_url,
-                "domain": domain,
-                "batch": batch,
-                "callback_url": callback_url,
-                **additional_kwargs,
-            },
+            data=data,
         )
         if not isinstance(response, dict):
             raise TypeError("Expected dict response")
@@ -519,17 +524,19 @@ def execute(
             additional_kwargs["config"] = config.model_dump()
         if metadata:
             additional_kwargs["metadata"] = metadata.model_dump()
+        data: dict = {
+            "name": name,
+            "version": version,
+            "url" if is_url else "file_id": file_or_url,
+            "batch": batch,
+            **additional_kwargs,
+        }
+        if callback_url is not None:
+            data["callback_url"] = callback_url
         response, status_code, headers = self._requestor.request(
             method="POST",
             url=f"{route}/execute",
-            data={
-                "name": name,
-                "version": version,
-                "url" if is_url else "file_id": file_or_url,
-                "batch": batch,
-                "callback_url": callback_url,
-                **additional_kwargs,
-            },
+            data=data,
         )
         if not isinstance(response, dict):
             raise TypeError("Expected dict response")
diff --git a/vlmrun/client/types.py b/vlmrun/client/types.py
index 8aceef6..28c418d 100644
--- a/vlmrun/client/types.py
+++ b/vlmrun/client/types.py
@@ -505,20 +505,21 @@ class GenerationConfig(BaseModel):
 
     def model_dump(self, **kwargs) -> dict:
         """Dump the config as a dictionary, converting response_model to json_schema if present."""
-        data = super().model_dump(**kwargs)
-
         if self.response_model and self.json_schema:
             raise ValueError(
                 "`response_model` and `json_schema` cannot be used together"
             )
 
+        kwargs.setdefault("exclude_none", True)
+        data = super().model_dump(**kwargs)
+
         if self.response_model is not None:
             assert (
                 self.json_schema is None
             ), "`response_model` and `json_schema` cannot be used together"
-            json_schema = self.response_model.model_json_schema()
-            data["json_schema"] = json_schema
-        data.pop("response_model", None)
+            data["json_schema"] = self.response_model.model_json_schema()
+
+        data.pop("response_model", None)
         return data