diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d3cf4f36..4cbeb613 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -33,9 +33,8 @@ jobs: - name: Compile run: yarn && yarn test env: - OPENAI_KEY: ${{ secrets.OPENAI_KEY }} - ANTHROPIC_KEY: ${{ secrets.ANTHROPIC_KEY }} - COHERE_KEY: ${{ secrets.COHERE_KEY }} + HUMANLOOP_API_KEY: ${{ secrets.HUMANLOOP_API_KEY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} publish: needs: [compile, test] diff --git a/jest.config.mjs b/jest.config.mjs index c7248211..9eb4a4af 100644 --- a/jest.config.mjs +++ b/jest.config.mjs @@ -3,6 +3,6 @@ export default { preset: "ts-jest", testEnvironment: "node", moduleNameMapper: { - "(.+)\.js$": "$1", + "^(?!.*node_modules)(.+)\\.js$": "$1", }, }; diff --git a/package.json b/package.json index d47c7f4d..ceeadcf3 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "humanloop", - "version": "0.8.20", + "version": "0.8.21-beta1", "private": false, "repository": "https://github.com/humanloop/humanloop-node", "main": "./index.js", diff --git a/reference.md b/reference.md index aaa57c68..afc2e70e 100644 --- a/reference.md +++ b/reference.md @@ -1342,8 +1342,226 @@ await client.prompts.updateMonitoring("pr_30gco7dx6JDq4200GVOHa", { +
client.prompts.serialize(id, { ...params }) -> void +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Serialize a Prompt to the .prompt file format. + +Useful for storing the Prompt with your code in a version control system, +or for editing with an AI tool. + +By default, the deployed version of the Prompt is returned. Use the query parameters +`version_id` or `environment` to target a specific version of the Prompt. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.prompts.serialize("id"); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Prompt. + +
+
+ +
+
+ +**request:** `Humanloop.SerializePromptsIdSerializeGetRequest` + +
+
+ +
+
+ +**requestOptions:** `Prompts.RequestOptions` + +
+
+
+
+ +
+
+
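+A minimal sketch of targeting a specific deployment when serializing. The `environment` field below is an assumption that the request object mirrors the documented `environment` query parameter; it is not confirmed by this diff:
+
+```typescript
+import { HumanloopClient } from "humanloop";
+
+// Assumes HUMANLOOP_API_KEY is set, as in the CI workflow above.
+const client = new HumanloopClient({ apiKey: process.env.HUMANLOOP_API_KEY });
+
+// Hypothetical: serialize the version of the Prompt deployed to "production",
+// e.g. before committing the .prompt file alongside your code.
+await client.prompts.serialize("pr_30gco7dx6JDq4200GVOHa", {
+    environment: "production",
+});
+```
+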
+ +
client.prompts.deserialize({ ...params }) -> Humanloop.PromptKernelRequest +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Deserialize a Prompt from the .prompt file format.
+
+This returns a subset of the attributes required by a Prompt.
+This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.prompts.deserialize({ + prompt: "prompt", +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Humanloop.BodyDeserializePromptsDeserializePost` + +
+
+ +
+
+ +**requestOptions:** `Prompts.RequestOptions` + +
+
+
+
+ +
+
+
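+Since the `.prompt` format is intended to live alongside your code, a plausible round trip is to read the file from disk and pass its contents to `deserialize`. This is a sketch only: it assumes the `prompt` field accepts the raw file contents and that `client` is a configured `HumanloopClient`, as in the examples above.
+
+```typescript
+import { readFile } from "node:fs/promises";
+
+// Assumption: the `prompt` field takes the raw text of a .prompt file.
+const fileContents = await readFile("prompts/support-agent.prompt", "utf-8");
+const kernel = await client.prompts.deserialize({ prompt: fileContents });
+
+// `kernel` holds the version-defining subset of the Prompt (model, temperature, ...).
+console.log(kernel);
+```
+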
+ ## Tools +
client.tools.call({ ...params }) -> Humanloop.ToolCallResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Call a Tool.
+
+Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+when you are storing or deriving your Tool details in code.
+
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.tools.call(); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Humanloop.ToolCallRequest` + +
+
+ +
+
+ +**requestOptions:** `Tools.RequestOptions` + +
+
+
+
+ +
+
+
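+The empty call above relies entirely on the deployed defaults. As a rough sketch only, here is a targeted call; the `path`, `inputs`, and `versionId` field names are assumptions about `Humanloop.ToolCallRequest`, not confirmed by this diff:
+
+```typescript
+// Assumes `client` is a configured HumanloopClient.
+const result = await client.tools.call({
+    path: "Banking/Balance Lookup", // hypothetical Tool path
+    inputs: { accountId: "acc_123" }, // hypothetical inputs passed to the Tool's source code
+    versionId: "tlv_1234567890", // assumed to mirror the `version_id` query parameter
+});
+
+console.log(result);
+```
+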
+
client.tools.log({ ...params }) -> Humanloop.CreateToolLogResponse
@@ -2418,27 +2636,10 @@ await client.tools.updateMonitoring("tl_789ghi", {
-## Datasets - -
client.datasets.list({ ...params }) -> core.Page -
-
- -#### 📝 Description - -
-
- +
client.tools.getEnvironmentVariables(id) -> Humanloop.FileEnvironmentVariableRequest[]
-List all Datasets. - -
-
-
-
- #### 🔌 Usage
@@ -2448,20 +2649,7 @@ List all Datasets.
```typescript -const response = await client.datasets.list({ - size: 1, -}); -for await (const item of response) { - console.log(item); -} - -// Or you can manually iterate page-by-page -const page = await client.datasets.list({ - size: 1, -}); -while (page.hasNextPage()) { - page = page.getNextPage(); -} +await client.tools.getEnvironmentVariables("id"); ```
@@ -2477,7 +2665,7 @@ while (page.hasNextPage()) {
-**request:** `Humanloop.ListDatasetsGetRequest` +**id:** `string` — Unique identifier for File.
@@ -2485,7 +2673,7 @@ while (page.hasNextPage()) {
-**requestOptions:** `Datasets.RequestOptions` +**requestOptions:** `Tools.RequestOptions`
@@ -2496,7 +2684,7 @@ while (page.hasNextPage()) {
-
client.datasets.upsert({ ...params }) -> Humanloop.DatasetResponse +
client.tools.addEnvironmentVariable(id, { ...params }) -> Humanloop.FileEnvironmentVariableRequest[]
@@ -2508,23 +2696,7 @@ while (page.hasNextPage()) {
-Create a Dataset or update it with a new version if it already exists. - -Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset. - -By default, the new Dataset version will be set to the list of Datapoints provided in -the request. You can also create a new version by adding or removing Datapoints from an existing version -by specifying `action` as `add` or `remove` respectively. In this case, you may specify -the `version_id` or `environment` query parameters to identify the existing version to base -the new version on. If neither is provided, the latest created version will be used. - -You can provide `version_name` and `version_description` to identify and describe your versions. -Version names must be unique within a Dataset - attempting to create a version with a name -that already exists will result in a 409 Conflict error. - -Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already -exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, -you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`. +Add an environment variable to a Tool.
@@ -2540,11 +2712,237 @@ you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id:
```typescript -await client.datasets.upsert({ - path: "test-questions", - datapoints: [ - { - inputs: { +await client.tools.addEnvironmentVariable("id", [ + { + name: "name", + value: "value", + }, +]); +``` + +
+
+ + + +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Tool. + +
+
+ +
+
+ +**request:** `Humanloop.FileEnvironmentVariableRequest[]` + +
+
+ +
+
+ +**requestOptions:** `Tools.RequestOptions` + +
+
+
+
+ + + +
+ +
client.tools.deleteEnvironmentVariable(id, name) -> Humanloop.FileEnvironmentVariableRequest[] +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.tools.deleteEnvironmentVariable("id", "name"); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for File. + +
+
+ +
+
+ +**name:** `string` — Name of the Environment Variable to delete. + +
+
+ +
+
+ +**requestOptions:** `Tools.RequestOptions` + +
+
+
+
+ +
+
+
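+Taken together, the three environment-variable endpoints above form a simple lifecycle: add a variable, inspect the current set, then remove it by name. A sketch using the documented signatures (the `tl_789ghi` ID is borrowed from the monitoring example above; the variable itself is illustrative):
+
+```typescript
+// Assumes `client` is a configured HumanloopClient.
+const toolId = "tl_789ghi";
+
+// Add a secret that the Tool's source code can read at runtime.
+await client.tools.addEnvironmentVariable(toolId, [{ name: "WEATHER_API_KEY", value: "sk-example" }]);
+
+// List what is currently configured for the Tool.
+const variables = await client.tools.getEnvironmentVariables(toolId);
+console.log(variables.map((v) => v.name));
+
+// Remove the variable once it is no longer needed.
+await client.tools.deleteEnvironmentVariable(toolId, "WEATHER_API_KEY");
+```
+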
+ +## Datasets + +
client.datasets.list({ ...params }) -> core.Page +
+
+ +#### 📝 Description + +
+
+ +
+
+ +List all Datasets. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +const response = await client.datasets.list({ + size: 1, +}); +for await (const item of response) { + console.log(item); +} + +// Or you can manually iterate page-by-page +const page = await client.datasets.list({ + size: 1, +}); +while (page.hasNextPage()) { + page = page.getNextPage(); +} +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Humanloop.ListDatasetsGetRequest` + +
+
+ +
+
+ +**requestOptions:** `Datasets.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.datasets.upsert({ ...params }) -> Humanloop.DatasetResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a Dataset or update it with a new version if it already exists. + +Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset. + +By default, the new Dataset version will be set to the list of Datapoints provided in +the request. You can also create a new version by adding or removing Datapoints from an existing version +by specifying `action` as `add` or `remove` respectively. In this case, you may specify +the `version_id` or `environment` query parameters to identify the existing version to base +the new version on. If neither is provided, the latest created version will be used. + +You can provide `version_name` and `version_description` to identify and describe your versions. +Version names must be unique within a Dataset - attempting to create a version with a name +that already exists will result in a 409 Conflict error. + +Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already +exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, +you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`. + +
+
+
+
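+To make the `add` action and deduplication behaviour concrete, a hedged sketch follows (in addition to the usage example below); the camelCase `action`, `versionId`, and `datapoints` field names are assumptions about the TypeScript request shape:
+
+```typescript
+// Assumes `client` is a configured HumanloopClient.
+// Append Datapoints to an existing version of the Dataset.
+await client.datasets.upsert({
+    path: "test-questions",
+    action: "add", // assumed to mirror the `action` described above
+    versionId: "dsv_1234567890", // hypothetical version to base the new version on
+    datapoints: [
+        {
+            inputs: { question: "What is the capital of Spain?" },
+            target: { answer: "Madrid" },
+        },
+        {
+            // Re-adding an existing question would normally be deduplicated,
+            // so a unique marker is included to force the duplicate.
+            inputs: { question: "What is the capital of France?", _dedupe_id: "qa-002" },
+            target: { answer: "Paris" },
+        },
+    ],
+});
+```
+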
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.datasets.upsert({ + path: "test-questions", + datapoints: [ + { + inputs: { question: "What is the capital of France?", }, target: { @@ -5449,6 +5847,1617 @@ await client.flows.updateMonitoring("fl_6o701g4jmcanPVHxdqD0O", {
+## Agents + +
client.agents.log({ ...params }) -> Humanloop.CreateAgentLogResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create an Agent Log. + +You can use query parameters `version_id`, or `environment`, to target +an existing version of the Agent. Otherwise, the default deployed version will be chosen. + +If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` +in order to trigger Evaluators. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.log({ + path: "Banking/Teller Agent", + agent: { + provider: "anthropic", + endpoint: "chat", + model: "claude-3-7-sonnet-latest", + reasoningEffort: 1024, + template: [ + { + role: "system", + content: "You are a helpful digital assistant, helping users navigate our digital banking platform.", + }, + ], + maxIterations: 3, + tools: [ + { + type: "file", + link: { + fileId: "pr_1234567890", + versionId: "prv_1234567890", + }, + onAgentCall: "continue", + }, + { + type: "inline", + jsonSchema: { + name: "stop", + description: "Call this tool when you have finished your task.", + parameters: { + type: "object", + properties: { + output: { + type: "string", + description: "The final output to return to the user.", + }, + }, + additionalProperties: false, + required: ["output"], + }, + strict: true, + }, + onAgentCall: "stop", + }, + ], + }, +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Humanloop.AgentLogRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
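+The `log_status` note above pairs with `updateLog`, documented next: open the Log as `incomplete` while the turn is in flight, then mark it `complete` so Evaluators run. A sketch; the `id` property on the create response is an assumption, and the other field names follow the examples in this section:
+
+```typescript
+// Assumes `client` is a configured HumanloopClient.
+// 1. Open the Log as incomplete while the Agent turn is still in progress.
+const created = await client.agents.log({
+    path: "Banking/Teller Agent",
+    messages: [{ role: "user", content: "I need to withdraw $1000" }],
+    logStatus: "incomplete",
+});
+
+// 2. Once the final output is known, complete the Log to trigger Evaluators.
+// `created.id` is an assumption about CreateAgentLogResponse.
+await client.agents.updateLog("ag_1234567890", created.id, {
+    outputMessage: { role: "assistant", content: "Of course! Savings or checking?" },
+    logStatus: "complete",
+});
+```
+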
+ +
client.agents.updateLog(id, logId, { ...params }) -> Humanloop.AgentLogResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Update a Log. + +Update the details of a Log with the given ID. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.updateLog("ag_1234567890", "log_1234567890", { + messages: [ + { + role: "user", + content: "I need to withdraw $1000", + }, + { + role: "assistant", + content: "Of course! Would you like to use your savings or checking account?", + }, + ], + outputMessage: { + role: "assistant", + content: "I'm sorry, I can't help with that.", + }, + logStatus: "complete", +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Agent. + +
+
+ +
+
+ +**logId:** `string` — Unique identifier for the Log. + +
+
+ +
+
+ +**request:** `Humanloop.UpdateAgentLogRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.callStream({ ...params }) -> core.Stream +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+according to its configuration.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass
+Agent details in the request body. A new version is created if it does not match
+any existing ones. This is helpful when you are storing or deriving
+your Agent details in code.
+
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +const response = await client.agents.callStream({}); +for await (const item of response) { + console.log(item); +} +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Humanloop.AgentsCallStreamRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
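+The empty object above streams from the deployed default. A slightly fuller sketch, reusing the `path` and `messages` shape from the non-streaming `call` example below (the exact shape of each streamed item is not shown in this diff):
+
+```typescript
+// Assumes `client` is a configured HumanloopClient.
+const response = await client.agents.callStream({
+    path: "Banking/Teller Agent",
+    messages: [{ role: "user", content: "What savings products do you offer?" }],
+});
+
+// Each item is a streamed chunk of the Agent Log as it is produced.
+for await (const item of response) {
+    console.log(item);
+}
+```
+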
+ +
client.agents.call({ ...params }) -> Humanloop.AgentCallResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+
+If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+
+The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+according to its configuration.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass
+Agent details in the request body. A new version is created if it does not match
+any existing ones. This is helpful when you are storing or deriving
+your Agent details in code.
+
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.call({ + path: "Banking/Teller Agent", + messages: [ + { + role: "user", + content: "I'd like to deposit $1000 to my savings account from my checking account.", + }, + ], +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Humanloop.AgentsCallRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.continueCallStream({ ...params }) -> core.Stream +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Continue an incomplete Agent call.
+
+This endpoint allows continuing an existing incomplete Agent call by passing the tool call
+requested by the Agent. The Agent will resume processing from where it left off.
+
+The messages in the request will be appended to the original messages in the Log. You do not
+have to provide the previous conversation history.
+
+The original Log must be in an incomplete state to be continued.
+
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +const response = await client.agents.continueCallStream({ + logId: "log_id", + messages: [ + { + role: "user", + }, + ], +}); +for await (const item of response) { + console.log(item); +} +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Humanloop.AgentsContinueCallStreamRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.continueCall({ ...params }) -> Humanloop.AgentContinueCallResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Continue an incomplete Agent call.
+
+This endpoint allows continuing an existing incomplete Agent call by passing the tool call
+requested by the Agent. The Agent will resume processing from where it left off.
+
+The messages in the request will be appended to the original messages in the Log. You do not
+have to provide the previous conversation history.
+
+The original Log must be in an incomplete state to be continued.
+
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.continueCall({ + logId: "log_1234567890", + messages: [ + { + role: "tool", + content: '{"type": "checking", "balance": 5200}', + toolCallId: "tc_1234567890", + }, + ], +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Humanloop.AgentsContinueCallRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
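+The halt-and-continue protocol described above can be sketched end to end. The `logStatus`, `toolCalls`, and `id` properties on the call response are assumptions used for illustration, and `runToolLocally` is a hypothetical stand-in for your own tool executor:
+
+```typescript
+// Hypothetical local executor for tool calls Humanloop cannot run itself.
+async function runToolLocally(toolCall: unknown): Promise<unknown> {
+    return { type: "checking", balance: 5200 }; // mirrors the documented example payload
+}
+
+// Assumes `client` is a configured HumanloopClient.
+const response = await client.agents.call({
+    path: "Banking/Teller Agent",
+    messages: [{ role: "user", content: "What is my checking balance?" }],
+});
+
+// If the Agent halted on a tool call it could not run, execute it locally
+// and hand the result back so the Agent can resume.
+if (response.logStatus === "incomplete" && response.toolCalls?.length) {
+    const toolCall = response.toolCalls[0];
+    const result = await runToolLocally(toolCall);
+    await client.agents.continueCall({
+        logId: response.id,
+        messages: [{ role: "tool", content: JSON.stringify(result), toolCallId: toolCall.id }],
+    });
+}
+```
+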
+ +
client.agents.list({ ...params }) -> core.Page +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a list of all Agents. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +const response = await client.agents.list({ + size: 1, +}); +for await (const item of response) { + console.log(item); +} + +// Or you can manually iterate page-by-page +const page = await client.agents.list({ + size: 1, +}); +while (page.hasNextPage()) { + page = page.getNextPage(); +} +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Humanloop.ListAgentsGetRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.upsert({ ...params }) -> Humanloop.AgentResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create an Agent or update it with a new version if it already exists. + +Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and +tools determine the versions of the Agent. + +You can provide `version_name` and `version_description` to identify and describe your versions. +Version names must be unique within an Agent - attempting to create a version with a name +that already exists will result in a 409 Conflict error. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.upsert({ + path: "Banking/Teller Agent", + provider: "anthropic", + endpoint: "chat", + model: "claude-3-7-sonnet-latest", + reasoningEffort: 1024, + template: [ + { + role: "system", + content: "You are a helpful digital assistant, helping users navigate our digital banking platform.", + }, + ], + maxIterations: 3, + tools: [ + { + type: "inline", + jsonSchema: { + name: "stop", + description: "Call this tool when you have finished your task.", + parameters: { + type: "object", + properties: { + output: { + type: "string", + description: "The final output to return to the user.", + }, + }, + additionalProperties: false, + required: ["output"], + }, + strict: true, + }, + onAgentCall: "stop", + }, + ], + versionName: "teller-agent-v1", + versionDescription: "Initial version", +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Humanloop.AgentRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.deleteAgentVersion(id, versionId) -> void +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a version of the Agent. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.deleteAgentVersion("ag_1234567890", "agv_1234567890"); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Agent. + +
+
+ +
+
+ +**versionId:** `string` — Unique identifier for the specific version of the Agent. + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.patchAgentVersion(id, versionId, { ...params }) -> Humanloop.AgentResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Update the name or description of the Agent version. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.patchAgentVersion("ag_1234567890", "agv_1234567890", { + name: "teller-agent-v2", + description: "Updated version", +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Agent. + +
+
+ +
+
+ +**versionId:** `string` — Unique identifier for the specific version of the Agent. + +
+
+ +
+
+ +**request:** `Humanloop.UpdateVersionRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.get(id, { ...params }) -> Humanloop.AgentResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Retrieve the Agent with the given ID. + +By default, the deployed version of the Agent is returned. Use the query parameters +`version_id` or `environment` to target a specific version of the Agent. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.get("ag_1234567890"); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Agent. + +
+
+ +
+
+ +**request:** `Humanloop.GetAgentsIdGetRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.delete(id) -> void +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete the Agent with the given ID. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.delete("ag_1234567890"); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Agent. + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.move(id, { ...params }) -> Humanloop.AgentResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Move the Agent to a different path or change the name. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.move("ag_1234567890", { + path: "new directory/new name", +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Agent. + +
+
+ +
+
+ +**request:** `Humanloop.UpdateAgentRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.listVersions(id, { ...params }) -> Humanloop.ListAgents +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Get a list of all the versions of an Agent.
+
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.listVersions("ag_1234567890"); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Agent. + +
+
+ +
+
+ +**request:** `Humanloop.ListVersionsAgentsIdVersionsGetRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.setDeployment(id, environmentId, { ...params }) -> Humanloop.AgentResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Deploy Agent to an Environment.
+
+Set the deployed version for the specified Environment. This version
+will be used for calls made to the Agent in this Environment.
+
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.setDeployment("id", "environment_id", { + versionId: "version_id", +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Agent. + +
+
+ +
+
+ +**environmentId:** `string` — Unique identifier for the Environment to deploy the Version to. + +
+
+ +
+
+ +**request:** `Humanloop.SetDeploymentAgentsIdEnvironmentsEnvironmentIdPostRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.removeDeployment(id, environmentId) -> void +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Remove deployed Agent from the Environment.
+
+Remove the deployed version for the specified Environment. This version
+will no longer be used for calls made to the Agent in this Environment.
+
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.removeDeployment("id", "environment_id"); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Agent. + +
+
+ +
+
+ +**environmentId:** `string` — Unique identifier for the Environment to remove the deployment from. + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.listEnvironments(id) -> Humanloop.FileEnvironmentResponse[] +
+
+ +#### 📝 Description + +
+
+ +
+
+ +List all Environments and their deployed versions for the Agent. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.listEnvironments("ag_1234567890"); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Agent. + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.updateMonitoring(id, { ...params }) -> Humanloop.AgentResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Activate and deactivate Evaluators for monitoring the Agent. + +An activated Evaluator will automatically be run on all new Logs +within the Agent for monitoring purposes. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.updateMonitoring("ag_1234567890", { + activate: [ + { + evaluatorVersionId: "ev_1234567890", + }, + { + evaluatorId: "ev_2345678901", + environmentId: "env_1234567890", + }, + ], + deactivate: [ + { + evaluatorVersionId: "ev_0987654321", + }, + ], +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` + +
+
+ +
+
+ +**request:** `Humanloop.EvaluatorActivationDeactivationRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.serialize(id, { ...params }) -> void +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Serialize an Agent to the .agent file format. + +Useful for storing the Agent with your code in a version control system, +or for editing with an AI tool. + +By default, the deployed version of the Agent is returned. Use the query parameters +`version_id` or `environment` to target a specific version of the Agent. + +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.serialize("id"); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Unique identifier for Agent. + +
+
+ +
+
+ +**request:** `Humanloop.SerializeAgentsIdSerializeGetRequest` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.agents.deserialize({ ...params }) -> Humanloop.AgentKernelRequest +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Deserialize an Agent from the .agent file format.
+
+This returns a subset of the attributes required by an Agent.
+This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.agents.deserialize({ + agent: "agent", +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Humanloop.BodyDeserializeAgentsDeserializePost` + +
+
+ +
+
+ +**requestOptions:** `Agents.RequestOptions` + +
+
+
+
+ +
+
+
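+One way to use the `.agent` workflow above: keep the `.agent` file in your repository, deserialize it, and feed the resulting kernel into `upsert` to recreate that version. This is a sketch; it assumes the `agent` field accepts raw file contents and that the returned `AgentKernelRequest` can be spread into an `AgentRequest`, neither of which is confirmed by this diff:
+
+```typescript
+import { readFile } from "node:fs/promises";
+
+// Assumes `client` is a configured HumanloopClient.
+const raw = await readFile("agents/teller.agent", "utf-8");
+
+// Recover the version-defining attributes (model, temperature, tools, ...).
+const kernel = await client.agents.deserialize({ agent: raw });
+
+// Hypothetical: recreate the version under a path of your choosing.
+await client.agents.upsert({
+    ...kernel,
+    path: "Banking/Teller Agent",
+    versionName: "from-dot-agent-file",
+});
+```
+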
+ ## Directories
client.directories.list() -> Humanloop.DirectoryResponse[] @@ -5770,7 +7779,7 @@ await client.directories.update("id"); ## Files -
client.files.listFiles({ ...params }) -> Humanloop.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse +
client.files.listFiles({ ...params }) -> Humanloop.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
diff --git a/src/Client.ts b/src/Client.ts index 06394656..7f0ce136 100644 --- a/src/Client.ts +++ b/src/Client.ts @@ -9,6 +9,7 @@ import { Tools } from "./api/resources/tools/client/Client"; import { Datasets } from "./api/resources/datasets/client/Client"; import { Evaluators } from "./api/resources/evaluators/client/Client"; import { Flows } from "./api/resources/flows/client/Client"; +import { Agents } from "./api/resources/agents/client/Client"; import { Directories } from "./api/resources/directories/client/Client"; import { Files } from "./api/resources/files/client/Client"; import { Evaluations } from "./api/resources/evaluations/client/Client"; @@ -41,6 +42,7 @@ export class HumanloopClient { protected _datasets: Datasets | undefined; protected _evaluators: Evaluators | undefined; protected _flows: Flows | undefined; + protected _agents: Agents | undefined; protected _directories: Directories | undefined; protected _files: Files | undefined; protected _evaluations: Evaluations | undefined; @@ -68,6 +70,10 @@ export class HumanloopClient { return (this._flows ??= new Flows(this._options)); } + public get agents(): Agents { + return (this._agents ??= new Agents(this._options)); + } + public get directories(): Directories { return (this._directories ??= new Directories(this._options)); } diff --git a/src/api/resources/agents/client/Client.ts b/src/api/resources/agents/client/Client.ts new file mode 100644 index 00000000..842af06d --- /dev/null +++ b/src/api/resources/agents/client/Client.ts @@ -0,0 +1,1943 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../environments"; +import * as core from "../../../../core"; +import * as Humanloop from "../../../index"; +import * as serializers from "../../../../serialization/index"; +import urlJoin from "url-join"; +import * as errors from "../../../../errors/index"; +import * as stream from "stream"; + +export declare namespace Agents { + export interface Options { + environment?: core.Supplier; + /** Specify a custom URL to connect the client to. */ + baseUrl?: core.Supplier; + apiKey?: core.Supplier; + fetcher?: core.FetchFunction; + } + + export interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + /** Additional headers to include in the request. */ + headers?: Record; + } +} + +export class Agents { + constructor(protected readonly _options: Agents.Options = {}) {} + + /** + * Create an Agent Log. + * + * You can use query parameters `version_id`, or `environment`, to target + * an existing version of the Agent. Otherwise, the default deployed version will be chosen. + * + * If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` + * in order to trigger Evaluators. + * + * @param {Humanloop.AgentLogRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.log() + */ + public async log( + request: Humanloop.AgentLogRequest = {}, + requestOptions?: Agents.RequestOptions, + ): Promise { + const { versionId, environment, ..._body } = request; + const _queryParams: Record = {}; + if (versionId != null) { + _queryParams["version_id"] = versionId; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + "agents/log", + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: serializers.AgentLogRequest.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.CreateAgentLogResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling POST /agents/log."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Update a Log. + * + * Update the details of a Log with the given ID. + * + * @param {string} id - Unique identifier for Agent. + * @param {string} logId - Unique identifier for the Log. + * @param {Humanloop.UpdateAgentLogRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.updateLog("id", "log_id") + */ + public async updateLog( + id: string, + logId: string, + request: Humanloop.UpdateAgentLogRequest = {}, + requestOptions?: Agents.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? 
+ environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}/log/${encodeURIComponent(logId)}`, + ), + method: "PATCH", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + body: serializers.UpdateAgentLogRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.LogResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError( + "Timeout exceeded when calling PATCH /agents/{id}/log/{log_id}.", + ); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Call an Agent. + * + * Calling an Agent calls the model provider before logging + * the request, responses and metadata to Humanloop. + * + * You can use query parameters `version_id`, or `environment`, to target + * an existing version of the Agent. Otherwise the default deployed version will be chosen. + * + * Instead of targeting an existing version explicitly, you can instead pass in + * Agent details in the request body. In this case, we will check if the details correspond + * to an existing version of the Agent. If they do not, we will create a new version. This is helpful + * in the case where you are storing or deriving your Agent details in code. + */ + public async callStream( + request: Humanloop.AgentsCallStreamRequest, + requestOptions?: Agents.RequestOptions, + ): Promise> { + const { versionId, environment, ..._body } = request; + const _queryParams: Record = {}; + if (versionId != null) { + _queryParams["version_id"] = versionId; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? 
+ environments.HumanloopEnvironment.Default, + "agents/call", + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: { + ...serializers.AgentsCallStreamRequest.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip" }), + stream: true, + }, + responseType: "sse", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return new core.Stream({ + stream: _response.body, + parse: async (data) => { + return serializers.AgentContinueCallStreamResponse.parseOrThrow(data, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + }, + signal: requestOptions?.abortSignal, + eventShape: { + type: "sse", + streamTerminator: "[DONE]", + }, + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling POST /agents/call."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Call an Agent. + * + * Calling an Agent calls the model provider before logging + * the request, responses and metadata to Humanloop. + * + * You can use query parameters `version_id`, or `environment`, to target + * an existing version of the Agent. Otherwise the default deployed version will be chosen. + * + * Instead of targeting an existing version explicitly, you can instead pass in + * Agent details in the request body. In this case, we will check if the details correspond + * to an existing version of the Agent. If they do not, we will create a new version. This is helpful + * in the case where you are storing or deriving your Agent details in code. + * + * @param {Humanloop.AgentsCallRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. 
+ * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.call({}) + */ + public async call( + request: Humanloop.AgentsCallRequest, + requestOptions?: Agents.RequestOptions, + ): Promise { + const { versionId, environment, ..._body } = request; + const _queryParams: Record = {}; + if (versionId != null) { + _queryParams["version_id"] = versionId; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + "agents/call", + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + body: { + ...serializers.AgentsCallRequest.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip" }), + stream: false, + }, + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.AgentContinueCallResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling POST /agents/call."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Continue an incomplete Agent call. + * + * This endpoint allows continuing an existing incomplete Agent call, using the context + * from the previous interaction. The Agent will resume processing from where it left off. + * + * The original log must be in an incomplete state to be continued. + * + * The messages in the request will be appended + * to the original messages in the log. + */ + public async continueStream( + request: Humanloop.AgentsContinueCallStreamRequest, + requestOptions?: Agents.RequestOptions, + ): Promise> { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? 
+ environments.HumanloopEnvironment.Default, + "agents/continue", + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + body: { + ...serializers.AgentsContinueCallStreamRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + stream: true, + }, + responseType: "sse", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return new core.Stream({ + stream: _response.body, + parse: async (data) => { + return serializers.AgentContinueCallStreamResponse.parseOrThrow(data, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + }, + signal: requestOptions?.abortSignal, + eventShape: { + type: "sse", + streamTerminator: "[DONE]", + }, + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling POST /agents/continue."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Continue an incomplete Agent call. + * + * This endpoint allows continuing an existing incomplete Agent call, using the context + * from the previous interaction. The Agent will resume processing from where it left off. + * + * The original log must be in an incomplete state to be continued. + * + * The messages in the request will be appended + * to the original messages in the log. + * + * @param {Humanloop.AgentsContinueRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.continue({ + * logId: "log_id", + * messages: [{ + * role: "user" + * }] + * }) + */ + public async continue( + request: Humanloop.AgentsContinueCallRequest, + requestOptions?: Agents.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? 
+ environments.HumanloopEnvironment.Default, + "agents/continue", + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + body: { + ...serializers.AgentsContinueCallRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + stream: false, + }, + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.AgentContinueCallResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling POST /agents/continue."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Get a list of all Agents. + * + * @param {Humanloop.ListAgentsGetRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.list() + */ + public async list( + request: Humanloop.ListAgentsGetRequest = {}, + requestOptions?: Agents.RequestOptions, + ): Promise { + const { page, size, name, userFilter, sortBy, order } = request; + const _queryParams: Record = {}; + if (page != null) { + _queryParams["page"] = page.toString(); + } + + if (size != null) { + _queryParams["size"] = size.toString(); + } + + if (name != null) { + _queryParams["name"] = name; + } + + if (userFilter != null) { + _queryParams["user_filter"] = userFilter; + } + + if (sortBy != null) { + _queryParams["sort_by"] = serializers.ProjectSortBy.jsonOrThrow(sortBy, { + unrecognizedObjectKeys: "strip", + }); + } + + if (order != null) { + _queryParams["order"] = serializers.SortOrder.jsonOrThrow(order, { unrecognizedObjectKeys: "strip" }); + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? 
+ environments.HumanloopEnvironment.Default, + "agents", + ), + method: "GET", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.PaginatedDataAgentResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling GET /agents."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Create an Agent or update it with a new version if it already exists. + * + * Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and + * tools determine the versions of the Agent. + * + * You can provide `version_name` and `version_description` to identify and describe your versions. + * Version names must be unique within an Agent - attempting to create a version with a name + * that already exists will result in a 409 Conflict error. + * + * @param {Humanloop.AgentRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.upsert({ + * model: "model" + * }) + */ + public async upsert( + request: Humanloop.AgentRequest, + requestOptions?: Agents.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? 
+ environments.HumanloopEnvironment.Default, + "agents", + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + body: serializers.AgentRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.AgentResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling POST /agents."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Delete a version of the Agent. + * + * @param {string} id - Unique identifier for Agent. + * @param {string} versionId - Unique identifier for the specific version of the Agent. + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.deleteAgentVersion("id", "version_id") + */ + public async deleteAgentVersion( + id: string, + versionId: string, + requestOptions?: Agents.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}/versions/${encodeURIComponent(versionId)}`, + ), + method: "DELETE", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return; + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError( + "Timeout exceeded when calling DELETE /agents/{id}/versions/{version_id}.", + ); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Update the name or description of the Agent version. + * + * @param {string} id - Unique identifier for Agent. + * @param {string} versionId - Unique identifier for the specific version of the Agent. + * @param {Humanloop.UpdateVersionRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.patchAgentVersion("id", "version_id", {}) + */ + public async patchAgentVersion( + id: string, + versionId: string, + request: Humanloop.UpdateVersionRequest, + requestOptions?: Agents.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}/versions/${encodeURIComponent(versionId)}`, + ), + method: "PATCH", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + body: serializers.UpdateVersionRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.AgentResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError( + "Timeout exceeded when calling PATCH /agents/{id}/versions/{version_id}.", + ); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Retrieve the Agent with the given ID. + * + * By default, the deployed version of the Agent is returned. Use the query parameters + * `version_id` or `environment` to target a specific version of the Agent. + * + * @param {string} id - Unique identifier for Agent. + * @param {Humanloop.GetAgentsIdGetRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.get("id") + */ + public async get( + id: string, + request: Humanloop.GetAgentsIdGetRequest = {}, + requestOptions?: Agents.RequestOptions, + ): Promise { + const { versionId, environment } = request; + const _queryParams: Record = {}; + if (versionId != null) { + _queryParams["version_id"] = versionId; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}`, + ), + method: "GET", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.AgentResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling GET /agents/{id}."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Delete the Agent with the given ID. + * + * @param {string} id - Unique identifier for Agent. + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.delete("id") + */ + public async delete(id: string, requestOptions?: Agents.RequestOptions): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}`, + ), + method: "DELETE", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return; + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling DELETE /agents/{id}."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Move the Agent to a different path or change the name. + * + * @param {string} id - Unique identifier for Agent. + * @param {Humanloop.UpdateAgentRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.move("id") + */ + public async move( + id: string, + request: Humanloop.UpdateAgentRequest = {}, + requestOptions?: Agents.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}`, + ), + method: "PATCH", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + body: serializers.UpdateAgentRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.AgentResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling PATCH /agents/{id}."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Get a list of all the versions of a Agent. + * + * @param {string} id - Unique identifier for Agent. + * @param {Humanloop.ListVersionsAgentsIdVersionsGetRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.listVersions("id") + */ + public async listVersions( + id: string, + request: Humanloop.ListVersionsAgentsIdVersionsGetRequest = {}, + requestOptions?: Agents.RequestOptions, + ): Promise { + const { evaluatorAggregates } = request; + const _queryParams: Record = {}; + if (evaluatorAggregates != null) { + _queryParams["evaluator_aggregates"] = evaluatorAggregates.toString(); + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}/versions`, + ), + method: "GET", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.ListAgents.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling GET /agents/{id}/versions."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Deploy Agent to an Environment. + * + * Set the deployed version for the specified Environment. This Agent + * will be used for calls made to the Agent in this Environment. + * + * @param {string} id - Unique identifier for Agent. + * @param {string} environmentId - Unique identifier for the Environment to deploy the Version to. + * @param {Humanloop.SetDeploymentAgentsIdEnvironmentsEnvironmentIdPostRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.setDeployment("id", "environment_id", { + * versionId: "version_id" + * }) + */ + public async setDeployment( + id: string, + environmentId: string, + request: Humanloop.SetDeploymentAgentsIdEnvironmentsEnvironmentIdPostRequest, + requestOptions?: Agents.RequestOptions, + ): Promise { + const { versionId } = request; + const _queryParams: Record = {}; + _queryParams["version_id"] = versionId; + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}/environments/${encodeURIComponent(environmentId)}`, + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.AgentResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError( + "Timeout exceeded when calling POST /agents/{id}/environments/{environment_id}.", + ); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Remove deployed Agent from the Environment. + * + * Remove the deployed version for the specified Environment. This Agent + * will no longer be used for calls made to the Agent in this Environment. + * + * @param {string} id - Unique identifier for Agent. + * @param {string} environmentId - Unique identifier for the Environment to remove the deployment from. + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.removeDeployment("id", "environment_id") + */ + public async removeDeployment( + id: string, + environmentId: string, + requestOptions?: Agents.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}/environments/${encodeURIComponent(environmentId)}`, + ), + method: "DELETE", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return; + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError( + "Timeout exceeded when calling DELETE /agents/{id}/environments/{environment_id}.", + ); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * List all Environments and their deployed versions for the Agent. + * + * @param {string} id - Unique identifier for Agent. + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.listEnvironments("id") + */ + public async listEnvironments( + id: string, + requestOptions?: Agents.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}/environments`, + ), + method: "GET", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.agents.listEnvironments.Response.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling GET /agents/{id}/environments."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Activate and deactivate Evaluators for monitoring the Agent. + * + * An activated Evaluator will automatically be run on all new Logs + * within the Agent for monitoring purposes. + * + * @param {string} id + * @param {Humanloop.EvaluatorActivationDeactivationRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.updateMonitoring("id", {}) + */ + public async updateMonitoring( + id: string, + request: Humanloop.EvaluatorActivationDeactivationRequest, + requestOptions?: Agents.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}/evaluators`, + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + body: serializers.EvaluatorActivationDeactivationRequest.jsonOrThrow(request, { + unrecognizedObjectKeys: "strip", + }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.AgentResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling POST /agents/{id}/evaluators."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Serialize an Agent to the .agent file format. + * + * Useful for storing the Agent with your code in a version control system, + * or for editing with an AI tool. + * + * By default, the deployed version of the Agent is returned. Use the query parameters + * `version_id` or `environment` to target a specific version of the Agent. + * + * @param {string} id - Unique identifier for Agent. + * @param {Humanloop.SerializeAgentsIdSerializeGetRequest} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.serialize("id") + */ + public async serialize( + id: string, + request: Humanloop.SerializeAgentsIdSerializeGetRequest = {}, + requestOptions?: Agents.RequestOptions, + ): Promise { + const { versionId, environment } = request; + const _queryParams: Record = {}; + if (versionId != null) { + _queryParams["version_id"] = versionId; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `agents/${encodeURIComponent(id)}/serialize`, + ), + method: "GET", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return _response.body; + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling GET /agents/{id}/serialize."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Deserialize an Agent from the .agent file format. + * + * This returns a subset of the attributes required by an Agent. + * This subset is the bit that defines the Agent version (e.g. with `model` and `temperature` etc) + * + * @param {Humanloop.BodyDeserializeAgentsDeserializePost} request + * @param {Agents.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.agents.deserialize({ + * agent: "agent" + * }) + */ + public async deserialize( + request: Humanloop.BodyDeserializeAgentsDeserializePost, + requestOptions?: Agents.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + "agents/deserialize", + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + body: serializers.BodyDeserializeAgentsDeserializePost.jsonOrThrow(request, { + unrecognizedObjectKeys: "strip", + }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.AgentKernelRequest.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling POST /agents/deserialize."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getCustomAuthorizationHeaders() { + const apiKeyValue = (await core.Supplier.get(this._options.apiKey)) ?? process?.env["HUMANLOOP_API_KEY"]; + return { "X-API-KEY": apiKeyValue }; + } +} diff --git a/src/api/resources/agents/client/index.ts b/src/api/resources/agents/client/index.ts new file mode 100644 index 00000000..415726b7 --- /dev/null +++ b/src/api/resources/agents/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/src/api/resources/agents/client/requests/AgentLogRequest.ts b/src/api/resources/agents/client/requests/AgentLogRequest.ts new file mode 100644 index 00000000..5852bf02 --- /dev/null +++ b/src/api/resources/agents/client/requests/AgentLogRequest.ts @@ -0,0 +1,132 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../../index"; + +/** + * @example + * { + * path: "Banking/Teller Agent", + * agent: { + * provider: "anthropic", + * endpoint: "chat", + * model: "claude-3-7-sonnet-latest", + * reasoningEffort: 1024, + * template: [{ + * role: "system", + * content: "You are a helpful digital assistant, helping users navigate our digital banking platform." + * }], + * maxIterations: 3, + * tools: [{ + * type: "file", + * link: { + * fileId: "pr_1234567890", + * versionId: "prv_1234567890" + * }, + * onAgentCall: "continue" + * }, { + * type: "inline", + * jsonSchema: { + * name: "stop", + * description: "Call this tool when you have finished your task.", + * parameters: { + * "type": "object", + * "properties": { + * "output": { + * "type": "string", + * "description": "The final output to return to the user." + * } + * }, + * "additionalProperties": false, + * "required": [ + * "output" + * ] + * }, + * strict: true + * }, + * onAgentCall: "stop" + * }] + * } + * } + */ +export interface AgentLogRequest { + /** + * A specific Version ID of the Agent to log to. + */ + versionId?: string; + /** + * Name of the Environment identifying a deployed version to log to. + */ + environment?: string; + /** Unique identifier for the Run to associate the Log to. */ + runId?: string; + /** Path of the Agent, including the name. 
This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. */ + path?: string; + /** ID for an existing Agent. */ + id?: string; + /** The message returned by the provider. */ + outputMessage?: Humanloop.ChatMessage; + /** Number of tokens in the prompt used to generate the output. */ + promptTokens?: number; + /** Number of reasoning tokens used to generate the output. */ + reasoningTokens?: number; + /** Number of tokens in the output generated by the model. */ + outputTokens?: number; + /** Cost in dollars associated to the tokens in the prompt. */ + promptCost?: number; + /** Cost in dollars associated to the tokens in the output. */ + outputCost?: number; + /** Reason the generation finished. */ + finishReason?: string; + /** The messages passed to the to provider chat endpoint. */ + messages?: Humanloop.ChatMessage[]; + /** + * Controls how the model uses tools. The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + */ + toolChoice?: Humanloop.AgentLogRequestToolChoice; + /** Details of your Agent. A new Agent version will be created if the provided details are new. */ + agent?: Humanloop.AgentKernelRequest; + /** When the logged event started. */ + startTime?: Date; + /** When the logged event ended. */ + endTime?: Date; + /** Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. */ + output?: string; + /** User defined timestamp for when the log was created. */ + createdAt?: Date; + /** Error message if the log is an error. */ + error?: string; + /** Duration of the logged event in seconds. */ + providerLatency?: number; + /** Captured log and debug statements. */ + stdout?: string; + /** Raw request sent to provider. */ + providerRequest?: Record; + /** Raw response received the provider. */ + providerResponse?: Record; + /** The inputs passed to the prompt template. */ + inputs?: Record; + /** Identifies where the model was called from. */ + source?: string; + /** Any additional metadata to record. */ + metadata?: Record; + /** Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. */ + logStatus?: Humanloop.LogStatus; + /** Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. */ + sourceDatapointId?: string; + /** The ID of the parent Log to nest this Log under in a Trace. */ + traceParentId?: string; + /** End-user ID related to the Log. */ + user?: string; + /** The name of the Environment the Log is associated to. 
*/ + agentLogRequestEnvironment?: string; + /** Whether the request/response payloads will be stored on Humanloop. */ + save?: boolean; + /** This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. */ + logId?: string; +} diff --git a/src/api/resources/agents/client/requests/AgentRequest.ts b/src/api/resources/agents/client/requests/AgentRequest.ts new file mode 100644 index 00000000..7dd42f9d --- /dev/null +++ b/src/api/resources/agents/client/requests/AgentRequest.ts @@ -0,0 +1,103 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../../index"; + +/** + * @example + * { + * path: "Banking/Teller Agent", + * provider: "anthropic", + * endpoint: "chat", + * model: "claude-3-7-sonnet-latest", + * reasoningEffort: 1024, + * template: [{ + * role: "system", + * content: "You are a helpful digital assistant, helping users navigate our digital banking platform." + * }], + * maxIterations: 3, + * tools: [{ + * type: "inline", + * jsonSchema: { + * name: "stop", + * description: "Call this tool when you have finished your task.", + * parameters: { + * "type": "object", + * "properties": { + * "output": { + * "type": "string", + * "description": "The final output to return to the user." + * } + * }, + * "additionalProperties": false, + * "required": [ + * "output" + * ] + * }, + * strict: true + * }, + * onAgentCall: "stop" + * }], + * versionName: "teller-agent-v1", + * versionDescription: "Initial version" + * } + */ +export interface AgentRequest { + /** Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. */ + path?: string; + /** ID for an existing Agent. */ + id?: string; + /** The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) */ + model: string; + /** The provider model endpoint used. */ + endpoint?: Humanloop.ModelEndpoints; + /** + * The template contains the main structure and instructions for the model, including input variables for dynamic values. + * + * For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + * For completion models, provide a prompt template as a string. + * + * Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + */ + template?: Humanloop.AgentRequestTemplate; + /** The template language to use for rendering the template. */ + templateLanguage?: Humanloop.TemplateLanguage; + /** The company providing the underlying model service. */ + provider?: Humanloop.ModelProviders; + /** The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt */ + maxTokens?: number; + /** What sampling temperature to use when making a generation. Higher values means the model will be more creative. */ + temperature?: number; + /** An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. */ + topP?: number; + /** The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. */ + stop?: Humanloop.AgentRequestStop; + /** Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the generation so far. */ + presencePenalty?: number; + /** Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. */ + frequencyPenalty?: number; + /** Other parameter values to be passed to the provider call. */ + other?: Record; + /** If specified, model will make a best effort to sample deterministically, but it is not guaranteed. */ + seed?: number; + /** The format of the response. Only `{"type": "json_object"}` is currently supported for chat. */ + responseFormat?: Humanloop.ResponseFormat; + /** Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. */ + reasoningEffort?: Humanloop.AgentRequestReasoningEffort; + tools?: Humanloop.AgentRequestToolsItem[]; + /** Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. */ + attributes?: Record; + /** The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. */ + maxIterations?: number; + /** Unique name for the Prompt version. Each Prompt can only have one version with a given name. */ + versionName?: string; + /** Description of the Version. */ + versionDescription?: string; + /** Description of the Prompt. */ + description?: string; + /** List of tags associated with this prompt. */ + tags?: string[]; + /** Long description of the Prompt. */ + readme?: string; +} diff --git a/src/api/resources/agents/client/requests/AgentsCallRequest.ts b/src/api/resources/agents/client/requests/AgentsCallRequest.ts new file mode 100644 index 00000000..1f554921 --- /dev/null +++ b/src/api/resources/agents/client/requests/AgentsCallRequest.ts @@ -0,0 +1,72 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../../index"; + +/** + * @example + * { + * path: "Banking/Teller Agent", + * messages: [{ + * role: "user", + * content: "I'd like to deposit $1000 to my savings account from my checking account." + * }] + * } + */ +export interface AgentsCallRequest { + /** + * A specific Version ID of the Agent to log to. + */ + versionId?: string; + /** + * Name of the Environment identifying a deployed version to log to. + */ + environment?: string; + /** Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. */ + path?: string; + /** ID for an existing Agent. */ + id?: string; + /** The messages passed to the to provider chat endpoint. */ + messages?: Humanloop.ChatMessage[]; + /** + * Controls how the model uses tools. The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. 
+ */ + toolChoice?: Humanloop.AgentsCallRequestToolChoice; + /** Details of your Agent. A new Agent version will be created if the provided details are new. */ + agent?: Humanloop.AgentKernelRequest; + /** The inputs passed to the prompt template. */ + inputs?: Record; + /** Identifies where the model was called from. */ + source?: string; + /** Any additional metadata to record. */ + metadata?: Record; + /** When the logged event started. */ + startTime?: Date; + /** When the logged event ended. */ + endTime?: Date; + /** Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. */ + logStatus?: Humanloop.LogStatus; + /** Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. */ + sourceDatapointId?: string; + /** The ID of the parent Log to nest this Log under in a Trace. */ + traceParentId?: string; + /** End-user ID related to the Log. */ + user?: string; + /** The name of the Environment the Log is associated to. */ + agentsCallRequestEnvironment?: string; + /** Whether the request/response payloads will be stored on Humanloop. */ + save?: boolean; + /** This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. */ + logId?: string; + /** API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. */ + providerApiKeys?: Humanloop.ProviderApiKeys; + /** Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. */ + returnInputs?: boolean; + /** If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. */ + includeTraceChildren?: boolean; +} diff --git a/src/api/resources/agents/client/requests/AgentsCallStreamRequest.ts b/src/api/resources/agents/client/requests/AgentsCallStreamRequest.ts new file mode 100644 index 00000000..46ef71f9 --- /dev/null +++ b/src/api/resources/agents/client/requests/AgentsCallStreamRequest.ts @@ -0,0 +1,66 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../../index"; + +/** + * @example + * {} + */ +export interface AgentsCallStreamRequest { + /** + * A specific Version ID of the Agent to log to. + */ + versionId?: string; + /** + * Name of the Environment identifying a deployed version to log to. + */ + environment?: string; + /** Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. */ + path?: string; + /** ID for an existing Agent. */ + id?: string; + /** The messages passed to the to provider chat endpoint. */ + messages?: Humanloop.ChatMessage[]; + /** + * Controls how the model uses tools. 
The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + */ + toolChoice?: Humanloop.AgentsCallStreamRequestToolChoice; + /** Details of your Agent. A new Agent version will be created if the provided details are new. */ + agent?: Humanloop.AgentKernelRequest; + /** The inputs passed to the prompt template. */ + inputs?: Record; + /** Identifies where the model was called from. */ + source?: string; + /** Any additional metadata to record. */ + metadata?: Record; + /** When the logged event started. */ + startTime?: Date; + /** When the logged event ended. */ + endTime?: Date; + /** Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. */ + logStatus?: Humanloop.LogStatus; + /** Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. */ + sourceDatapointId?: string; + /** The ID of the parent Log to nest this Log under in a Trace. */ + traceParentId?: string; + /** End-user ID related to the Log. */ + user?: string; + /** The name of the Environment the Log is associated to. */ + agentsCallStreamRequestEnvironment?: string; + /** Whether the request/response payloads will be stored on Humanloop. */ + save?: boolean; + /** This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. */ + logId?: string; + /** API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. */ + providerApiKeys?: Humanloop.ProviderApiKeys; + /** Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. */ + returnInputs?: boolean; + /** If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. */ + includeTraceChildren?: boolean; +} diff --git a/src/api/resources/agents/client/requests/AgentsContinueCallRequest.ts b/src/api/resources/agents/client/requests/AgentsContinueCallRequest.ts new file mode 100644 index 00000000..8f9049e7 --- /dev/null +++ b/src/api/resources/agents/client/requests/AgentsContinueCallRequest.ts @@ -0,0 +1,27 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Humanloop from "../../../../index"; + +/** + * @example + * { + * logId: "log_1234567890", + * messages: [{ + * role: "tool", + * content: "{\"type\": \"checking\", \"balance\": 5200}", + * toolCallId: "tc_1234567890" + * }] + * } + */ +export interface AgentsContinueCallRequest { + /** This identifies the Agent Log to continue. */ + logId: string; + /** The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. */ + messages: Humanloop.ChatMessage[]; + /** API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. */ + providerApiKeys?: Humanloop.ProviderApiKeys; + /** If true, populate `trace_children` for the returned Agent Log. Defaults to false. */ + includeTraceChildren?: boolean; +} diff --git a/src/api/resources/agents/client/requests/AgentsContinueCallStreamRequest.ts b/src/api/resources/agents/client/requests/AgentsContinueCallStreamRequest.ts new file mode 100644 index 00000000..e9b1abd9 --- /dev/null +++ b/src/api/resources/agents/client/requests/AgentsContinueCallStreamRequest.ts @@ -0,0 +1,25 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../../index"; + +/** + * @example + * { + * logId: "log_id", + * messages: [{ + * role: "user" + * }] + * } + */ +export interface AgentsContinueCallStreamRequest { + /** This identifies the Agent Log to continue. */ + logId: string; + /** The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. */ + messages: Humanloop.ChatMessage[]; + /** API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. */ + providerApiKeys?: Humanloop.ProviderApiKeys; + /** If true, populate `trace_children` for the returned Agent Log. Defaults to false. */ + includeTraceChildren?: boolean; +} diff --git a/src/api/resources/agents/client/requests/AgentsContinueRequest.ts b/src/api/resources/agents/client/requests/AgentsContinueRequest.ts new file mode 100644 index 00000000..eb9aee76 --- /dev/null +++ b/src/api/resources/agents/client/requests/AgentsContinueRequest.ts @@ -0,0 +1,25 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../../index"; + +/** + * @example + * { + * logId: "log_id", + * messages: [{ + * role: "user" + * }] + * } + */ +export interface AgentsContinueCallRequest { + /** This identifies the Agent Log to continue. */ + logId: string; + /** The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. */ + messages: Humanloop.ChatMessage[]; + /** API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. */ + providerApiKeys?: Humanloop.ProviderApiKeys; + /** If true, populate `trace_children` for the returned Agent Log. Defaults to false. 
*/ + includeTraceChildren?: boolean; +} diff --git a/src/api/resources/agents/client/requests/AgentsContinueStreamRequest.ts b/src/api/resources/agents/client/requests/AgentsContinueStreamRequest.ts new file mode 100644 index 00000000..f5648052 --- /dev/null +++ b/src/api/resources/agents/client/requests/AgentsContinueStreamRequest.ts @@ -0,0 +1,25 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../../index"; + +/** + * @example + * { + * logId: "log_id", + * messages: [{ + * role: "user" + * }] + * } + */ +export interface AgentsContinueStreamRequest { + /** This identifies the Agent Log to continue. */ + logId: string; + /** The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. */ + messages: Humanloop.ChatMessage[]; + /** API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. */ + providerApiKeys?: Humanloop.ProviderApiKeys; + /** If true, populate `trace_children` for the returned Agent Log. Defaults to false. */ + includeTraceChildren?: boolean; +} diff --git a/src/api/resources/agents/client/requests/BodyDeserializeAgentsDeserializePost.ts b/src/api/resources/agents/client/requests/BodyDeserializeAgentsDeserializePost.ts new file mode 100644 index 00000000..4c71e9dc --- /dev/null +++ b/src/api/resources/agents/client/requests/BodyDeserializeAgentsDeserializePost.ts @@ -0,0 +1,13 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * { + * agent: "agent" + * } + */ +export interface BodyDeserializeAgentsDeserializePost { + agent: string; +} diff --git a/src/api/resources/agents/client/requests/GetAgentsIdGetRequest.ts b/src/api/resources/agents/client/requests/GetAgentsIdGetRequest.ts new file mode 100644 index 00000000..75206d60 --- /dev/null +++ b/src/api/resources/agents/client/requests/GetAgentsIdGetRequest.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * {} + */ +export interface GetAgentsIdGetRequest { + /** + * A specific Version ID of the Agent to retrieve. + */ + versionId?: string; + /** + * Name of the Environment to retrieve a deployed Version from. + */ + environment?: string; +} diff --git a/src/api/resources/agents/client/requests/ListAgentsGetRequest.ts b/src/api/resources/agents/client/requests/ListAgentsGetRequest.ts new file mode 100644 index 00000000..2a81c3f7 --- /dev/null +++ b/src/api/resources/agents/client/requests/ListAgentsGetRequest.ts @@ -0,0 +1,38 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../../index"; + +/** + * @example + * { + * size: 1 + * } + */ +export interface ListAgentsGetRequest { + /** + * Page number for pagination. + */ + page?: number; + /** + * Page size for pagination. Number of Agents to fetch. + */ + size?: number; + /** + * Case-insensitive filter for Agent name. + */ + name?: string; + /** + * Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. + */ + userFilter?: string; + /** + * Field to sort Agents by + */ + sortBy?: Humanloop.ProjectSortBy; + /** + * Direction to sort by. 
+ */ + order?: Humanloop.SortOrder; +} diff --git a/src/api/resources/agents/client/requests/ListVersionsAgentsIdVersionsGetRequest.ts b/src/api/resources/agents/client/requests/ListVersionsAgentsIdVersionsGetRequest.ts new file mode 100644 index 00000000..5f08fbc0 --- /dev/null +++ b/src/api/resources/agents/client/requests/ListVersionsAgentsIdVersionsGetRequest.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * {} + */ +export interface ListVersionsAgentsIdVersionsGetRequest { + /** + * Whether to include Evaluator aggregate results for the versions in the response + */ + evaluatorAggregates?: boolean; +} diff --git a/src/api/resources/agents/client/requests/SerializeAgentsIdSerializeGetRequest.ts b/src/api/resources/agents/client/requests/SerializeAgentsIdSerializeGetRequest.ts new file mode 100644 index 00000000..283262eb --- /dev/null +++ b/src/api/resources/agents/client/requests/SerializeAgentsIdSerializeGetRequest.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * {} + */ +export interface SerializeAgentsIdSerializeGetRequest { + /** + * A specific Version ID of the Agent to retrieve. + */ + versionId?: string; + /** + * Name of the Environment to retrieve a deployed Version from. + */ + environment?: string; +} diff --git a/src/api/resources/agents/client/requests/SetDeploymentAgentsIdEnvironmentsEnvironmentIdPostRequest.ts b/src/api/resources/agents/client/requests/SetDeploymentAgentsIdEnvironmentsEnvironmentIdPostRequest.ts new file mode 100644 index 00000000..73ddbe8c --- /dev/null +++ b/src/api/resources/agents/client/requests/SetDeploymentAgentsIdEnvironmentsEnvironmentIdPostRequest.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * { + * versionId: "version_id" + * } + */ +export interface SetDeploymentAgentsIdEnvironmentsEnvironmentIdPostRequest { + /** + * Unique identifier for the specific version of the Agent. + */ + versionId: string; +} diff --git a/src/api/resources/agents/client/requests/UpdateAgentLogRequest.ts b/src/api/resources/agents/client/requests/UpdateAgentLogRequest.ts new file mode 100644 index 00000000..d55a2289 --- /dev/null +++ b/src/api/resources/agents/client/requests/UpdateAgentLogRequest.ts @@ -0,0 +1,37 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../../index"; + +/** + * @example + * { + * messages: [{ + * role: "user", + * content: "I need to withdraw $1000" + * }, { + * role: "assistant", + * content: "Of course! Would you like to use your savings or checking account?" + * }], + * outputMessage: { + * role: "assistant", + * content: "I'm sorry, I can't help with that." + * }, + * logStatus: "complete" + * } + */ +export interface UpdateAgentLogRequest { + /** List of chat messages that were used as an input to the Flow. */ + messages?: Humanloop.ChatMessage[]; + /** The output message returned by this Flow. */ + outputMessage?: Humanloop.ChatMessage; + /** The inputs passed to the Flow Log. */ + inputs?: Record; + /** The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. */ + output?: string; + /** The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. */ + error?: string; + /** Status of the Flow Log. 
When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. */ + logStatus?: Humanloop.LogStatus; +} diff --git a/src/api/resources/agents/client/requests/UpdateAgentRequest.ts b/src/api/resources/agents/client/requests/UpdateAgentRequest.ts new file mode 100644 index 00000000..0363e61c --- /dev/null +++ b/src/api/resources/agents/client/requests/UpdateAgentRequest.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * { + * path: "new directory/new name" + * } + */ +export interface UpdateAgentRequest { + /** Path of the Flow including the Flow name, which is used as a unique identifier. */ + path?: string; + /** Name of the Flow. */ + name?: string; + /** Unique identifier for the Directory to move Flow to. Starts with `dir_`. */ + directoryId?: string; +} diff --git a/src/api/resources/agents/client/requests/index.ts b/src/api/resources/agents/client/requests/index.ts new file mode 100644 index 00000000..c01b48ec --- /dev/null +++ b/src/api/resources/agents/client/requests/index.ts @@ -0,0 +1,14 @@ +export { type AgentLogRequest } from "./AgentLogRequest"; +export { type UpdateAgentLogRequest } from "./UpdateAgentLogRequest"; +export { type AgentsCallStreamRequest } from "./AgentsCallStreamRequest"; +export { type AgentsCallRequest } from "./AgentsCallRequest"; +export { type AgentsContinueCallStreamRequest } from "./AgentsContinueCallStreamRequest"; +export { type AgentsContinueCallRequest } from "./AgentsContinueCallRequest"; +export { type ListAgentsGetRequest } from "./ListAgentsGetRequest"; +export { type AgentRequest } from "./AgentRequest"; +export { type GetAgentsIdGetRequest } from "./GetAgentsIdGetRequest"; +export { type UpdateAgentRequest } from "./UpdateAgentRequest"; +export { type ListVersionsAgentsIdVersionsGetRequest } from "./ListVersionsAgentsIdVersionsGetRequest"; +export { type SetDeploymentAgentsIdEnvironmentsEnvironmentIdPostRequest } from "./SetDeploymentAgentsIdEnvironmentsEnvironmentIdPostRequest"; +export { type SerializeAgentsIdSerializeGetRequest } from "./SerializeAgentsIdSerializeGetRequest"; +export { type BodyDeserializeAgentsDeserializePost } from "./BodyDeserializeAgentsDeserializePost"; diff --git a/src/api/resources/agents/index.ts b/src/api/resources/agents/index.ts new file mode 100644 index 00000000..c9240f83 --- /dev/null +++ b/src/api/resources/agents/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./client"; diff --git a/src/api/resources/agents/types/AgentLogRequestToolChoice.ts b/src/api/resources/agents/types/AgentLogRequestToolChoice.ts new file mode 100644 index 00000000..d704a5f1 --- /dev/null +++ b/src/api/resources/agents/types/AgentLogRequestToolChoice.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../index"; + +/** + * Controls how the model uses tools. The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. 
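+ * For example, forcing a hypothetical tool named `get_weather` would be expressed as `{'type': 'function', 'function': {'name': 'get_weather'}}` (the tool name here is purely illustrative).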
+ */ +export type AgentLogRequestToolChoice = "none" | "auto" | "required" | Humanloop.ToolChoice; diff --git a/src/api/resources/agents/types/AgentRequestReasoningEffort.ts b/src/api/resources/agents/types/AgentRequestReasoningEffort.ts new file mode 100644 index 00000000..41181f64 --- /dev/null +++ b/src/api/resources/agents/types/AgentRequestReasoningEffort.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../index"; + +/** + * Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + */ +export type AgentRequestReasoningEffort = Humanloop.OpenAiReasoningEffort | number; diff --git a/src/api/resources/agents/types/AgentRequestStop.ts b/src/api/resources/agents/types/AgentRequestStop.ts new file mode 100644 index 00000000..bd491af4 --- /dev/null +++ b/src/api/resources/agents/types/AgentRequestStop.ts @@ -0,0 +1,8 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + */ +export type AgentRequestStop = string | string[]; diff --git a/src/api/resources/agents/types/AgentRequestTemplate.ts b/src/api/resources/agents/types/AgentRequestTemplate.ts new file mode 100644 index 00000000..099cecb7 --- /dev/null +++ b/src/api/resources/agents/types/AgentRequestTemplate.ts @@ -0,0 +1,15 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../index"; + +/** + * The template contains the main structure and instructions for the model, including input variables for dynamic values. + * + * For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + * For completion models, provide a prompt template as a string. + * + * Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + */ +export type AgentRequestTemplate = string | Humanloop.ChatMessage[]; diff --git a/src/api/resources/agents/types/AgentRequestToolsItem.ts b/src/api/resources/agents/types/AgentRequestToolsItem.ts new file mode 100644 index 00000000..2033e534 --- /dev/null +++ b/src/api/resources/agents/types/AgentRequestToolsItem.ts @@ -0,0 +1,7 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../index"; + +export type AgentRequestToolsItem = Humanloop.AgentLinkedFileRequest | Humanloop.AgentInlineTool; diff --git a/src/api/resources/agents/types/AgentsCallRequestToolChoice.ts b/src/api/resources/agents/types/AgentsCallRequestToolChoice.ts new file mode 100644 index 00000000..42a8e41d --- /dev/null +++ b/src/api/resources/agents/types/AgentsCallRequestToolChoice.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../index"; + +/** + * Controls how the model uses tools. The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. 
+ * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + */ +export type AgentsCallRequestToolChoice = "none" | "auto" | "required" | Humanloop.ToolChoice; diff --git a/src/api/resources/agents/types/AgentsCallStreamRequestToolChoice.ts b/src/api/resources/agents/types/AgentsCallStreamRequestToolChoice.ts new file mode 100644 index 00000000..45184790 --- /dev/null +++ b/src/api/resources/agents/types/AgentsCallStreamRequestToolChoice.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../index"; + +/** + * Controls how the model uses tools. The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + */ +export type AgentsCallStreamRequestToolChoice = "none" | "auto" | "required" | Humanloop.ToolChoice; diff --git a/src/api/resources/agents/types/index.ts b/src/api/resources/agents/types/index.ts new file mode 100644 index 00000000..8a8a004f --- /dev/null +++ b/src/api/resources/agents/types/index.ts @@ -0,0 +1,7 @@ +export * from "./AgentLogRequestToolChoice"; +export * from "./AgentsCallStreamRequestToolChoice"; +export * from "./AgentsCallRequestToolChoice"; +export * from "./AgentRequestTemplate"; +export * from "./AgentRequestStop"; +export * from "./AgentRequestReasoningEffort"; +export * from "./AgentRequestToolsItem"; diff --git a/src/api/resources/datasets/client/Client.ts b/src/api/resources/datasets/client/Client.ts index 20ddb95a..cd17b306 100644 --- a/src/api/resources/datasets/client/Client.ts +++ b/src/api/resources/datasets/client/Client.ts @@ -107,8 +107,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -275,8 +275,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -388,8 +388,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -470,8 +470,8 @@ export class 
Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -550,8 +550,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -656,8 +656,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -763,8 +763,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -850,8 +850,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -934,8 +934,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1055,8 +1055,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1155,8 +1155,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1246,8 +1246,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, 
"X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1326,8 +1326,8 @@ export class Datasets { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/directories/client/Client.ts b/src/api/resources/directories/client/Client.ts index 2e244488..a9e2a6e9 100644 --- a/src/api/resources/directories/client/Client.ts +++ b/src/api/resources/directories/client/Client.ts @@ -55,8 +55,8 @@ export class Directories { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -139,8 +139,8 @@ export class Directories { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -224,8 +224,8 @@ export class Directories { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -307,8 +307,8 @@ export class Directories { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -387,8 +387,8 @@ export class Directories { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/evaluations/client/Client.ts b/src/api/resources/evaluations/client/Client.ts index 1699c9fe..93e1d4e1 100644 --- a/src/api/resources/evaluations/client/Client.ts +++ b/src/api/resources/evaluations/client/Client.ts @@ -88,8 +88,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -190,8 +190,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", 
"X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -283,8 +283,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -374,8 +374,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -463,8 +463,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -546,8 +546,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -624,8 +624,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -723,8 +723,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -813,8 +813,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -894,8 +894,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, 
...(await this._getCustomAuthorizationHeaders()), @@ -981,8 +981,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1074,8 +1074,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1161,8 +1161,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1267,8 +1267,8 @@ export class Evaluations { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/evaluators/client/Client.ts b/src/api/resources/evaluators/client/Client.ts index c83f1463..c39703a9 100644 --- a/src/api/resources/evaluators/client/Client.ts +++ b/src/api/resources/evaluators/client/Client.ts @@ -73,8 +73,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -186,8 +186,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -296,8 +296,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -396,8 +396,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await 
this._getCustomAuthorizationHeaders()), @@ -478,8 +478,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -560,8 +560,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -653,8 +653,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -740,8 +740,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -824,8 +824,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -923,8 +923,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1015,8 +1015,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1095,8 +1095,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1186,8 +1186,8 @@ export class Evaluators { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + 
"User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/files/client/Client.ts b/src/api/resources/files/client/Client.ts index e4fc177f..83aa8ff1 100644 --- a/src/api/resources/files/client/Client.ts +++ b/src/api/resources/files/client/Client.ts @@ -47,7 +47,7 @@ export class Files { public async listFiles( request: Humanloop.ListFilesFilesGetRequest = {}, requestOptions?: Files.RequestOptions, - ): Promise { + ): Promise { const { page, size, name, template, type: type_, environment, sortBy, order } = request; const _queryParams: Record = {}; if (page != null) { @@ -101,8 +101,8 @@ export class Files { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -116,7 +116,7 @@ export class Files { abortSignal: requestOptions?.abortSignal, }); if (_response.ok) { - return serializers.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse.parseOrThrow( + return serializers.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse.parseOrThrow( _response.body, { unrecognizedObjectKeys: "passthrough", @@ -197,8 +197,8 @@ export class Files { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/files/types/RetrieveByPathFilesRetrieveByPathPostResponse.ts b/src/api/resources/files/types/RetrieveByPathFilesRetrieveByPathPostResponse.ts index 3adf1e5e..6919fd90 100644 --- a/src/api/resources/files/types/RetrieveByPathFilesRetrieveByPathPostResponse.ts +++ b/src/api/resources/files/types/RetrieveByPathFilesRetrieveByPathPostResponse.ts @@ -9,4 +9,5 @@ export type RetrieveByPathFilesRetrieveByPathPostResponse = | Humanloop.ToolResponse | Humanloop.DatasetResponse | Humanloop.EvaluatorResponse - | Humanloop.FlowResponse; + | Humanloop.FlowResponse + | Humanloop.AgentResponse; diff --git a/src/api/resources/flows/client/Client.ts b/src/api/resources/flows/client/Client.ts index 08d713a2..e548186e 100644 --- a/src/api/resources/flows/client/Client.ts +++ b/src/api/resources/flows/client/Client.ts @@ -98,8 +98,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -198,8 +198,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, 
...(await this._getCustomAuthorizationHeaders()), @@ -298,8 +298,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -380,8 +380,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -462,8 +462,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -572,8 +572,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -688,8 +688,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -781,8 +781,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -868,8 +868,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -952,8 +952,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1051,8 +1051,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", 
"X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1143,8 +1143,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1223,8 +1223,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1316,8 +1316,8 @@ export class Flows { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/index.ts b/src/api/resources/index.ts index e0a7e133..c3e903dd 100644 --- a/src/api/resources/index.ts +++ b/src/api/resources/index.ts @@ -4,6 +4,8 @@ export * as datasets from "./datasets"; export * from "./datasets/types"; export * as evaluators from "./evaluators"; export * from "./evaluators/types"; +export * as agents from "./agents"; +export * from "./agents/types"; export * as files from "./files"; export * from "./files/types"; export * as evaluations from "./evaluations"; @@ -17,6 +19,7 @@ export * from "./tools/client/requests"; export * from "./datasets/client/requests"; export * from "./evaluators/client/requests"; export * from "./flows/client/requests"; +export * from "./agents/client/requests"; export * from "./directories/client/requests"; export * from "./files/client/requests"; export * from "./evaluations/client/requests"; diff --git a/src/api/resources/logs/client/Client.ts b/src/api/resources/logs/client/Client.ts index 4d592529..e8e0dcb3 100644 --- a/src/api/resources/logs/client/Client.ts +++ b/src/api/resources/logs/client/Client.ts @@ -5,8 +5,8 @@ import * as environments from "../../../../environments"; import * as core from "../../../../core"; import * as Humanloop from "../../../index"; -import * as serializers from "../../../../serialization/index"; import urlJoin from "url-join"; +import * as serializers from "../../../../serialization/index"; import * as errors from "../../../../errors/index"; export declare namespace Logs { @@ -69,7 +69,6 @@ export class Logs { page, size, versionId, - versionStatus, id, search, metadataSearch, @@ -91,11 +90,6 @@ export class Logs { if (versionId != null) { _queryParams["version_id"] = versionId; } - if (versionStatus != null) { - _queryParams["version_status"] = serializers.VersionStatus.jsonOrThrow(versionStatus, { - unrecognizedObjectKeys: "strip", - }); - } if (id != null) { if (Array.isArray(id)) { _queryParams["id"] = id.map((item) => item); @@ -142,8 +136,8 @@ export class Logs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": 
"0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -248,8 +242,8 @@ export class Logs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -324,8 +318,8 @@ export class Logs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), diff --git a/src/api/resources/logs/client/requests/ListLogsGetRequest.ts b/src/api/resources/logs/client/requests/ListLogsGetRequest.ts index 3ce9e985..1b9660c7 100644 --- a/src/api/resources/logs/client/requests/ListLogsGetRequest.ts +++ b/src/api/resources/logs/client/requests/ListLogsGetRequest.ts @@ -2,8 +2,6 @@ * This file was auto-generated by Fern from our API Definition. */ -import * as Humanloop from "../../../../index"; - /** * @example * { @@ -28,10 +26,6 @@ export interface ListLogsGetRequest { * If provided, only Logs belonging to the specified Version will be returned. */ versionId?: string; - /** - * If provided, only Logs belonging to Versions with the specified status will be returned. - */ - versionStatus?: Humanloop.VersionStatus; /** * If provided, returns Logs whose IDs contain any of the specified values as substrings. */ @@ -65,7 +59,7 @@ export interface ListLogsGetRequest { */ sample?: number; /** - * If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs. + * If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs. 
*/ includeTraceChildren?: boolean; } diff --git a/src/api/resources/prompts/client/Client.ts b/src/api/resources/prompts/client/Client.ts index aa34779c..9541f43a 100644 --- a/src/api/resources/prompts/client/Client.ts +++ b/src/api/resources/prompts/client/Client.ts @@ -129,8 +129,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -221,8 +221,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -321,8 +321,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -502,8 +502,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -618,8 +618,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -731,8 +731,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -831,8 +831,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -913,8 +913,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -995,8 +995,8 @@ export class 
Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1099,8 +1099,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1193,8 +1193,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1280,8 +1280,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1364,8 +1364,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1463,8 +1463,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1555,8 +1555,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1635,8 +1635,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1728,8 +1728,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, 
"X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1789,6 +1789,195 @@ export class Prompts { } } + /** + * Serialize a Prompt to the .prompt file format. + * + * Useful for storing the Prompt with your code in a version control system, + * or for editing with an AI tool. + * + * By default, the deployed version of the Prompt is returned. Use the query parameters + * `version_id` or `environment` to target a specific version of the Prompt. + * + * @param {string} id - Unique identifier for Prompt. + * @param {Humanloop.SerializePromptsIdSerializeGetRequest} request + * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.prompts.serialize("id") + */ + public async serialize( + id: string, + request: Humanloop.SerializePromptsIdSerializeGetRequest = {}, + requestOptions?: Prompts.RequestOptions, + ): Promise { + const { versionId, environment } = request; + const _queryParams: Record = {}; + if (versionId != null) { + _queryParams["version_id"] = versionId; + } + + if (environment != null) { + _queryParams["environment"] = environment; + } + + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `prompts/${encodeURIComponent(id)}/serialize`, + ), + method: "GET", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return _response.body; + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling GET /prompts/{id}/serialize."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Deserialize a Prompt from the .prompt file format. + * + * This returns a subset of the attributes required by a Prompt. + * This subset is the bit that defines the Prompt version (e.g. 
with `model` and `temperature` etc) + * + * @param {Humanloop.BodyDeserializePromptsDeserializePost} request + * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.prompts.deserialize({ + * prompt: "prompt" + * }) + */ + public async deserialize( + request: Humanloop.BodyDeserializePromptsDeserializePost, + requestOptions?: Prompts.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + "prompts/deserialize", + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + body: serializers.BodyDeserializePromptsDeserializePost.jsonOrThrow(request, { + unrecognizedObjectKeys: "strip", + }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.PromptKernelRequest.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling POST /prompts/deserialize."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + protected async _getCustomAuthorizationHeaders() { const apiKeyValue = (await core.Supplier.get(this._options.apiKey)) ?? process?.env["HUMANLOOP_API_KEY"]; return { "X-API-KEY": apiKeyValue }; diff --git a/src/api/resources/prompts/client/requests/BodyDeserializePromptsDeserializePost.ts b/src/api/resources/prompts/client/requests/BodyDeserializePromptsDeserializePost.ts new file mode 100644 index 00000000..4bb58f75 --- /dev/null +++ b/src/api/resources/prompts/client/requests/BodyDeserializePromptsDeserializePost.ts @@ -0,0 +1,13 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +/** + * @example + * { + * prompt: "prompt" + * } + */ +export interface BodyDeserializePromptsDeserializePost { + prompt: string; +} diff --git a/src/api/resources/prompts/client/requests/PromptRequest.ts b/src/api/resources/prompts/client/requests/PromptRequest.ts index ab484b21..c1659476 100644 --- a/src/api/resources/prompts/client/requests/PromptRequest.ts +++ b/src/api/resources/prompts/client/requests/PromptRequest.ts @@ -61,8 +61,8 @@ export interface PromptRequest { seed?: number; /** The format of the response. Only `{"type": "json_object"}` is currently supported for chat. */ responseFormat?: Humanloop.ResponseFormat; - /** Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. */ - reasoningEffort?: Humanloop.ReasoningEffort; + /** Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. */ + reasoningEffort?: Humanloop.PromptRequestReasoningEffort; /** The tool specification that the model can choose to call if Tool calling is supported. */ tools?: Humanloop.ToolFunction[]; /** The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called. */ diff --git a/src/api/resources/prompts/client/requests/SerializePromptsIdSerializeGetRequest.ts b/src/api/resources/prompts/client/requests/SerializePromptsIdSerializeGetRequest.ts new file mode 100644 index 00000000..71ada1c6 --- /dev/null +++ b/src/api/resources/prompts/client/requests/SerializePromptsIdSerializeGetRequest.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * {} + */ +export interface SerializePromptsIdSerializeGetRequest { + /** + * A specific Version ID of the Prompt to retrieve. + */ + versionId?: string; + /** + * Name of the Environment to retrieve a deployed Version from. + */ + environment?: string; +} diff --git a/src/api/resources/prompts/client/requests/index.ts b/src/api/resources/prompts/client/requests/index.ts index f17b6d9f..d579eda7 100644 --- a/src/api/resources/prompts/client/requests/index.ts +++ b/src/api/resources/prompts/client/requests/index.ts @@ -9,3 +9,5 @@ export { type UpdatePromptRequest } from "./UpdatePromptRequest"; export { type PopulatePromptsIdPopulatePostRequest } from "./PopulatePromptsIdPopulatePostRequest"; export { type ListVersionsPromptsIdVersionsGetRequest } from "./ListVersionsPromptsIdVersionsGetRequest"; export { type SetDeploymentPromptsIdEnvironmentsEnvironmentIdPostRequest } from "./SetDeploymentPromptsIdEnvironmentsEnvironmentIdPostRequest"; +export { type SerializePromptsIdSerializeGetRequest } from "./SerializePromptsIdSerializeGetRequest"; +export { type BodyDeserializePromptsDeserializePost } from "./BodyDeserializePromptsDeserializePost"; diff --git a/src/api/resources/prompts/types/PromptRequestReasoningEffort.ts b/src/api/resources/prompts/types/PromptRequestReasoningEffort.ts new file mode 100644 index 00000000..0981bb2a --- /dev/null +++ b/src/api/resources/prompts/types/PromptRequestReasoningEffort.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */
+
+import * as Humanloop from "../../../index";
+
+/**
+ * Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ */
+export type PromptRequestReasoningEffort = Humanloop.OpenAiReasoningEffort | number;
diff --git a/src/api/resources/prompts/types/index.ts b/src/api/resources/prompts/types/index.ts
index a5d3901d..8265b2f3 100644
--- a/src/api/resources/prompts/types/index.ts
+++ b/src/api/resources/prompts/types/index.ts
@@ -4,3 +4,4 @@ export * from "./PromptsCallStreamRequestToolChoice";
 export * from "./PromptsCallRequestToolChoice";
 export * from "./PromptRequestTemplate";
 export * from "./PromptRequestStop";
+export * from "./PromptRequestReasoningEffort";
diff --git a/src/api/resources/tools/client/Client.ts b/src/api/resources/tools/client/Client.ts
index 70e9e713..c5a81fb5 100644
--- a/src/api/resources/tools/client/Client.ts
+++ b/src/api/resources/tools/client/Client.ts
@@ -33,6 +33,112 @@ export declare namespace Tools {
 export class Tools {
     constructor(protected readonly _options: Tools.Options = {}) {}
+    /**
+     * Call a Tool.
+     *
+     * Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+     *
+     * You can use query parameters `version_id`, or `environment`, to target
+     * an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+     *
+     * Instead of targeting an existing version explicitly, you can instead pass in
+     * Tool details in the request body. In this case, we will check if the details correspond
+     * to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+     * in the case where you are storing or deriving your Tool details in code.
+     *
+     * @param {Humanloop.ToolCallRequest} request
+     * @param {Tools.RequestOptions} requestOptions - Request-specific configuration.
+     *
+     * @throws {@link Humanloop.UnprocessableEntityError}
+     *
+     * @example
+     *     await client.tools.call()
+     */
+    public async call(
+        request: Humanloop.ToolCallRequest = {},
+        requestOptions?: Tools.RequestOptions,
+    ): Promise<Humanloop.ToolCallResponse> {
+        const { versionId, environment, ..._body } = request;
+        const _queryParams: Record<string, string | string[] | object | object[] | null> = {};
+        if (versionId != null) {
+            _queryParams["version_id"] = versionId;
+        }
+
+        if (environment != null) {
+            _queryParams["environment"] = environment;
+        }
+
+        const _response = await (this._options.fetcher ?? core.fetcher)({
+            url: urlJoin(
+                (await core.Supplier.get(this._options.baseUrl)) ??
+                    (await core.Supplier.get(this._options.environment)) ??
+                    environments.HumanloopEnvironment.Default,
+                "tools/call",
+            ),
+            method: "POST",
+            headers: {
+                "X-Fern-Language": "JavaScript",
+                "X-Fern-SDK-Name": "humanloop",
+                "X-Fern-SDK-Version": "0.8.21-beta1",
+                "User-Agent": "humanloop/0.8.21-beta1",
+                "X-Fern-Runtime": core.RUNTIME.type,
+                "X-Fern-Runtime-Version": core.RUNTIME.version,
+                ...(await this._getCustomAuthorizationHeaders()),
+                ...requestOptions?.headers,
+            },
+            contentType: "application/json",
+            queryParameters: _queryParams,
+            requestType: "json",
+            body: serializers.ToolCallRequest.jsonOrThrow(_body, { unrecognizedObjectKeys: "strip" }),
+            timeoutMs: requestOptions?.timeoutInSeconds != null ?
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.ToolCallResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError("Timeout exceeded when calling POST /tools/call."); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + /** * Log to a Tool. * @@ -105,8 +211,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -197,8 +303,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -307,8 +413,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -430,8 +536,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -530,8 +636,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -612,8 +718,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - 
"User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -694,8 +800,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -787,8 +893,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -874,8 +980,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -958,8 +1064,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1057,8 +1163,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1149,8 +1255,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1229,8 +1335,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1322,8 +1428,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.20", - "User-Agent": "humanloop/0.8.20", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -1383,6 +1489,270 @@ export class Tools { } } + /** + * 
@param {string} id - Unique identifier for File. + * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.tools.getEnvironmentVariables("id") + */ + public async getEnvironmentVariables( + id: string, + requestOptions?: Tools.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `tools/${encodeURIComponent(id)}/environment-variables`, + ), + method: "GET", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.tools.getEnvironmentVariables.Response.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError( + "Timeout exceeded when calling GET /tools/{id}/environment-variables.", + ); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * Add an environment variable to a Tool. + * + * @param {string} id - Unique identifier for Tool. + * @param {Humanloop.FileEnvironmentVariableRequest[]} request + * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.tools.addEnvironmentVariable("id", [{ + * name: "name", + * value: "value" + * }]) + */ + public async addEnvironmentVariable( + id: string, + request: Humanloop.FileEnvironmentVariableRequest[], + requestOptions?: Tools.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? 
+ environments.HumanloopEnvironment.Default, + `tools/${encodeURIComponent(id)}/environment-variables`, + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + body: serializers.tools.addEnvironmentVariable.Request.jsonOrThrow(request, { + unrecognizedObjectKeys: "strip", + }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.tools.addEnvironmentVariable.Response.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError( + "Timeout exceeded when calling POST /tools/{id}/environment-variables.", + ); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * @param {string} id - Unique identifier for File. + * @param {string} name - Name of the Environment Variable to delete. + * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Humanloop.UnprocessableEntityError} + * + * @example + * await client.tools.deleteEnvironmentVariable("id", "name") + */ + public async deleteEnvironmentVariable( + id: string, + name: string, + requestOptions?: Tools.RequestOptions, + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.baseUrl)) ?? + (await core.Supplier.get(this._options.environment)) ?? + environments.HumanloopEnvironment.Default, + `tools/${encodeURIComponent(id)}/environment-variables/${encodeURIComponent(name)}`, + ), + method: "DELETE", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "humanloop", + "X-Fern-SDK-Version": "0.8.21-beta1", + "User-Agent": "humanloop/0.8.21-beta1", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + ...requestOptions?.headers, + }, + contentType: "application/json", + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.tools.deleteEnvironmentVariable.Response.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 422: + throw new Humanloop.UnprocessableEntityError( + serializers.HttpValidationError.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }), + ); + default: + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumanloopError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumanloopTimeoutError( + "Timeout exceeded when calling DELETE /tools/{id}/environment-variables/{name}.", + ); + case "unknown": + throw new errors.HumanloopError({ + message: _response.error.errorMessage, + }); + } + } + protected async _getCustomAuthorizationHeaders() { const apiKeyValue = (await core.Supplier.get(this._options.apiKey)) ?? process?.env["HUMANLOOP_API_KEY"]; return { "X-API-KEY": apiKeyValue }; diff --git a/src/api/resources/tools/client/requests/ToolCallRequest.ts b/src/api/resources/tools/client/requests/ToolCallRequest.ts new file mode 100644 index 00000000..e8705964 --- /dev/null +++ b/src/api/resources/tools/client/requests/ToolCallRequest.ts @@ -0,0 +1,50 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../../../../index"; + +/** + * @example + * {} + */ +export interface ToolCallRequest { + /** + * A specific Version ID of the Tool to call. + */ + versionId?: string; + /** + * Name of the Environment identifying a deployed version to call. + */ + environment?: string; + /** Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. */ + path?: string; + /** ID for an existing Tool. */ + id?: string; + /** Details of your Tool. A new Tool version will be created if the provided details are new. */ + tool?: Humanloop.ToolKernelRequest; + /** The inputs passed to the prompt template. */ + inputs?: Record; + /** Identifies where the model was called from. */ + source?: string; + /** Any additional metadata to record. */ + metadata?: Record; + /** When the logged event started. */ + startTime?: Date; + /** When the logged event ended. */ + endTime?: Date; + /** Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. */ + logStatus?: Humanloop.LogStatus; + /** Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. 
If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. */ + sourceDatapointId?: string; + /** The ID of the parent Log to nest this Log under in a Trace. */ + traceParentId?: string; + /** End-user ID related to the Log. */ + user?: string; + /** The name of the Environment the Log is associated to. */ + toolCallRequestEnvironment?: string; + /** Whether the request/response payloads will be stored on Humanloop. */ + save?: boolean; + /** This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. */ + logId?: string; +} diff --git a/src/api/resources/tools/client/requests/ToolLogRequest.ts b/src/api/resources/tools/client/requests/ToolLogRequest.ts index f67ac8a7..3992c004 100644 --- a/src/api/resources/tools/client/requests/ToolLogRequest.ts +++ b/src/api/resources/tools/client/requests/ToolLogRequest.ts @@ -49,6 +49,8 @@ export interface ToolLogRequest { path?: string; /** ID for an existing Tool. */ id?: string; + /** Details of your Tool. A new Tool version will be created if the provided details are new. */ + tool?: Humanloop.ToolKernelRequest; /** When the logged event started. */ startTime?: Date; /** When the logged event ended. */ @@ -87,6 +89,4 @@ export interface ToolLogRequest { save?: boolean; /** This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. */ logId?: string; - /** Details of your Tool. A new Tool version will be created if the provided details are new. */ - tool?: Humanloop.ToolKernelRequest; } diff --git a/src/api/resources/tools/client/requests/index.ts b/src/api/resources/tools/client/requests/index.ts index e112a4a5..9c8b0519 100644 --- a/src/api/resources/tools/client/requests/index.ts +++ b/src/api/resources/tools/client/requests/index.ts @@ -1,3 +1,4 @@ +export { type ToolCallRequest } from "./ToolCallRequest"; export { type ToolLogRequest } from "./ToolLogRequest"; export { type ToolLogUpdateRequest } from "./ToolLogUpdateRequest"; export { type ListToolsGetRequest } from "./ListToolsGetRequest"; diff --git a/src/api/types/AgentCallResponse.ts b/src/api/types/AgentCallResponse.ts new file mode 100644 index 00000000..4fa9ef16 --- /dev/null +++ b/src/api/types/AgentCallResponse.ts @@ -0,0 +1,89 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Response model for a Agent call. + */ +export interface AgentCallResponse { + /** The message returned by the provider. */ + outputMessage?: Humanloop.ChatMessage; + /** Number of tokens in the prompt used to generate the output. */ + promptTokens?: number; + /** Number of reasoning tokens used to generate the output. */ + reasoningTokens?: number; + /** Number of tokens in the output generated by the model. */ + outputTokens?: number; + /** Cost in dollars associated to the tokens in the prompt. */ + promptCost?: number; + /** Cost in dollars associated to the tokens in the output. */ + outputCost?: number; + /** Reason the generation finished. */ + finishReason?: string; + /** The messages passed to the to provider chat endpoint. */ + messages?: Humanloop.ChatMessage[]; + /** + * Controls how the model uses tools. The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. 
+ * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + */ + toolChoice?: Humanloop.AgentCallResponseToolChoice; + /** Agent that generated the Log. */ + agent: Humanloop.AgentResponse; + /** When the logged event started. */ + startTime?: Date; + /** When the logged event ended. */ + endTime?: Date; + /** Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. */ + output?: string; + /** User defined timestamp for when the log was created. */ + createdAt?: Date; + /** Error message if the log is an error. */ + error?: string; + /** Duration of the logged event in seconds. */ + providerLatency?: number; + /** Captured log and debug statements. */ + stdout?: string; + /** Raw request sent to provider. */ + providerRequest?: Record; + /** Raw response received the provider. */ + providerResponse?: Record; + /** The inputs passed to the prompt template. */ + inputs?: Record; + /** Identifies where the model was called from. */ + source?: string; + /** Any additional metadata to record. */ + metadata?: Record; + /** Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message. */ + logStatus?: Humanloop.LogStatus; + /** Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. */ + sourceDatapointId?: string; + /** The ID of the parent Log to nest this Log under in a Trace. */ + traceParentId?: string; + /** Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations */ + batches?: string[]; + /** End-user ID related to the Log. */ + user?: string; + /** The name of the Environment the Log is associated to. */ + environment?: string; + /** Whether the request/response payloads will be stored on Humanloop. */ + save?: boolean; + /** This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. */ + logId?: string; + /** Unique identifier for the Log. */ + id: string; + /** List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. */ + evaluatorLogs: Humanloop.EvaluatorLogResponse[]; + /** Identifier for the Flow that the Trace belongs to. */ + traceFlowId?: string; + /** Identifier for the Trace that the Log belongs to. */ + traceId?: string; + /** Logs nested under this Log in the Trace. */ + traceChildren?: Humanloop.LogResponse[]; + /** The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. 
*/ + previousAgentMessage?: Humanloop.ChatMessage; +} diff --git a/src/api/types/AgentCallResponseToolChoice.ts b/src/api/types/AgentCallResponseToolChoice.ts new file mode 100644 index 00000000..03797dcf --- /dev/null +++ b/src/api/types/AgentCallResponseToolChoice.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Controls how the model uses tools. The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + */ +export type AgentCallResponseToolChoice = "none" | "auto" | "required" | Humanloop.ToolChoice; diff --git a/src/api/types/AgentCallStreamResponse.ts b/src/api/types/AgentCallStreamResponse.ts new file mode 100644 index 00000000..04c6d9fb --- /dev/null +++ b/src/api/types/AgentCallStreamResponse.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Response model for calling Agent in streaming mode. + */ +export interface AgentCallStreamResponse { + logId: string; + message: string; + payload?: Humanloop.AgentCallStreamResponsePayload; + type: Humanloop.EventType; + createdAt: Date; +} diff --git a/src/api/types/AgentCallStreamResponsePayload.ts b/src/api/types/AgentCallStreamResponsePayload.ts new file mode 100644 index 00000000..67414449 --- /dev/null +++ b/src/api/types/AgentCallStreamResponsePayload.ts @@ -0,0 +1,7 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +export type AgentCallStreamResponsePayload = Humanloop.LogStreamResponse | Humanloop.LogResponse | Humanloop.ToolCall; diff --git a/src/api/types/AgentContinueCallResponse.ts b/src/api/types/AgentContinueCallResponse.ts new file mode 100644 index 00000000..068de3c1 --- /dev/null +++ b/src/api/types/AgentContinueCallResponse.ts @@ -0,0 +1,89 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Response model for continuing an Agent call. + */ +export interface AgentContinueCallResponse { + /** The message returned by the provider. */ + outputMessage?: Humanloop.ChatMessage; + /** Number of tokens in the prompt used to generate the output. */ + promptTokens?: number; + /** Number of reasoning tokens used to generate the output. */ + reasoningTokens?: number; + /** Number of tokens in the output generated by the model. */ + outputTokens?: number; + /** Cost in dollars associated to the tokens in the prompt. */ + promptCost?: number; + /** Cost in dollars associated to the tokens in the output. */ + outputCost?: number; + /** Reason the generation finished. */ + finishReason?: string; + /** The messages passed to the to provider chat endpoint. */ + messages?: Humanloop.ChatMessage[]; + /** + * Controls how the model uses tools. The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. 
+ * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + */ + toolChoice?: Humanloop.AgentContinueCallResponseToolChoice; + /** Agent that generated the Log. */ + agent: Humanloop.AgentResponse; + /** When the logged event started. */ + startTime?: Date; + /** When the logged event ended. */ + endTime?: Date; + /** Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. */ + output?: string; + /** User defined timestamp for when the log was created. */ + createdAt?: Date; + /** Error message if the log is an error. */ + error?: string; + /** Duration of the logged event in seconds. */ + providerLatency?: number; + /** Captured log and debug statements. */ + stdout?: string; + /** Raw request sent to provider. */ + providerRequest?: Record; + /** Raw response received the provider. */ + providerResponse?: Record; + /** The inputs passed to the prompt template. */ + inputs?: Record; + /** Identifies where the model was called from. */ + source?: string; + /** Any additional metadata to record. */ + metadata?: Record; + /** Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message. */ + logStatus?: Humanloop.LogStatus; + /** Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. */ + sourceDatapointId?: string; + /** The ID of the parent Log to nest this Log under in a Trace. */ + traceParentId?: string; + /** Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations */ + batches?: string[]; + /** End-user ID related to the Log. */ + user?: string; + /** The name of the Environment the Log is associated to. */ + environment?: string; + /** Whether the request/response payloads will be stored on Humanloop. */ + save?: boolean; + /** This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. */ + logId?: string; + /** Unique identifier for the Log. */ + id: string; + /** List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. */ + evaluatorLogs: Humanloop.EvaluatorLogResponse[]; + /** Identifier for the Flow that the Trace belongs to. */ + traceFlowId?: string; + /** Identifier for the Trace that the Log belongs to. */ + traceId?: string; + /** Logs nested under this Log in the Trace. */ + traceChildren?: Humanloop.LogResponse[]; + /** The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. 
*/ + previousAgentMessage?: Humanloop.ChatMessage; +} diff --git a/src/api/types/AgentContinueCallResponseToolChoice.ts b/src/api/types/AgentContinueCallResponseToolChoice.ts new file mode 100644 index 00000000..1617acb7 --- /dev/null +++ b/src/api/types/AgentContinueCallResponseToolChoice.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Controls how the model uses tools. The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + */ +export type AgentContinueCallResponseToolChoice = "none" | "auto" | "required" | Humanloop.ToolChoice; diff --git a/src/api/types/AgentContinueCallStreamResponse.ts b/src/api/types/AgentContinueCallStreamResponse.ts new file mode 100644 index 00000000..9187b8e5 --- /dev/null +++ b/src/api/types/AgentContinueCallStreamResponse.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Response model for continuing an Agent call in streaming mode. + */ +export interface AgentContinueCallStreamResponse { + logId: string; + message: string; + payload?: Humanloop.AgentContinueCallStreamResponsePayload; + type: Humanloop.EventType; + createdAt: Date; +} diff --git a/src/api/types/AgentContinueCallStreamResponsePayload.ts b/src/api/types/AgentContinueCallStreamResponsePayload.ts new file mode 100644 index 00000000..a92062b5 --- /dev/null +++ b/src/api/types/AgentContinueCallStreamResponsePayload.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +export type AgentContinueCallStreamResponsePayload = + | Humanloop.LogStreamResponse + | Humanloop.LogResponse + | Humanloop.ToolCall; diff --git a/src/api/types/AgentInlineTool.ts b/src/api/types/AgentInlineTool.ts new file mode 100644 index 00000000..91722b9b --- /dev/null +++ b/src/api/types/AgentInlineTool.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +export interface AgentInlineTool { + type: "inline"; + jsonSchema: Humanloop.ToolFunction; + onAgentCall?: Humanloop.OnAgentCallEnum; +} diff --git a/src/api/types/AgentKernelRequest.ts b/src/api/types/AgentKernelRequest.ts new file mode 100644 index 00000000..c43e0906 --- /dev/null +++ b/src/api/types/AgentKernelRequest.ts @@ -0,0 +1,55 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Base class used by both PromptKernelRequest and AgentKernelRequest. + * + * Contains the consistent Prompt-related fields. + */ +export interface AgentKernelRequest { + /** The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) */ + model: string; + /** The provider model endpoint used. 
*/ + endpoint?: Humanloop.ModelEndpoints; + /** + * The template contains the main structure and instructions for the model, including input variables for dynamic values. + * + * For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + * For completion models, provide a prompt template as a string. + * + * Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + */ + template?: Humanloop.AgentKernelRequestTemplate; + /** The template language to use for rendering the template. */ + templateLanguage?: Humanloop.TemplateLanguage; + /** The company providing the underlying model service. */ + provider?: Humanloop.ModelProviders; + /** The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt */ + maxTokens?: number; + /** What sampling temperature to use when making a generation. Higher values means the model will be more creative. */ + temperature?: number; + /** An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. */ + topP?: number; + /** The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. */ + stop?: Humanloop.AgentKernelRequestStop; + /** Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. */ + presencePenalty?: number; + /** Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. */ + frequencyPenalty?: number; + /** Other parameter values to be passed to the provider call. */ + other?: Record; + /** If specified, model will make a best effort to sample deterministically, but it is not guaranteed. */ + seed?: number; + /** The format of the response. Only `{"type": "json_object"}` is currently supported for chat. */ + responseFormat?: Humanloop.ResponseFormat; + /** Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. */ + reasoningEffort?: Humanloop.AgentKernelRequestReasoningEffort; + tools?: Humanloop.AgentKernelRequestToolsItem[]; + /** Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. */ + attributes?: Record; + /** The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. */ + maxIterations?: number; +} diff --git a/src/api/types/AgentKernelRequestReasoningEffort.ts b/src/api/types/AgentKernelRequestReasoningEffort.ts new file mode 100644 index 00000000..eca9d494 --- /dev/null +++ b/src/api/types/AgentKernelRequestReasoningEffort.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. 
+ */ +export type AgentKernelRequestReasoningEffort = Humanloop.OpenAiReasoningEffort | number; diff --git a/src/api/types/AgentKernelRequestStop.ts b/src/api/types/AgentKernelRequestStop.ts new file mode 100644 index 00000000..cb66c6e5 --- /dev/null +++ b/src/api/types/AgentKernelRequestStop.ts @@ -0,0 +1,8 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + */ +export type AgentKernelRequestStop = string | string[]; diff --git a/src/api/types/AgentKernelRequestTemplate.ts b/src/api/types/AgentKernelRequestTemplate.ts new file mode 100644 index 00000000..ed8c876a --- /dev/null +++ b/src/api/types/AgentKernelRequestTemplate.ts @@ -0,0 +1,15 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * The template contains the main structure and instructions for the model, including input variables for dynamic values. + * + * For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + * For completion models, provide a prompt template as a string. + * + * Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + */ +export type AgentKernelRequestTemplate = string | Humanloop.ChatMessage[]; diff --git a/src/api/types/AgentKernelRequestToolsItem.ts b/src/api/types/AgentKernelRequestToolsItem.ts new file mode 100644 index 00000000..f3b126f9 --- /dev/null +++ b/src/api/types/AgentKernelRequestToolsItem.ts @@ -0,0 +1,7 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +export type AgentKernelRequestToolsItem = Humanloop.AgentLinkedFileRequest | Humanloop.AgentInlineTool; diff --git a/src/api/types/AgentLinkedFileRequest.ts b/src/api/types/AgentLinkedFileRequest.ts new file mode 100644 index 00000000..a95d4d29 --- /dev/null +++ b/src/api/types/AgentLinkedFileRequest.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +export interface AgentLinkedFileRequest { + type: "file"; + link: Humanloop.LinkedFileRequest; + onAgentCall?: Humanloop.OnAgentCallEnum; +} diff --git a/src/api/types/AgentLinkedFileResponse.ts b/src/api/types/AgentLinkedFileResponse.ts new file mode 100644 index 00000000..9f69ad35 --- /dev/null +++ b/src/api/types/AgentLinkedFileResponse.ts @@ -0,0 +1,12 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Humanloop from "../index"; + +export interface AgentLinkedFileResponse { + type: "file"; + link: Humanloop.LinkedFileRequest; + onAgentCall?: Humanloop.OnAgentCallEnum; + file?: Humanloop.AgentLinkedFileResponseFile; +} diff --git a/src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem.ts b/src/api/types/AgentLinkedFileResponseFile.ts similarity index 62% rename from src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem.ts rename to src/api/types/AgentLinkedFileResponseFile.ts index 260bc9bd..16b31656 100644 --- a/src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem.ts +++ b/src/api/types/AgentLinkedFileResponseFile.ts @@ -4,9 +4,10 @@ import * as Humanloop from "../index"; -export type PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem = +export type AgentLinkedFileResponseFile = | Humanloop.PromptResponse | Humanloop.ToolResponse | Humanloop.DatasetResponse | Humanloop.EvaluatorResponse - | Humanloop.FlowResponse; + | Humanloop.FlowResponse + | Humanloop.AgentResponse; diff --git a/src/api/types/AgentLogResponse.ts b/src/api/types/AgentLogResponse.ts new file mode 100644 index 00000000..6349e664 --- /dev/null +++ b/src/api/types/AgentLogResponse.ts @@ -0,0 +1,87 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * General request for creating a Log + */ +export interface AgentLogResponse { + /** The message returned by the provider. */ + outputMessage?: Humanloop.ChatMessage; + /** Number of tokens in the prompt used to generate the output. */ + promptTokens?: number; + /** Number of reasoning tokens used to generate the output. */ + reasoningTokens?: number; + /** Number of tokens in the output generated by the model. */ + outputTokens?: number; + /** Cost in dollars associated to the tokens in the prompt. */ + promptCost?: number; + /** Cost in dollars associated to the tokens in the output. */ + outputCost?: number; + /** Reason the generation finished. */ + finishReason?: string; + /** The messages passed to the to provider chat endpoint. */ + messages?: Humanloop.ChatMessage[]; + /** + * Controls how the model uses tools. The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + */ + toolChoice?: Humanloop.AgentLogResponseToolChoice; + /** Agent that generated the Log. */ + agent: Humanloop.AgentResponse; + /** When the logged event started. */ + startTime?: Date; + /** When the logged event ended. */ + endTime?: Date; + /** Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. */ + output?: string; + /** User defined timestamp for when the log was created. */ + createdAt?: Date; + /** Error message if the log is an error. */ + error?: string; + /** Duration of the logged event in seconds. 
*/ + providerLatency?: number; + /** Captured log and debug statements. */ + stdout?: string; + /** Raw request sent to provider. */ + providerRequest?: Record; + /** Raw response received the provider. */ + providerResponse?: Record; + /** The inputs passed to the prompt template. */ + inputs?: Record; + /** Identifies where the model was called from. */ + source?: string; + /** Any additional metadata to record. */ + metadata?: Record; + /** Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. */ + logStatus?: Humanloop.LogStatus; + /** Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. */ + sourceDatapointId?: string; + /** The ID of the parent Log to nest this Log under in a Trace. */ + traceParentId?: string; + /** Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations */ + batches?: string[]; + /** End-user ID related to the Log. */ + user?: string; + /** The name of the Environment the Log is associated to. */ + environment?: string; + /** Whether the request/response payloads will be stored on Humanloop. */ + save?: boolean; + /** This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. */ + logId?: string; + /** Unique identifier for the Log. */ + id: string; + /** List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. */ + evaluatorLogs: Humanloop.EvaluatorLogResponse[]; + /** Identifier for the Flow that the Trace belongs to. */ + traceFlowId?: string; + /** Identifier for the Trace that the Log belongs to. */ + traceId?: string; + /** Logs nested under this Log in the Trace. */ + traceChildren?: Humanloop.LogResponse[]; +} diff --git a/src/api/types/AgentLogResponseToolChoice.ts b/src/api/types/AgentLogResponseToolChoice.ts new file mode 100644 index 00000000..8f6e704d --- /dev/null +++ b/src/api/types/AgentLogResponseToolChoice.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Controls how the model uses tools. The following options are supported: + * - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + * - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + * - `'required'` means the model must call one or more of the provided tools. + * - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + */ +export type AgentLogResponseToolChoice = "none" | "auto" | "required" | Humanloop.ToolChoice; diff --git a/src/api/types/AgentLogStreamResponse.ts b/src/api/types/AgentLogStreamResponse.ts new file mode 100644 index 00000000..cba89fe7 --- /dev/null +++ b/src/api/types/AgentLogStreamResponse.ts @@ -0,0 +1,41 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Humanloop from "../index"; + +/** + * Prompt specific log output shared between PromptLogRequest and PromptCallLogResponse. + */ +export interface AgentLogStreamResponse { + /** Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. */ + output?: string; + /** User defined timestamp for when the log was created. */ + createdAt?: Date; + /** Error message if the log is an error. */ + error?: string; + /** Duration of the logged event in seconds. */ + providerLatency?: number; + /** Captured log and debug statements. */ + stdout?: string; + /** The message returned by the provider. */ + outputMessage?: Humanloop.ChatMessage; + /** Number of tokens in the prompt used to generate the output. */ + promptTokens?: number; + /** Number of reasoning tokens used to generate the output. */ + reasoningTokens?: number; + /** Number of tokens in the output generated by the model. */ + outputTokens?: number; + /** Cost in dollars associated to the tokens in the prompt. */ + promptCost?: number; + /** Cost in dollars associated to the tokens in the output. */ + outputCost?: number; + /** Reason the generation finished. */ + finishReason?: string; + /** ID of the log. */ + id: string; + /** ID of the Agent the log belongs to. */ + agentId: string; + /** ID of the specific version of the Agent. */ + versionId: string; +} diff --git a/src/api/types/AgentResponse.ts b/src/api/types/AgentResponse.ts new file mode 100644 index 00000000..b9251aac --- /dev/null +++ b/src/api/types/AgentResponse.ts @@ -0,0 +1,103 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Base type that all File Responses should inherit from. + * + * Attributes defined here are common to all File Responses and should be overridden + * in the inheriting classes with documentation and appropriate Field definitions. + */ +export interface AgentResponse { + /** Path of the Agent, including the name, which is used as a unique identifier. */ + path: string; + /** Unique identifier for the Agent. */ + id: string; + /** ID of the directory that the file is in on Humanloop. */ + directoryId?: string; + /** The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) */ + model: string; + /** The provider model endpoint used. */ + endpoint?: Humanloop.ModelEndpoints; + /** + * The template contains the main structure and instructions for the model, including input variables for dynamic values. + * + * For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + * For completion models, provide a prompt template as a string. + * + * Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + */ + template?: Humanloop.AgentResponseTemplate; + /** The template language to use for rendering the template. */ + templateLanguage?: Humanloop.TemplateLanguage; + /** The company providing the underlying model service. */ + provider?: Humanloop.ModelProviders; + /** The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt */ + maxTokens?: number; + /** What sampling temperature to use when making a generation. Higher values means the model will be more creative. 
*/ + temperature?: number; + /** An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. */ + topP?: number; + /** The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. */ + stop?: Humanloop.AgentResponseStop; + /** Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. */ + presencePenalty?: number; + /** Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. */ + frequencyPenalty?: number; + /** Other parameter values to be passed to the provider call. */ + other?: Record; + /** If specified, model will make a best effort to sample deterministically, but it is not guaranteed. */ + seed?: number; + /** The format of the response. Only `{"type": "json_object"}` is currently supported for chat. */ + responseFormat?: Humanloop.ResponseFormat; + /** Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. */ + reasoningEffort?: Humanloop.AgentResponseReasoningEffort; + /** List of tools that the Agent can call. These can be linked files or inline tools. */ + tools: Humanloop.AgentResponseToolsItem[]; + /** Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. */ + attributes?: Record; + /** The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. */ + maxIterations?: number; + /** Unique name for the Agent version. Version names must be unique for a given Agent. */ + versionName?: string; + /** Description of the version, e.g., the changes made in this version. */ + versionDescription?: string; + /** Description of the Agent. */ + description?: string; + /** List of tags associated with the file. */ + tags?: string[]; + /** Long description of the file. */ + readme?: string; + /** Name of the Agent. */ + name: string; + /** The JSON schema for the Prompt. */ + schema?: Record; + /** Unique identifier for the specific Agent Version. If no query params provided, the default deployed Agent Version is returned. */ + versionId: string; + type?: "agent"; + /** The list of environments the Agent Version is deployed to. */ + environments?: Humanloop.EnvironmentResponse[]; + createdAt: Date; + updatedAt: Date; + /** The user who created the Agent. */ + createdBy?: Humanloop.UserResponse | undefined; + /** The user who committed the Agent Version. */ + committedBy?: Humanloop.UserResponse | undefined; + /** The date and time the Agent Version was committed. */ + committedAt?: Date; + /** The status of the Agent Version. */ + status: Humanloop.VersionStatus; + lastUsedAt: Date; + /** The number of logs that have been generated for this Agent Version */ + versionLogsCount: number; + /** The number of logs that have been generated across all Agent Versions */ + totalLogsCount: number; + /** Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template. */ + inputs: Humanloop.InputResponse[]; + /** Evaluators that have been attached to this Agent that are used for monitoring logs. 
*/ + evaluators?: Humanloop.MonitoringEvaluatorResponse[]; + /** Aggregation of Evaluator results for the Agent Version. */ + evaluatorAggregates?: Humanloop.EvaluatorAggregate[]; +} diff --git a/src/api/types/AgentResponseReasoningEffort.ts b/src/api/types/AgentResponseReasoningEffort.ts new file mode 100644 index 00000000..885f8354 --- /dev/null +++ b/src/api/types/AgentResponseReasoningEffort.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + */ +export type AgentResponseReasoningEffort = Humanloop.OpenAiReasoningEffort | number; diff --git a/src/api/types/AgentResponseStop.ts b/src/api/types/AgentResponseStop.ts new file mode 100644 index 00000000..9df91298 --- /dev/null +++ b/src/api/types/AgentResponseStop.ts @@ -0,0 +1,8 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + */ +export type AgentResponseStop = string | string[]; diff --git a/src/api/types/AgentResponseTemplate.ts b/src/api/types/AgentResponseTemplate.ts new file mode 100644 index 00000000..8ee8fc11 --- /dev/null +++ b/src/api/types/AgentResponseTemplate.ts @@ -0,0 +1,15 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * The template contains the main structure and instructions for the model, including input variables for dynamic values. + * + * For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + * For completion models, provide a prompt template as a string. + * + * Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + */ +export type AgentResponseTemplate = string | Humanloop.ChatMessage[]; diff --git a/src/api/types/AgentResponseToolsItem.ts b/src/api/types/AgentResponseToolsItem.ts new file mode 100644 index 00000000..a69127ec --- /dev/null +++ b/src/api/types/AgentResponseToolsItem.ts @@ -0,0 +1,7 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +export type AgentResponseToolsItem = Humanloop.AgentLinkedFileResponse | Humanloop.AgentInlineTool; diff --git a/src/api/types/AnthropicRedactedThinkingContent.ts b/src/api/types/AnthropicRedactedThinkingContent.ts new file mode 100644 index 00000000..a9e0485e --- /dev/null +++ b/src/api/types/AnthropicRedactedThinkingContent.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface AnthropicRedactedThinkingContent { + type: "redacted_thinking"; + /** Thinking block Anthropic redacted for safety reasons. User is expected to pass the block back to Anthropic */ + data: string; +} diff --git a/src/api/types/AnthropicThinkingContent.ts b/src/api/types/AnthropicThinkingContent.ts new file mode 100644 index 00000000..e064143a --- /dev/null +++ b/src/api/types/AnthropicThinkingContent.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
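As a usage sketch for the new `AgentResponse` type and its helper unions: the package import path, client construction, and Agent ID below are illustrative assumptions, while `client.agents.get` is the accessor used later in this diff.

```typescript
import { HumanloopClient } from "humanloop";

// Construct a client (API key handling shown here is an assumption).
const client = new HumanloopClient({ apiKey: process.env.HUMANLOOP_API_KEY });

// Fetch the default deployed version of an Agent and read a few AgentResponse fields.
const agent = await client.agents.get("ag_1234567890"); // placeholder Agent ID
console.log(`${agent.name} (${agent.versionId}): model=${agent.model}, tools=${agent.tools.length}`);
```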
+ */ + +export interface AnthropicThinkingContent { + type: "thinking"; + /** Model's chain-of-thought for providing the response. */ + thinking: string; + /** Cryptographic signature that verifies the thinking block was generated by Anthropic. */ + signature: string; +} diff --git a/src/api/types/ChatMessage.ts b/src/api/types/ChatMessage.ts index a410aa3f..966d569d 100644 --- a/src/api/types/ChatMessage.ts +++ b/src/api/types/ChatMessage.ts @@ -15,4 +15,6 @@ export interface ChatMessage { role: Humanloop.ChatRole; /** A list of tool calls requested by the assistant. */ toolCalls?: Humanloop.ToolCall[]; + /** Model's chain-of-thought for providing the response. Present on assistant messages if model supports it. */ + thinking?: Humanloop.ChatMessageThinkingItem[]; } diff --git a/src/api/types/ChatMessageThinkingItem.ts b/src/api/types/ChatMessageThinkingItem.ts new file mode 100644 index 00000000..ecb0eb00 --- /dev/null +++ b/src/api/types/ChatMessageThinkingItem.ts @@ -0,0 +1,7 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +export type ChatMessageThinkingItem = Humanloop.AnthropicThinkingContent | Humanloop.AnthropicRedactedThinkingContent; diff --git a/src/api/types/CreateAgentLogResponse.ts b/src/api/types/CreateAgentLogResponse.ts new file mode 100644 index 00000000..fc111b43 --- /dev/null +++ b/src/api/types/CreateAgentLogResponse.ts @@ -0,0 +1,19 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Response for an Agent Log. + */ +export interface CreateAgentLogResponse { + /** Unique identifier for the Log. */ + id: string; + /** Unique identifier for the Agent. */ + agentId: string; + /** Unique identifier for the Agent Version. */ + versionId: string; + /** Status of the Agent Log. When a Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. */ + logStatus?: Humanloop.LogStatus; +} diff --git a/src/api/types/DatasetResponse.ts b/src/api/types/DatasetResponse.ts index b65ac821..12b2b2e3 100644 --- a/src/api/types/DatasetResponse.ts +++ b/src/api/types/DatasetResponse.ts @@ -21,6 +21,8 @@ export interface DatasetResponse { name: string; /** Description of the Dataset. */ description?: string; + /** The JSON schema for the File. */ + schema?: Record; /** Long description of the file. */ readme?: string; /** List of tags associated with the file. */ diff --git a/src/api/types/DirectoryWithParentsAndChildrenResponseFilesItem.ts b/src/api/types/DirectoryWithParentsAndChildrenResponseFilesItem.ts index 6d61da80..12121f31 100644 --- a/src/api/types/DirectoryWithParentsAndChildrenResponseFilesItem.ts +++ b/src/api/types/DirectoryWithParentsAndChildrenResponseFilesItem.ts @@ -9,4 +9,5 @@ export type DirectoryWithParentsAndChildrenResponseFilesItem = | Humanloop.ToolResponse | Humanloop.EvaluatorResponse | Humanloop.DatasetResponse - | Humanloop.FlowResponse; + | Humanloop.FlowResponse + | Humanloop.AgentResponse; diff --git a/src/api/types/EvaluatorResponse.ts b/src/api/types/EvaluatorResponse.ts index d9630419..bfb6faf6 100644 --- a/src/api/types/EvaluatorResponse.ts +++ b/src/api/types/EvaluatorResponse.ts @@ -23,6 +23,8 @@ export interface EvaluatorResponse { name: string; /** Description of the Evaluator. */ description?: string; + /** The JSON schema for the File. */ + schema?: Record; /** Long description of the file. 
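A sketch of reading the new `thinking` field on `ChatMessage`; the root-package `Humanloop` import is an assumption about how these types are re-exported.

```typescript
import { Humanloop } from "humanloop";

// Collect the visible chain-of-thought from an assistant message. Redacted
// thinking blocks only carry an opaque `data` payload, so they are skipped.
function visibleThinking(message: Humanloop.ChatMessage): string[] {
  return (message.thinking ?? []).flatMap((item) =>
    item.type === "thinking" ? [item.thinking] : [],
  );
}
```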
*/ readme?: string; /** List of tags associated with the file. */ diff --git a/src/api/types/EventType.ts b/src/api/types/EventType.ts new file mode 100644 index 00000000..cba1c9f1 --- /dev/null +++ b/src/api/types/EventType.ts @@ -0,0 +1,34 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * An enumeration. + */ +export type EventType = + | "agent_turn_start" + | "agent_turn_suspend" + | "agent_turn_continue" + | "agent_turn_end" + | "agent_start" + | "agent_update" + | "agent_end" + | "tool_start" + | "tool_update" + | "tool_end" + | "error" + | "agent_generation_error"; +export const EventType = { + AgentTurnStart: "agent_turn_start", + AgentTurnSuspend: "agent_turn_suspend", + AgentTurnContinue: "agent_turn_continue", + AgentTurnEnd: "agent_turn_end", + AgentStart: "agent_start", + AgentUpdate: "agent_update", + AgentEnd: "agent_end", + ToolStart: "tool_start", + ToolUpdate: "tool_update", + ToolEnd: "tool_end", + Error: "error", + AgentGenerationError: "agent_generation_error", +} as const; diff --git a/src/api/types/FileEnvironmentResponseFile.ts b/src/api/types/FileEnvironmentResponseFile.ts index 6941ac11..cb3c798e 100644 --- a/src/api/types/FileEnvironmentResponseFile.ts +++ b/src/api/types/FileEnvironmentResponseFile.ts @@ -12,4 +12,5 @@ export type FileEnvironmentResponseFile = | Humanloop.ToolResponse | Humanloop.DatasetResponse | Humanloop.EvaluatorResponse - | Humanloop.FlowResponse; + | Humanloop.FlowResponse + | Humanloop.AgentResponse; diff --git a/src/api/types/FileEnvironmentVariableRequest.ts b/src/api/types/FileEnvironmentVariableRequest.ts new file mode 100644 index 00000000..00274bc2 --- /dev/null +++ b/src/api/types/FileEnvironmentVariableRequest.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface FileEnvironmentVariableRequest { + /** Name of the environment variable. */ + name: string; + /** Value of the environment variable. */ + value: string; +} diff --git a/src/api/types/FileType.ts b/src/api/types/FileType.ts index 35763ee0..1d4676e7 100644 --- a/src/api/types/FileType.ts +++ b/src/api/types/FileType.ts @@ -5,11 +5,12 @@ /** * Enum for File types. */ -export type FileType = "prompt" | "tool" | "dataset" | "evaluator" | "flow"; +export type FileType = "prompt" | "tool" | "dataset" | "evaluator" | "flow" | "agent"; export const FileType = { Prompt: "prompt", Tool: "tool", Dataset: "dataset", Evaluator: "evaluator", Flow: "flow", + Agent: "agent", } as const; diff --git a/src/api/types/FilesToolType.ts b/src/api/types/FilesToolType.ts index d96527e1..4d8bb11c 100644 --- a/src/api/types/FilesToolType.ts +++ b/src/api/types/FilesToolType.ts @@ -5,7 +5,14 @@ /** * Type of tool. */ -export type FilesToolType = "pinecone_search" | "google" | "mock" | "snippet" | "json_schema" | "get_api_call"; +export type FilesToolType = + | "pinecone_search" + | "google" + | "mock" + | "snippet" + | "json_schema" + | "get_api_call" + | "python"; export const FilesToolType = { PineconeSearch: "pinecone_search", Google: "google", @@ -13,4 +20,5 @@ export const FilesToolType = { Snippet: "snippet", JsonSchema: "json_schema", GetApiCall: "get_api_call", + Python: "python", } as const; diff --git a/src/api/types/FlowResponse.ts b/src/api/types/FlowResponse.ts index db13401b..1af64d3a 100644 --- a/src/api/types/FlowResponse.ts +++ b/src/api/types/FlowResponse.ts @@ -24,6 +24,8 @@ export interface FlowResponse { name: string; /** Description of the Flow. 
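With `"agent"` added to `FileType`, existing branches over file types need a new case; a minimal sketch, assuming the root-package import shown:

```typescript
import { Humanloop } from "humanloop";

// Branch on the File type, including the new "agent" member.
function describeFile(type: Humanloop.FileType): string {
  switch (type) {
    case Humanloop.FileType.Agent:
      return "Agent";
    case Humanloop.FileType.Flow:
      return "Flow";
    default:
      return type; // "prompt" | "tool" | "dataset" | "evaluator"
  }
}
```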
*/ description?: string; + /** The JSON schema for the File. */ + schema?: Record; /** Long description of the file. */ readme?: string; /** List of tags associated with the file. */ diff --git a/src/api/types/LinkedFileRequest.ts b/src/api/types/LinkedFileRequest.ts new file mode 100644 index 00000000..88a2a13d --- /dev/null +++ b/src/api/types/LinkedFileRequest.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface LinkedFileRequest { + fileId: string; + environmentId?: string; + versionId?: string; +} diff --git a/src/api/types/ListAgents.ts b/src/api/types/ListAgents.ts new file mode 100644 index 00000000..3510a066 --- /dev/null +++ b/src/api/types/ListAgents.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +export interface ListAgents { + /** The list of Agents. */ + records: Humanloop.AgentResponse[]; +} diff --git a/src/api/types/LogResponse.ts b/src/api/types/LogResponse.ts index 68536c5e..aa15dc2c 100644 --- a/src/api/types/LogResponse.ts +++ b/src/api/types/LogResponse.ts @@ -8,4 +8,5 @@ export type LogResponse = | Humanloop.PromptLogResponse | Humanloop.ToolLogResponse | Humanloop.EvaluatorLogResponse - | Humanloop.FlowLogResponse; + | Humanloop.FlowLogResponse + | Humanloop.AgentLogResponse; diff --git a/src/api/types/LogStreamResponse.ts b/src/api/types/LogStreamResponse.ts new file mode 100644 index 00000000..488cfeac --- /dev/null +++ b/src/api/types/LogStreamResponse.ts @@ -0,0 +1,7 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +export type LogStreamResponse = Humanloop.PromptCallStreamResponse | Humanloop.AgentLogStreamResponse; diff --git a/src/api/types/ModelProviders.ts b/src/api/types/ModelProviders.ts index f87ac682..6147b168 100644 --- a/src/api/types/ModelProviders.ts +++ b/src/api/types/ModelProviders.ts @@ -6,25 +6,25 @@ * Supported model providers. */ export type ModelProviders = - | "openai" - | "openai_azure" - | "mock" | "anthropic" | "bedrock" | "cohere" - | "replicate" + | "deepseek" | "google" | "groq" - | "deepseek"; + | "mock" + | "openai" + | "openai_azure" + | "replicate"; export const ModelProviders = { - Openai: "openai", - OpenaiAzure: "openai_azure", - Mock: "mock", Anthropic: "anthropic", Bedrock: "bedrock", Cohere: "cohere", - Replicate: "replicate", + Deepseek: "deepseek", Google: "google", Groq: "groq", - Deepseek: "deepseek", + Mock: "mock", + Openai: "openai", + OpenaiAzure: "openai_azure", + Replicate: "replicate", } as const; diff --git a/src/api/types/OnAgentCallEnum.ts b/src/api/types/OnAgentCallEnum.ts new file mode 100644 index 00000000..98969e44 --- /dev/null +++ b/src/api/types/OnAgentCallEnum.ts @@ -0,0 +1,12 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * What an Agent should do when calling a Tool. + */ +export type OnAgentCallEnum = "stop" | "continue"; +export const OnAgentCallEnum = { + Stop: "stop", + Continue: "continue", +} as const; diff --git a/src/api/types/ReasoningEffort.ts b/src/api/types/OpenAiReasoningEffort.ts similarity index 63% rename from src/api/types/ReasoningEffort.ts rename to src/api/types/OpenAiReasoningEffort.ts index 5aacc51a..a9dd607f 100644 --- a/src/api/types/ReasoningEffort.ts +++ b/src/api/types/OpenAiReasoningEffort.ts @@ -5,8 +5,8 @@ /** * Supported reasoning effort. 
*/ -export type ReasoningEffort = "high" | "medium" | "low"; -export const ReasoningEffort = { +export type OpenAiReasoningEffort = "high" | "medium" | "low"; +export const OpenAiReasoningEffort = { High: "high", Medium: "medium", Low: "low", diff --git a/src/api/types/PaginatedDataAgentResponse.ts b/src/api/types/PaginatedDataAgentResponse.ts new file mode 100644 index 00000000..f5af5b6b --- /dev/null +++ b/src/api/types/PaginatedDataAgentResponse.ts @@ -0,0 +1,12 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +export interface PaginatedDataAgentResponse { + records: Humanloop.AgentResponse[]; + page: number; + size: number; + total: number; +} diff --git a/src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse.ts b/src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse.ts similarity index 69% rename from src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse.ts rename to src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse.ts index 260194eb..308a2c42 100644 --- a/src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse.ts +++ b/src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse.ts @@ -4,8 +4,8 @@ import * as Humanloop from "../index"; -export interface PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse { - records: Humanloop.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem[]; +export interface PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse { + records: Humanloop.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem[]; page: number; size: number; total: number; diff --git a/src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem.ts b/src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem.ts new file mode 100644 index 00000000..e8363384 --- /dev/null +++ b/src/api/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +export type PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem = + + | Humanloop.PromptResponse + | Humanloop.ToolResponse + | Humanloop.DatasetResponse + | Humanloop.EvaluatorResponse + | Humanloop.FlowResponse + | Humanloop.AgentResponse; diff --git a/src/api/types/PopulateTemplateResponse.ts b/src/api/types/PopulateTemplateResponse.ts index 6db26034..c92b9621 100644 --- a/src/api/types/PopulateTemplateResponse.ts +++ b/src/api/types/PopulateTemplateResponse.ts @@ -52,8 +52,8 @@ export interface PopulateTemplateResponse { seed?: number; /** The format of the response. Only `{"type": "json_object"}` is currently supported for chat. 
*/ responseFormat?: Humanloop.ResponseFormat; - /** Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. */ - reasoningEffort?: Humanloop.ReasoningEffort; + /** Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. */ + reasoningEffort?: Humanloop.PopulateTemplateResponseReasoningEffort; /** The tool specification that the model can choose to call if Tool calling is supported. */ tools?: Humanloop.ToolFunction[]; /** The tools linked to your prompt that the model can call. */ @@ -72,6 +72,8 @@ export interface PopulateTemplateResponse { readme?: string; /** Name of the Prompt. */ name: string; + /** The JSON schema for the Prompt. */ + schema?: Record; /** Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned. */ versionId: string; type?: "prompt"; diff --git a/src/api/types/PopulateTemplateResponseReasoningEffort.ts b/src/api/types/PopulateTemplateResponseReasoningEffort.ts new file mode 100644 index 00000000..1d83b6f8 --- /dev/null +++ b/src/api/types/PopulateTemplateResponseReasoningEffort.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + */ +export type PopulateTemplateResponseReasoningEffort = Humanloop.OpenAiReasoningEffort | number; diff --git a/src/api/types/PromptKernelRequest.ts b/src/api/types/PromptKernelRequest.ts index 2e1ec153..9225e0b9 100644 --- a/src/api/types/PromptKernelRequest.ts +++ b/src/api/types/PromptKernelRequest.ts @@ -4,6 +4,11 @@ import * as Humanloop from "../index"; +/** + * Base class used by both PromptKernelRequest and AgentKernelRequest. + * + * Contains the consistent Prompt-related fields. + */ export interface PromptKernelRequest { /** The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) */ model: string; @@ -40,8 +45,8 @@ export interface PromptKernelRequest { seed?: number; /** The format of the response. Only `{"type": "json_object"}` is currently supported for chat. */ responseFormat?: Humanloop.ResponseFormat; - /** Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. */ - reasoningEffort?: Humanloop.ReasoningEffort; + /** Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. */ + reasoningEffort?: Humanloop.PromptKernelRequestReasoningEffort; /** The tool specification that the model can choose to call if Tool calling is supported. */ tools?: Humanloop.ToolFunction[]; /** The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. 
The default deployed version of that tool is called. */ diff --git a/src/api/types/PromptKernelRequestReasoningEffort.ts b/src/api/types/PromptKernelRequestReasoningEffort.ts new file mode 100644 index 00000000..ce1b6cab --- /dev/null +++ b/src/api/types/PromptKernelRequestReasoningEffort.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + */ +export type PromptKernelRequestReasoningEffort = Humanloop.OpenAiReasoningEffort | number; diff --git a/src/api/types/PromptResponse.ts b/src/api/types/PromptResponse.ts index 161e500e..beb732f6 100644 --- a/src/api/types/PromptResponse.ts +++ b/src/api/types/PromptResponse.ts @@ -52,8 +52,8 @@ export interface PromptResponse { seed?: number; /** The format of the response. Only `{"type": "json_object"}` is currently supported for chat. */ responseFormat?: Humanloop.ResponseFormat; - /** Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. */ - reasoningEffort?: Humanloop.ReasoningEffort; + /** Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. */ + reasoningEffort?: Humanloop.PromptResponseReasoningEffort; /** The tool specification that the model can choose to call if Tool calling is supported. */ tools?: Humanloop.ToolFunction[]; /** The tools linked to your prompt that the model can call. */ @@ -72,6 +72,8 @@ export interface PromptResponse { readme?: string; /** Name of the Prompt. */ name: string; + /** The JSON schema for the Prompt. */ + schema?: Record; /** Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned. */ versionId: string; type?: "prompt"; diff --git a/src/api/types/PromptResponseReasoningEffort.ts b/src/api/types/PromptResponseReasoningEffort.ts new file mode 100644 index 00000000..baaa9ca1 --- /dev/null +++ b/src/api/types/PromptResponseReasoningEffort.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. 
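Because `reasoningEffort` is now a union, both of the following sketches type-check against `PromptKernelRequest`; the model names are illustrative.

```typescript
import { Humanloop } from "humanloop";

// OpenAI reasoning models take the effort enum...
const openaiVersion: Humanloop.PromptKernelRequest = {
  model: "o3-mini",
  reasoningEffort: "medium",
};

// ...while Anthropic reasoning models take a maximum thinking-token budget.
const anthropicVersion: Humanloop.PromptKernelRequest = {
  model: "claude-3-7-sonnet-latest", // illustrative model name
  provider: "anthropic",
  reasoningEffort: 4096,
};
```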
+ */ +export type PromptResponseReasoningEffort = Humanloop.OpenAiReasoningEffort | number; diff --git a/src/api/types/RunVersionResponse.ts b/src/api/types/RunVersionResponse.ts index e9a0257a..a518f82d 100644 --- a/src/api/types/RunVersionResponse.ts +++ b/src/api/types/RunVersionResponse.ts @@ -8,4 +8,5 @@ export type RunVersionResponse = | Humanloop.PromptResponse | Humanloop.ToolResponse | Humanloop.EvaluatorResponse - | Humanloop.FlowResponse; + | Humanloop.FlowResponse + | Humanloop.AgentResponse; diff --git a/src/api/types/ToolCallResponse.ts b/src/api/types/ToolCallResponse.ts new file mode 100644 index 00000000..49cdd630 --- /dev/null +++ b/src/api/types/ToolCallResponse.ts @@ -0,0 +1,63 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Humanloop from "../index"; + +/** + * Response model for a Tool call. + */ +export interface ToolCallResponse { + /** When the logged event started. */ + startTime?: Date; + /** When the logged event ended. */ + endTime?: Date; + /** Tool used to generate the Log. */ + tool: Humanloop.ToolResponse; + /** Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. */ + output?: string; + /** User defined timestamp for when the log was created. */ + createdAt?: Date; + /** Error message if the log is an error. */ + error?: string; + /** Duration of the logged event in seconds. */ + providerLatency?: number; + /** Captured log and debug statements. */ + stdout?: string; + /** Raw request sent to provider. */ + providerRequest?: Record; + /** Raw response received the provider. */ + providerResponse?: Record; + /** The inputs passed to the prompt template. */ + inputs?: Record; + /** Identifies where the model was called from. */ + source?: string; + /** Any additional metadata to record. */ + metadata?: Record; + /** Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. */ + logStatus?: Humanloop.LogStatus; + /** Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. */ + sourceDatapointId?: string; + /** The ID of the parent Log to nest this Log under in a Trace. */ + traceParentId?: string; + /** Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations */ + batches?: string[]; + /** End-user ID related to the Log. */ + user?: string; + /** The name of the Environment the Log is associated to. */ + environment?: string; + /** Whether the request/response payloads will be stored on Humanloop. */ + save?: boolean; + /** This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. */ + logId?: string; + /** ID of the log. */ + id: string; + /** List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. */ + evaluatorLogs: Humanloop.EvaluatorLogResponse[]; + /** Identifier for the Flow that the Trace belongs to. */ + traceFlowId?: string; + /** ID of the Trace containing the Tool Call Log. 
*/ + traceId?: string; + /** Logs nested under this Log in the Trace. */ + traceChildren?: Humanloop.LogResponse[]; +} diff --git a/src/api/types/ToolLogResponse.ts b/src/api/types/ToolLogResponse.ts index dac3d071..9da362a8 100644 --- a/src/api/types/ToolLogResponse.ts +++ b/src/api/types/ToolLogResponse.ts @@ -60,4 +60,6 @@ export interface ToolLogResponse { traceChildren?: Humanloop.LogResponse[]; /** Tool used to generate the Log. */ tool: Humanloop.ToolResponse; + /** The message returned by the Tool. */ + outputMessage?: Humanloop.ChatMessage; } diff --git a/src/api/types/VersionDeploymentResponseFile.ts b/src/api/types/VersionDeploymentResponseFile.ts index f6178319..dee7f20a 100644 --- a/src/api/types/VersionDeploymentResponseFile.ts +++ b/src/api/types/VersionDeploymentResponseFile.ts @@ -12,4 +12,5 @@ export type VersionDeploymentResponseFile = | Humanloop.ToolResponse | Humanloop.DatasetResponse | Humanloop.EvaluatorResponse - | Humanloop.FlowResponse; + | Humanloop.FlowResponse + | Humanloop.AgentResponse; diff --git a/src/api/types/VersionIdResponseVersion.ts b/src/api/types/VersionIdResponseVersion.ts index 1e9f9710..a5279b59 100644 --- a/src/api/types/VersionIdResponseVersion.ts +++ b/src/api/types/VersionIdResponseVersion.ts @@ -12,4 +12,5 @@ export type VersionIdResponseVersion = | Humanloop.ToolResponse | Humanloop.DatasetResponse | Humanloop.EvaluatorResponse - | Humanloop.FlowResponse; + | Humanloop.FlowResponse + | Humanloop.AgentResponse; diff --git a/src/api/types/index.ts b/src/api/types/index.ts index 02032449..50b0fe8b 100644 --- a/src/api/types/index.ts +++ b/src/api/types/index.ts @@ -1,9 +1,38 @@ +export * from "./AgentCallResponseToolChoice"; +export * from "./AgentCallResponse"; +export * from "./AgentCallStreamResponsePayload"; +export * from "./AgentCallStreamResponse"; +export * from "./AgentContinueCallResponseToolChoice"; +export * from "./AgentContinueCallResponse"; +export * from "./AgentContinueCallStreamResponsePayload"; +export * from "./AgentContinueCallStreamResponse"; +export * from "./AgentInlineTool"; +export * from "./AgentKernelRequestTemplate"; +export * from "./AgentKernelRequestStop"; +export * from "./AgentKernelRequestReasoningEffort"; +export * from "./AgentKernelRequestToolsItem"; +export * from "./AgentKernelRequest"; +export * from "./AgentLinkedFileRequest"; +export * from "./AgentLinkedFileResponseFile"; +export * from "./AgentLinkedFileResponse"; +export * from "./AgentLogResponseToolChoice"; +export * from "./AgentLogResponse"; +export * from "./AgentLogStreamResponse"; +export * from "./AgentResponseTemplate"; +export * from "./AgentResponseStop"; +export * from "./AgentResponseReasoningEffort"; +export * from "./AgentResponseToolsItem"; +export * from "./AgentResponse"; +export * from "./AnthropicRedactedThinkingContent"; +export * from "./AnthropicThinkingContent"; export * from "./BooleanEvaluatorStatsResponse"; export * from "./ChatMessageContentItem"; export * from "./ChatMessageContent"; +export * from "./ChatMessageThinkingItem"; export * from "./ChatMessage"; export * from "./ChatRole"; export * from "./CodeEvaluatorRequest"; +export * from "./CreateAgentLogResponse"; export * from "./CreateDatapointRequestTargetValue"; export * from "./CreateDatapointRequest"; export * from "./CreateEvaluatorLogResponse"; @@ -43,9 +72,11 @@ export * from "./EvaluatorResponseSpec"; export * from "./EvaluatorResponse"; export * from "./EvaluatorReturnTypeEnum"; export * from "./EvaluatorVersionId"; +export * from "./EventType"; export * 
from "./ExternalEvaluatorRequest"; export * from "./FileEnvironmentResponseFile"; export * from "./FileEnvironmentResponse"; +export * from "./FileEnvironmentVariableRequest"; export * from "./FileId"; export * from "./FilePath"; export * from "./FileRequest"; @@ -63,7 +94,9 @@ export * from "./ImageUrlDetail"; export * from "./ImageUrl"; export * from "./InputResponse"; export * from "./LlmEvaluatorRequest"; +export * from "./LinkedFileRequest"; export * from "./LinkedToolResponse"; +export * from "./ListAgents"; export * from "./ListDatasets"; export * from "./ListEvaluators"; export * from "./ListFlows"; @@ -71,6 +104,7 @@ export * from "./ListPrompts"; export * from "./ListTools"; export * from "./LogResponse"; export * from "./LogStatus"; +export * from "./LogStreamResponse"; export * from "./ModelEndpoints"; export * from "./ModelProviders"; export * from "./MonitoringEvaluatorEnvironmentRequest"; @@ -79,7 +113,10 @@ export * from "./MonitoringEvaluatorState"; export * from "./MonitoringEvaluatorVersionRequest"; export * from "./NumericEvaluatorStatsResponse"; export * from "./ObservabilityStatus"; +export * from "./OnAgentCallEnum"; +export * from "./OpenAiReasoningEffort"; export * from "./OverallStats"; +export * from "./PaginatedDataAgentResponse"; export * from "./PaginatedDatapointResponse"; export * from "./PaginatedDatasetResponse"; export * from "./PaginatedDataEvaluationLogResponse"; @@ -89,11 +126,12 @@ export * from "./PaginatedDataFlowResponse"; export * from "./PaginatedDataLogResponse"; export * from "./PaginatedDataPromptResponse"; export * from "./PaginatedDataToolResponse"; -export * from "./PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem"; -export * from "./PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse"; +export * from "./PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem"; +export * from "./PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse"; export * from "./PlatformAccessEnum"; export * from "./PopulateTemplateResponseTemplate"; export * from "./PopulateTemplateResponseStop"; +export * from "./PopulateTemplateResponseReasoningEffort"; export * from "./PopulateTemplateResponsePopulatedTemplate"; export * from "./PopulateTemplateResponse"; export * from "./ProjectSortBy"; @@ -103,14 +141,15 @@ export * from "./PromptCallResponse"; export * from "./PromptCallStreamResponse"; export * from "./PromptKernelRequestTemplate"; export * from "./PromptKernelRequestStop"; +export * from "./PromptKernelRequestReasoningEffort"; export * from "./PromptKernelRequest"; export * from "./PromptLogResponseToolChoice"; export * from "./PromptLogResponse"; export * from "./PromptResponseTemplate"; export * from "./PromptResponseStop"; +export * from "./PromptResponseReasoningEffort"; export * from "./PromptResponse"; export * from "./ProviderApiKeys"; -export * from "./ReasoningEffort"; export * from "./ResponseFormatType"; export * from "./ResponseFormat"; export * from "./RunStatsResponseEvaluatorStatsItem"; @@ -123,6 +162,7 @@ export * from "./TextChatContent"; export * from "./TextEvaluatorStatsResponse"; export * from "./TimeUnit"; export * from "./ToolCall"; +export * from "./ToolCallResponse"; export * from "./ToolChoice"; export * from "./ToolFunction"; export * from "./ToolKernelRequest"; diff --git a/src/evals/run.ts b/src/evals/run.ts index c712b928..4a618e4b 100644 
--- a/src/evals/run.ts +++ b/src/evals/run.ts @@ -11,6 +11,7 @@ import cliProgress from "cli-progress"; import _, { capitalize } from "lodash"; import { + AgentResponse, BooleanEvaluatorStatsResponse, CreateEvaluationRequestEvaluatorsItem, DatapointResponse, @@ -47,9 +48,10 @@ import { HumanloopRuntimeError } from "../error"; import { Humanloop, HumanloopClient } from "../index"; import { Dataset, + EvalFileType, Evaluator, EvaluatorCheck, - File, + File as FileEvalConfig, LocalEvaluator, LocalEvaluatorReturnTypeEnum, OnlineEvaluator, @@ -62,166 +64,12 @@ const GREEN = "\x1b[92m"; const RED = "\x1b[91m"; const RESET = "\x1b[0m"; -/** - * Maps over an array of items with a concurrency limit, applying an asynchronous mapper function to each item. - * - * @template T - The type of the items in the input array. - * @template O - The type of the items in the output array. - * - * @param {T[]} iterable - The array of items to be mapped. - * @param {(item: T) => Promise} mapper - The asynchronous function to apply to each item. - * @param {{ concurrency: number }} options - Options for the mapping operation. - * @param {number} options.concurrency - The maximum number of promises to resolve concurrently. - * - * @returns {Promise} A promise that resolves to an array of mapped items. - * - * @throws {TypeError} If the first argument is not an array. - * @throws {TypeError} If the second argument is not a function. - * @throws {TypeError} If the concurrency option is not a positive number. - * - * @description - * The `pMap` function processes the input array in chunks, where the size of each chunk is determined by the `concurrency` option. - * This controls how many promises are resolved at a time, which can help avoid issues such as rate limit errors when making server requests. - */ -async function pMap( - iterable: T[], - mapper: (item: T) => Promise, - options: { concurrency: number }, -): Promise { - const { concurrency } = options; - - if (!Array.isArray(iterable)) { - throw new TypeError("Expected the first argument to be an array"); - } - - if (typeof mapper !== "function") { - throw new TypeError("Expected the second argument to be a function"); - } - - if (typeof concurrency !== "number" || concurrency <= 0) { - throw new TypeError("Expected the concurrency option to be a positive number"); - } - - const result: O[] = []; - for (let i = 0; i < iterable.length; i += concurrency) { - const chunk = iterable.slice(i, i + concurrency); - try { - const chunkResults = await Promise.all(chunk.map(mapper)); - result.push(...chunkResults); - } catch (error) { - // Handle individual chunk errors if necessary - // For now, rethrow to reject the entire pMap promise - throw error; - } - } - return result; -} - -function callableIsHumanloopUtility< - I extends Record & { messages?: any[] }, - O, ->(file: File): boolean { - return file.callable !== undefined && "decorator" in file.callable; -} - -function fileOrFileInsideHLUtility< - I extends Record & { messages?: any[] }, - O, ->(file: File): File { - if (callableIsHumanloopUtility(file)) { - // @ts-ignore - const innerFile: File = file.callable!.file! as File; - if ("path" in file && innerFile.path !== file.path) { - throw new HumanloopRuntimeError( - "`path` attribute specified in the `file` does not match the path of the decorated function. 
" + - `Expected \`${innerFile.path}\`, got \`${file.path}\`.`, - ); - } - if ("id" in file) { - throw new HumanloopRuntimeError( - "Do not specify an `id` attribute in `file` argument when using a decorated function.", - ); - } - if ("version" in file) { - if (innerFile.type !== "prompt") { - throw new HumanloopRuntimeError( - `Do not specify a \`version\` attribute in \`file\` argument when using a ${capitalize(innerFile.type)} decorated function.`, - ); - } - } - if ("type" in file && innerFile.type !== file.type) { - throw new HumanloopRuntimeError( - "Attribute `type` of `file` argument does not match the file type of the decorated function. " + - `Expected \`${innerFile.type}\`, got \`${file.type}\`.`, - ); - } - const file_ = { ...innerFile }; - if (file_.type === "prompt") { - console.warn( - `${YELLOW}` + - "The @prompt decorator will not spy on provider calls when passed to `evaluations.run()`. " + - "Using the `version` in `file` argument instead.\n" + - `${RESET}`, - ); - file_.version = file.version; - } - return file_; - } else { - const file_ = { ...file }; - if (!file_.path && !file_.id) { - throw new HumanloopRuntimeError( - "You must provide a path or id in your `file`.", - ); - } - return file_; - } -} - -function getFileType & { messages?: any[] }, O>( - file: File, -): FileType { - // Determine the `type` of the `file` to Evaluate - if not `type` provided, default to `flow` - try { - const type_ = file.type as FileType; - console.info( - `${CYAN}Evaluating your ${type_} function corresponding to \`${file.path || file.id}\` on Humanloop${RESET}\n\n`, - ); - return type_ || "flow"; - } catch (error) { - const type_ = "flow"; - console.warn( - `${YELLOW}No \`file\` type specified, defaulting to flow.${RESET}\n`, - ); - return type_; - } -} - -function getFileCallable & { messages?: any[] }, O>( - file: File, - type_: FileType, -): File["callable"] { - // Get the `callable` from the `file` to Evaluate - const function_ = file.callable; - if (!function_) { - if (type_ === "flow") { - throw new Error( - "You must provide a `callable` for your Flow `file` to run a local eval.", - ); - } else { - console.info( - `${CYAN}No \`callable\` provided for your ${type_} file - will attempt to generate logs on Humanloop.\n\n${RESET}`, - ); - } - } - return function_; -} - export async function runEval< I extends Record & { messages?: any[] }, O, >( client: HumanloopClient, - file: File, + fileConfig: FileEvalConfig, dataset: Dataset, name?: string, evaluators: ( @@ -235,29 +83,13 @@ export async function runEval< } concurrency = Math.min(concurrency, 32); - const file_ = fileOrFileInsideHLUtility(file); - const type_ = getFileType(file_); - const function_ = getFileCallable(file_, type_); - - if (function_ && "file" in function_) { - // @ts-ignore - const decoratorType = (function_.file as File).type; - if (decoratorType !== type_) { - throw new HumanloopRuntimeError( - `The type of the decorated function does not match the type of the file. 
Expected \`${capitalize(type_)}\`, got \`${capitalize(decoratorType)}\`.`, - ); - } - } - - let hlFile: PromptResponse | FlowResponse | ToolResponse | EvaluatorResponse; - try { - hlFile = await upsertFile({ file: file_, type: type_, client: client }); - } catch (e: any) { - console.error( - `${RED}Error in your \`file\` argument:\n\n${e.constructor.name}: ${e.message}${RESET}`, + const [hlFile, function_] = await getHLFile(client, fileConfig); + if (hlFile.type === "flow" && !function_) { + throw new HumanloopRuntimeError( + "Flows can only be evaluated locally, please provide a callable inside `file` argument.", ); - return []; } + const type_ = hlFile.type as FileType; let hlDataset: DatasetResponse; try { @@ -309,7 +141,7 @@ export async function runEval< function handleExitSignal(signum: number) { process.stderr.write( - `\n${RED}Received signal ${signum}, cancelling Evaluation and shutting down threads...${RESET}\n`, + `\n${RED}Received signal ${signum}, cancelling the Evaluation...${RESET}\n`, ); cancelEvaluation(); process.exit(signum); @@ -331,12 +163,12 @@ export async function runEval< if (function_ === undefined) { // TODO: trigger run when updated API is available process.stdout.write( - `${CYAN}\nRunning '${hlFile.name}' over the Dataset '${hlDataset.name}'${RESET}\n`, + `${CYAN}\nRunning '${hlFile.name}' ${_.capitalize(hlFile.type)} over the '${hlDataset.name}' Dataset${RESET}\n`, ); } else { // Running the evaluation locally process.stdout.write( - `${CYAN}\nRunning '${hlFile.name}' over the Dataset '${hlDataset.name}' locally...${RESET}\n\n`, + `${CYAN}\nRunning '${hlFile.name}' ${_.capitalize(hlFile.type)} over the '${hlDataset.name}' Dataset locally...${RESET}\n\n`, ); } @@ -453,9 +285,6 @@ export async function runEval< // Generate locally if a function is provided if (function_) { - console.log( - `${CYAN}\nRunning ${hlFile.name} over the Dataset ${hlDataset.name}${RESET}`, - ); const totalDatapoints = hlDataset.datapoints!.length; progressBar.start(totalDatapoints, 0); @@ -467,11 +296,6 @@ export async function runEval< { concurrency: concurrency }, ); progressBar.stop(); - } else { - // TODO: trigger run when updated API is available - console.log( - `${CYAN}\nRunning ${hlFile.name} over the Dataset ${hlDataset.name}${RESET}`, - ); } // Wait for the Evaluation to complete then print the results @@ -530,10 +354,341 @@ export async function runEval< return checks; } +/** + * Maps over an array of items with a concurrency limit, applying an asynchronous mapper function to each item. + * + * @template T - The type of the items in the input array. + * @template O - The type of the items in the output array. + * + * @param {T[]} iterable - The array of items to be mapped. + * @param {(item: T) => Promise} mapper - The asynchronous function to apply to each item. + * @param {{ concurrency: number }} options - Options for the mapping operation. + * @param {number} options.concurrency - The maximum number of promises to resolve concurrently. + * + * @returns {Promise} A promise that resolves to an array of mapped items. + * + * @throws {TypeError} If the first argument is not an array. + * @throws {TypeError} If the second argument is not a function. + * @throws {TypeError} If the concurrency option is not a positive number. + * + * @description + * The `pMap` function processes the input array in chunks, where the size of each chunk is determined by the `concurrency` option. 
+ * This controls how many promises are resolved at a time, which can help avoid issues such as rate limit errors when making server requests. + */ +async function pMap( + iterable: T[], + mapper: (item: T) => Promise, + options: { concurrency: number }, +): Promise { + const { concurrency } = options; + + if (!Array.isArray(iterable)) { + throw new TypeError("Expected the first argument to be an array"); + } + + if (typeof mapper !== "function") { + throw new TypeError("Expected the second argument to be a function"); + } + + if (typeof concurrency !== "number" || concurrency <= 0) { + throw new TypeError("Expected the concurrency option to be a positive number"); + } + + const result: O[] = []; + for (let i = 0; i < iterable.length; i += concurrency) { + const chunk = iterable.slice(i, i + concurrency); + try { + const chunkResults = await Promise.all(chunk.map(mapper)); + result.push(...chunkResults); + } catch (error) { + // Handle individual chunk errors if necessary + // For now, rethrow to reject the entire pMap promise + throw error; + } + } + return result; +} + +function callableIsHumanloopDecorator< + I extends Record & { messages?: any[] }, + O, +>(file: FileEvalConfig): boolean { + return file.callable !== undefined && "file" in file.callable; +} + +function fileOrFileInsideHLUtility< + I extends Record & { messages?: any[] }, + O, +>(file: FileEvalConfig): FileEvalConfig { + if (callableIsHumanloopDecorator(file)) { + // @ts-ignore + const innerFile: FileEvalConfig = file.callable!.file! as FileEvalConfig< + I, + O + >; + if ("path" in file && innerFile.path !== file.path) { + throw new HumanloopRuntimeError( + "`path` attribute specified in the `file` does not match the path of the decorated function. " + + `Expected \`${innerFile.path}\`, got \`${file.path}\`.`, + ); + } + if ("id" in file) { + throw new HumanloopRuntimeError( + "Do not specify an `id` attribute in `file` argument when using a decorated function.", + ); + } + if ("version" in file) { + if (innerFile.type !== "prompt") { + throw new HumanloopRuntimeError( + `Do not specify a \`version\` attribute in \`file\` argument when using a ${capitalize(innerFile.type)} decorated function.`, + ); + } + } + if ("type" in file && innerFile.type !== file.type) { + throw new HumanloopRuntimeError( + "Attribute `type` of `file` argument does not match the file type of the decorated function. " + + `Expected \`${innerFile.type}\`, got \`${file.type}\`.`, + ); + } + const file_ = { ...innerFile }; + if (file_.type === "prompt") { + console.warn( + `${YELLOW}` + + "The @prompt decorator will not spy on provider calls when passed to `evaluations.run()`. 
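A standalone sketch of the chunked-concurrency pattern `pMap` implements (`pMap` itself is file-local to `run.ts`, and the URLs are placeholders):

```typescript
// Resolve at most `concurrency` promises at a time by processing the input in chunks.
const urls = ["https://example.com/a", "https://example.com/b", "https://example.com/c"];
const bodies = await pMap(
  urls,
  async (url) => (await fetch(url)).text(),
  { concurrency: 2 },
);
console.log(bodies.length); // 3
```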
" + + "Using the `version` in `file` argument instead.\n" + + `${RESET}`, + ); + file_.version = file.version; + } + return file_; + } else { + const file_ = { ...file }; + if (!file_.path && !file_.id) { + throw new HumanloopRuntimeError( + "You must provide a path or id in your `file`.", + ); + } + return file_; + } +} + +function getFileType & { messages?: any[] }, O>( + file: FileEvalConfig, +): FileEvalConfig { + // Determine the `type` of the `file` to Evaluate - if not `type` provided, default to `flow` + try { + let type_ = file.type as EvalFileType; + console.info( + `${CYAN}Evaluating your ${type_} function corresponding to \`${file.path || file.id}\` on Humanloop${RESET}\n\n`, + ); + if (!type_) { + type_ = "flow"; + } + return { + ...file, + type: type_, + }; + } catch (error) { + const type_ = "flow"; + console.warn( + `${YELLOW}No \`file\` type specified, defaulting to flow.${RESET}\n`, + ); + return { + ...file, + type: type_, + }; + } +} + +function getFileCallable & { messages?: any[] }, O>( + file: FileEvalConfig, +): FileEvalConfig["callable"] { + // Get the `callable` from the `file` to Evaluate + const function_ = file.callable; + const type_ = file.type; + if (!function_) { + if (type_ === "flow") { + throw new Error( + "You must provide a `callable` for your Flow `file` to run a local eval.", + ); + } else { + console.info( + `${CYAN}No \`callable\` provided for your ${_.capitalize(type_)} file - will attempt to generate logs on Humanloop.\n\n${RESET}`, + ); + } + } else if (type_ === "agent") { + throw new HumanloopRuntimeError( + "Agent evaluation is only possible on the Humanloop runtime, do not provide a `callable`.", + ); + } + return function_; +} + +type EvaluatedFile = + | PromptResponse + | FlowResponse + | ToolResponse + | EvaluatorResponse + | AgentResponse; + +/** + * Check if the config object is valid, and resolve the File to be evaluated + * + * The callable will be undefined if the evaluation will happen on Humanloop runtime. + * Otherwise, the evaluation will happen locally. + */ +async function getHLFile & { message?: any[] }, O>( + client: HumanloopClient, + fileConfig: FileEvalConfig, +): Promise<[EvaluatedFile, FileEvalConfig["callable"]]> { + let file_ = fileOrFileInsideHLUtility(fileConfig); + file_ = getFileType(file_); + + return await resolveFile(client, file_); +} + +/** + * Get the appropriate subclient based on file type. + */ +function getSubclient & { message?: any[] }, O>( + client: HumanloopClient, + fileConfig: FileEvalConfig, +) { + const type = fileConfig.type; + switch (type) { + case "prompt": + return client.prompts; + case "flow": + return client.flows; + case "agent": + return client.agents; + default: + throw new HumanloopRuntimeError(`Unsupported file type: ${type}`); + } +} + +/** + * Get default version of a File from online workspace. + * + * Uses either the File path or id from the config. + * + * Raise error if the File is not of the expected type, or if the user has provided both a path and an id. 
+ */ +async function safeGetDefaultFileVersion< + I extends Record & { message?: any[] }, + O, +>(client: HumanloopClient, fileConfig: FileEvalConfig) { + const path = fileConfig.path; + const type = fileConfig.type; + const fileId = fileConfig.id; + + if (!path && !fileId) { + throw new HumanloopRuntimeError( + `You must provide a path or id in your \`file\`.`, + ); + } + + if (path) { + let hlFile = await client.files.retrieveByPath({ path: path }); + if (hlFile.type !== type) { + throw new HumanloopRuntimeError( + `File in Humanloop workspace at ${path} is not of type ${type}, but ${hlFile.type}.`, + ); + } + return hlFile; + } else if (fileId) { + const subclient = getSubclient(client, fileConfig); + return await subclient.get(fileId!); + } else { + throw new HumanloopRuntimeError( + "Either a path or file ID should be provided in your File eval config", + ); + } +} + +/** + * Resolve the File to be evaluated. Will return a FileResponse and an optional callable. + * + * If the callable is null, the File will be evaluated on Humanloop. Otherwise, the File will be evaluated locally. + */ +async function resolveFile & { message?: any[] }, O>( + client: HumanloopClient, + fileConfig: FileEvalConfig, +): Promise<[EvaluatedFile, FileEvalConfig["callable"]]> { + const fileId = fileConfig.id; + const path = fileConfig.path; + const versionId = fileConfig.versionId; + const environment = fileConfig.environment; + const callable = getFileCallable(fileConfig); + const version = fileConfig.version; + + if (callable && !path && !fileId) { + throw new HumanloopRuntimeError( + `You are trying to create a new version of the File by passing the ${callable} argument. You must pass either the \`file.path\` or \`file.fileId\` argument and provide proper \`file.version\` for upserting the File.`, + ); + } + + if ((versionId || environment) && (callable || version)) { + throw new HumanloopRuntimeError( + "You are trying to create a local Evaluation while requesting a specific File version by version ID or environment.", + ); + } + + let hlFile: EvaluatedFile; + + try { + hlFile = (await safeGetDefaultFileVersion(client, fileConfig)) as EvaluatedFile; + } catch (error: any) { + if (!version || !path || fileId) { + throw new HumanloopRuntimeError( + "File does not exist on Humanloop. Please provide a `file.path` and a version to create a new version.", + ); + } + console.log("UPSERTING FILE", JSON.stringify(fileConfig, null, 2)); + return [await upsertFile(client, fileConfig), callable]; + } + + if (version) { + // User responsibility to provide adequate file.version for upserting the file + console.info( + `${CYAN}Upserting a new File version based on \`file.version\`. Will use provided callable for generating Logs.${RESET}\n`, + ); + try { + return [ + await upsertFile(client, fileConfig), + callable ? callable : undefined, + ]; + } catch (error: any) { + throw new HumanloopRuntimeError( + `Error upserting File. 
Please ensure \`file.version\` is valid: ${error.toString()}`, + ); + } + } + + if (!versionId && !environment) { + // Return default version of the file + return [hlFile as unknown as EvaluatedFile, callable]; + } + + if (!fileId && (versionId || environment)) { + throw new HumanloopRuntimeError( + "You must provide the `file.id` when addressing a file by version ID or environment", + ); + } + + // Use version_id or environment to retrieve specific version of the File + const subclient = getSubclient(client, fileConfig); + // Let backend handle case where both or none of version_id and environment are provided + return [await subclient.get(fileId!, { versionId, environment }), undefined]; +} + async function callFunction< I extends Record & { messages?: any[] }, O, ->(callable: File["callable"], datapoint: DatapointResponse): Promise { +>( + callable: FileEvalConfig["callable"], + datapoint: DatapointResponse, +): Promise { const datapointDict = { ...datapoint }; let output; if (callable === undefined) { @@ -562,25 +717,21 @@ async function callFunction< return output; } -async function upsertFile & { messages?: any[] }, O>({ - file, - type, - client, -}: { - file: File; - type: FileType; - client: HumanloopClient; -}): Promise { +async function upsertFile & { messages?: any[] }, O>( + client: HumanloopClient, + fileConfig: FileEvalConfig, +): Promise { // Get or create the file on Humanloop - const version = file.version || {}; - const fileDict = { ...file, ...version }; + const version = fileConfig.version || {}; + const fileDict = { ...fileConfig, ...version }; + const type = fileConfig.type; let hlFile: PromptResponse | FlowResponse | ToolResponse | EvaluatorResponse; switch (type) { case "flow": // Be more lenient with Flow versions as they are arbitrary json - const flowVersion = { attributes: version }; - const fileDictWithFlowVersion = { ...file, ...flowVersion }; + const flowVersion = version ? 
{ ...version } : { attributes: {} }; + const fileDictWithFlowVersion = { ...fileConfig, ...flowVersion }; hlFile = await client.flows.upsert(fileDictWithFlowVersion as FlowRequest); break; case "prompt": @@ -589,11 +740,8 @@ async function upsertFile & { messages?: any[] case "tool": hlFile = await client.tools.upsert(fileDict as ToolRequest); break; - case "evaluator": - hlFile = await client.evaluators.upsert(fileDict as EvaluatorRequest); - break; default: - throw new Error(`Unsupported File type: ${type}`); + throw new HumanloopRuntimeError(`Unsupported File type: ${type}`); } return hlFile; @@ -645,7 +793,7 @@ async function getNewRun({ client: HumanloopClient; evaluationName: string | undefined; evaluators: Evaluator[]; - hlFile: PromptResponse | FlowResponse | ToolResponse | EvaluatorResponse; + hlFile: EvaluatedFile; hlDataset: DatasetResponse; func: ((inputs: Record) => Promise) | undefined; }): Promise<{ evaluation: EvaluationResponse; run: EvaluationRunResponse }> { @@ -709,7 +857,7 @@ async function upsertLocalEvaluators< client, }: { evaluators: LocalEvaluator[]; - callable: File["callable"]; + callable: FileEvalConfig["callable"]; type: FileType; client: HumanloopClient; }): Promise<_LocalEvaluator[]> { diff --git a/src/evals/types.ts b/src/evals/types.ts index 59abb423..547e1f65 100644 --- a/src/evals/types.ts +++ b/src/evals/types.ts @@ -45,12 +45,16 @@ export type FileResponse = interface Identifiers { id?: string; path?: string; + versionId?: string; + environment?: string; } +export type EvalFileType = "prompt" | "tool" | "flow" | "agent"; + export interface File & { messages?: any[] }, O> extends Identifiers { /** The type of File this callable relates to on Humanloop. */ - type?: "flow" | "prompt"; + type?: EvalFileType; /** The contents uniquely define the version of the File on Humanloop. */ version?: Version; /** diff --git a/src/serialization/resources/agents/client/index.ts b/src/serialization/resources/agents/client/index.ts new file mode 100644 index 00000000..0ab8d679 --- /dev/null +++ b/src/serialization/resources/agents/client/index.ts @@ -0,0 +1,2 @@ +export * as listEnvironments from "./listEnvironments"; +export * from "./requests"; diff --git a/src/serialization/resources/agents/client/listEnvironments.ts b/src/serialization/resources/agents/client/listEnvironments.ts new file mode 100644 index 00000000..2f0b7f77 --- /dev/null +++ b/src/serialization/resources/agents/client/listEnvironments.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; +import { FileEnvironmentResponse } from "../../../types/FileEnvironmentResponse"; + +export const Response: core.serialization.Schema< + serializers.agents.listEnvironments.Response.Raw, + Humanloop.FileEnvironmentResponse[] +> = core.serialization.list(FileEnvironmentResponse); + +export declare namespace Response { + export type Raw = FileEnvironmentResponse.Raw[]; +} diff --git a/src/serialization/resources/agents/client/requests/AgentLogRequest.ts b/src/serialization/resources/agents/client/requests/AgentLogRequest.ts new file mode 100644 index 00000000..84babd7f --- /dev/null +++ b/src/serialization/resources/agents/client/requests/AgentLogRequest.ts @@ -0,0 +1,92 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
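Given the widened `Identifiers` (now carrying `versionId` and `environment`) and the new `EvalFileType`, a `file` eval config targeting a deployed Agent on the Humanloop runtime might look like the following sketch; the ID and environment name are placeholders.

```typescript
// Address a deployed Agent version by environment. Per resolveFile above,
// supplying `environment` (or `versionId`) means no local `callable` and no
// inline `version` may be passed alongside it.
const fileConfig = {
  id: "ag_1234567890", // placeholder Agent ID
  type: "agent" as const,
  environment: "production",
};
```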
+ */ + +import * as serializers from "../../../../index"; +import * as Humanloop from "../../../../../api/index"; +import * as core from "../../../../../core"; +import { ChatMessage } from "../../../../types/ChatMessage"; +import { AgentLogRequestToolChoice } from "../../types/AgentLogRequestToolChoice"; +import { AgentKernelRequest } from "../../../../types/AgentKernelRequest"; +import { LogStatus } from "../../../../types/LogStatus"; + +export const AgentLogRequest: core.serialization.Schema< + serializers.AgentLogRequest.Raw, + Omit +> = core.serialization.object({ + runId: core.serialization.property("run_id", core.serialization.string().optional()), + path: core.serialization.string().optional(), + id: core.serialization.string().optional(), + outputMessage: core.serialization.property("output_message", ChatMessage.optional()), + promptTokens: core.serialization.property("prompt_tokens", core.serialization.number().optional()), + reasoningTokens: core.serialization.property("reasoning_tokens", core.serialization.number().optional()), + outputTokens: core.serialization.property("output_tokens", core.serialization.number().optional()), + promptCost: core.serialization.property("prompt_cost", core.serialization.number().optional()), + outputCost: core.serialization.property("output_cost", core.serialization.number().optional()), + finishReason: core.serialization.property("finish_reason", core.serialization.string().optional()), + messages: core.serialization.list(ChatMessage).optional(), + toolChoice: core.serialization.property("tool_choice", AgentLogRequestToolChoice.optional()), + agent: AgentKernelRequest.optional(), + startTime: core.serialization.property("start_time", core.serialization.date().optional()), + endTime: core.serialization.property("end_time", core.serialization.date().optional()), + output: core.serialization.string().optional(), + createdAt: core.serialization.property("created_at", core.serialization.date().optional()), + error: core.serialization.string().optional(), + providerLatency: core.serialization.property("provider_latency", core.serialization.number().optional()), + stdout: core.serialization.string().optional(), + providerRequest: core.serialization.property( + "provider_request", + core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + ), + providerResponse: core.serialization.property( + "provider_response", + core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + ), + inputs: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + source: core.serialization.string().optional(), + metadata: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + logStatus: core.serialization.property("log_status", LogStatus.optional()), + sourceDatapointId: core.serialization.property("source_datapoint_id", core.serialization.string().optional()), + traceParentId: core.serialization.property("trace_parent_id", core.serialization.string().optional()), + user: core.serialization.string().optional(), + agentLogRequestEnvironment: core.serialization.property("environment", core.serialization.string().optional()), + save: core.serialization.boolean().optional(), + logId: core.serialization.property("log_id", core.serialization.string().optional()), +}); + +export declare namespace AgentLogRequest { + export interface Raw { + run_id?: string | null; + path?: string | null; + id?: string | null; + 
output_message?: ChatMessage.Raw | null; + prompt_tokens?: number | null; + reasoning_tokens?: number | null; + output_tokens?: number | null; + prompt_cost?: number | null; + output_cost?: number | null; + finish_reason?: string | null; + messages?: ChatMessage.Raw[] | null; + tool_choice?: AgentLogRequestToolChoice.Raw | null; + agent?: AgentKernelRequest.Raw | null; + start_time?: string | null; + end_time?: string | null; + output?: string | null; + created_at?: string | null; + error?: string | null; + provider_latency?: number | null; + stdout?: string | null; + provider_request?: Record | null; + provider_response?: Record | null; + inputs?: Record | null; + source?: string | null; + metadata?: Record | null; + log_status?: LogStatus.Raw | null; + source_datapoint_id?: string | null; + trace_parent_id?: string | null; + user?: string | null; + environment?: string | null; + save?: boolean | null; + log_id?: string | null; + } +} diff --git a/src/serialization/resources/agents/client/requests/AgentRequest.ts b/src/serialization/resources/agents/client/requests/AgentRequest.ts new file mode 100644 index 00000000..a37e6710 --- /dev/null +++ b/src/serialization/resources/agents/client/requests/AgentRequest.ts @@ -0,0 +1,74 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../index"; +import * as Humanloop from "../../../../../api/index"; +import * as core from "../../../../../core"; +import { ModelEndpoints } from "../../../../types/ModelEndpoints"; +import { AgentRequestTemplate } from "../../types/AgentRequestTemplate"; +import { TemplateLanguage } from "../../../../types/TemplateLanguage"; +import { ModelProviders } from "../../../../types/ModelProviders"; +import { AgentRequestStop } from "../../types/AgentRequestStop"; +import { ResponseFormat } from "../../../../types/ResponseFormat"; +import { AgentRequestReasoningEffort } from "../../types/AgentRequestReasoningEffort"; +import { AgentRequestToolsItem } from "../../types/AgentRequestToolsItem"; + +export const AgentRequest: core.serialization.Schema = + core.serialization.object({ + path: core.serialization.string().optional(), + id: core.serialization.string().optional(), + model: core.serialization.string(), + endpoint: ModelEndpoints.optional(), + template: AgentRequestTemplate.optional(), + templateLanguage: core.serialization.property("template_language", TemplateLanguage.optional()), + provider: ModelProviders.optional(), + maxTokens: core.serialization.property("max_tokens", core.serialization.number().optional()), + temperature: core.serialization.number().optional(), + topP: core.serialization.property("top_p", core.serialization.number().optional()), + stop: AgentRequestStop.optional(), + presencePenalty: core.serialization.property("presence_penalty", core.serialization.number().optional()), + frequencyPenalty: core.serialization.property("frequency_penalty", core.serialization.number().optional()), + other: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + seed: core.serialization.number().optional(), + responseFormat: core.serialization.property("response_format", ResponseFormat.optional()), + reasoningEffort: core.serialization.property("reasoning_effort", AgentRequestReasoningEffort.optional()), + tools: core.serialization.list(AgentRequestToolsItem).optional(), + attributes: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + maxIterations: 
core.serialization.property("max_iterations", core.serialization.number().optional()), + versionName: core.serialization.property("version_name", core.serialization.string().optional()), + versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), + description: core.serialization.string().optional(), + tags: core.serialization.list(core.serialization.string()).optional(), + readme: core.serialization.string().optional(), + }); + +export declare namespace AgentRequest { + export interface Raw { + path?: string | null; + id?: string | null; + model: string; + endpoint?: ModelEndpoints.Raw | null; + template?: AgentRequestTemplate.Raw | null; + template_language?: TemplateLanguage.Raw | null; + provider?: ModelProviders.Raw | null; + max_tokens?: number | null; + temperature?: number | null; + top_p?: number | null; + stop?: AgentRequestStop.Raw | null; + presence_penalty?: number | null; + frequency_penalty?: number | null; + other?: Record | null; + seed?: number | null; + response_format?: ResponseFormat.Raw | null; + reasoning_effort?: AgentRequestReasoningEffort.Raw | null; + tools?: AgentRequestToolsItem.Raw[] | null; + attributes?: Record | null; + max_iterations?: number | null; + version_name?: string | null; + version_description?: string | null; + description?: string | null; + tags?: string[] | null; + readme?: string | null; + } +} diff --git a/src/serialization/resources/agents/client/requests/AgentsCallRequest.ts b/src/serialization/resources/agents/client/requests/AgentsCallRequest.ts new file mode 100644 index 00000000..a64a7932 --- /dev/null +++ b/src/serialization/resources/agents/client/requests/AgentsCallRequest.ts @@ -0,0 +1,66 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
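The `AgentRequest` schema above maps each camelCase SDK field to its snake_case wire name via `core.serialization.property`. A rough round-trip sketch, assuming the generated schema exposes Fern's usual `jsonOrThrow`/`parseOrThrow` helpers; the import path and field values are illustrative:

```typescript
import { AgentRequest } from "./src/serialization/resources/agents/client/requests/AgentRequest";

// Going out: camelCase SDK fields are renamed to the snake_case wire format.
const wire = AgentRequest.jsonOrThrow({
    path: "agents/support-triage", // placeholder path
    model: "gpt-4o",               // the only required field in the schema
    maxIterations: 5,              // serialized as "max_iterations"
    versionName: "v1",             // serialized as "version_name"
});
// wire.max_iterations === 5 and wire.version_name === "v1"

// Coming back: the Raw wire shape is parsed into the camelCase API type.
const parsed = AgentRequest.parseOrThrow(wire);
// parsed.maxIterations === 5 again
```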
+ */ + +import * as serializers from "../../../../index"; +import * as Humanloop from "../../../../../api/index"; +import * as core from "../../../../../core"; +import { ChatMessage } from "../../../../types/ChatMessage"; +import { AgentsCallRequestToolChoice } from "../../types/AgentsCallRequestToolChoice"; +import { AgentKernelRequest } from "../../../../types/AgentKernelRequest"; +import { LogStatus } from "../../../../types/LogStatus"; +import { ProviderApiKeys } from "../../../../types/ProviderApiKeys"; + +export const AgentsCallRequest: core.serialization.Schema< + serializers.AgentsCallRequest.Raw, + Omit +> = core.serialization.object({ + path: core.serialization.string().optional(), + id: core.serialization.string().optional(), + messages: core.serialization.list(ChatMessage).optional(), + toolChoice: core.serialization.property("tool_choice", AgentsCallRequestToolChoice.optional()), + agent: AgentKernelRequest.optional(), + inputs: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + source: core.serialization.string().optional(), + metadata: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + startTime: core.serialization.property("start_time", core.serialization.date().optional()), + endTime: core.serialization.property("end_time", core.serialization.date().optional()), + logStatus: core.serialization.property("log_status", LogStatus.optional()), + sourceDatapointId: core.serialization.property("source_datapoint_id", core.serialization.string().optional()), + traceParentId: core.serialization.property("trace_parent_id", core.serialization.string().optional()), + user: core.serialization.string().optional(), + agentsCallRequestEnvironment: core.serialization.property("environment", core.serialization.string().optional()), + save: core.serialization.boolean().optional(), + logId: core.serialization.property("log_id", core.serialization.string().optional()), + providerApiKeys: core.serialization.property("provider_api_keys", ProviderApiKeys.optional()), + returnInputs: core.serialization.property("return_inputs", core.serialization.boolean().optional()), + includeTraceChildren: core.serialization.property( + "include_trace_children", + core.serialization.boolean().optional(), + ), +}); + +export declare namespace AgentsCallRequest { + export interface Raw { + path?: string | null; + id?: string | null; + messages?: ChatMessage.Raw[] | null; + tool_choice?: AgentsCallRequestToolChoice.Raw | null; + agent?: AgentKernelRequest.Raw | null; + inputs?: Record | null; + source?: string | null; + metadata?: Record | null; + start_time?: string | null; + end_time?: string | null; + log_status?: LogStatus.Raw | null; + source_datapoint_id?: string | null; + trace_parent_id?: string | null; + user?: string | null; + environment?: string | null; + save?: boolean | null; + log_id?: string | null; + provider_api_keys?: ProviderApiKeys.Raw | null; + return_inputs?: boolean | null; + include_trace_children?: boolean | null; + } +} diff --git a/src/serialization/resources/agents/client/requests/AgentsCallStreamRequest.ts b/src/serialization/resources/agents/client/requests/AgentsCallStreamRequest.ts new file mode 100644 index 00000000..b0557ace --- /dev/null +++ b/src/serialization/resources/agents/client/requests/AgentsCallStreamRequest.ts @@ -0,0 +1,69 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../index"; +import * as Humanloop from "../../../../../api/index"; +import * as core from "../../../../../core"; +import { ChatMessage } from "../../../../types/ChatMessage"; +import { AgentsCallStreamRequestToolChoice } from "../../types/AgentsCallStreamRequestToolChoice"; +import { AgentKernelRequest } from "../../../../types/AgentKernelRequest"; +import { LogStatus } from "../../../../types/LogStatus"; +import { ProviderApiKeys } from "../../../../types/ProviderApiKeys"; + +export const AgentsCallStreamRequest: core.serialization.Schema< + serializers.AgentsCallStreamRequest.Raw, + Omit +> = core.serialization.object({ + path: core.serialization.string().optional(), + id: core.serialization.string().optional(), + messages: core.serialization.list(ChatMessage).optional(), + toolChoice: core.serialization.property("tool_choice", AgentsCallStreamRequestToolChoice.optional()), + agent: AgentKernelRequest.optional(), + inputs: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + source: core.serialization.string().optional(), + metadata: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + startTime: core.serialization.property("start_time", core.serialization.date().optional()), + endTime: core.serialization.property("end_time", core.serialization.date().optional()), + logStatus: core.serialization.property("log_status", LogStatus.optional()), + sourceDatapointId: core.serialization.property("source_datapoint_id", core.serialization.string().optional()), + traceParentId: core.serialization.property("trace_parent_id", core.serialization.string().optional()), + user: core.serialization.string().optional(), + agentsCallStreamRequestEnvironment: core.serialization.property( + "environment", + core.serialization.string().optional(), + ), + save: core.serialization.boolean().optional(), + logId: core.serialization.property("log_id", core.serialization.string().optional()), + providerApiKeys: core.serialization.property("provider_api_keys", ProviderApiKeys.optional()), + returnInputs: core.serialization.property("return_inputs", core.serialization.boolean().optional()), + includeTraceChildren: core.serialization.property( + "include_trace_children", + core.serialization.boolean().optional(), + ), +}); + +export declare namespace AgentsCallStreamRequest { + export interface Raw { + path?: string | null; + id?: string | null; + messages?: ChatMessage.Raw[] | null; + tool_choice?: AgentsCallStreamRequestToolChoice.Raw | null; + agent?: AgentKernelRequest.Raw | null; + inputs?: Record | null; + source?: string | null; + metadata?: Record | null; + start_time?: string | null; + end_time?: string | null; + log_status?: LogStatus.Raw | null; + source_datapoint_id?: string | null; + trace_parent_id?: string | null; + user?: string | null; + environment?: string | null; + save?: boolean | null; + log_id?: string | null; + provider_api_keys?: ProviderApiKeys.Raw | null; + return_inputs?: boolean | null; + include_trace_children?: boolean | null; + } +} diff --git a/src/serialization/resources/agents/client/requests/AgentsContinueCallRequest.ts b/src/serialization/resources/agents/client/requests/AgentsContinueCallRequest.ts new file mode 100644 index 00000000..b5609c63 --- /dev/null +++ b/src/serialization/resources/agents/client/requests/AgentsContinueCallRequest.ts @@ -0,0 +1,31 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../index"; +import * as Humanloop from "../../../../../api/index"; +import * as core from "../../../../../core"; +import { ChatMessage } from "../../../../types/ChatMessage"; +import { ProviderApiKeys } from "../../../../types/ProviderApiKeys"; + +export const AgentsContinueCallRequest: core.serialization.Schema< + serializers.AgentsContinueCallRequest.Raw, + Humanloop.AgentsContinueCallRequest +> = core.serialization.object({ + logId: core.serialization.property("log_id", core.serialization.string()), + messages: core.serialization.list(ChatMessage), + providerApiKeys: core.serialization.property("provider_api_keys", ProviderApiKeys.optional()), + includeTraceChildren: core.serialization.property( + "include_trace_children", + core.serialization.boolean().optional(), + ), +}); + +export declare namespace AgentsContinueCallRequest { + export interface Raw { + log_id: string; + messages: ChatMessage.Raw[]; + provider_api_keys?: ProviderApiKeys.Raw | null; + include_trace_children?: boolean | null; + } +} diff --git a/src/serialization/resources/agents/client/requests/AgentsContinueCallStreamRequest.ts b/src/serialization/resources/agents/client/requests/AgentsContinueCallStreamRequest.ts new file mode 100644 index 00000000..ff3c10d8 --- /dev/null +++ b/src/serialization/resources/agents/client/requests/AgentsContinueCallStreamRequest.ts @@ -0,0 +1,31 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../index"; +import * as Humanloop from "../../../../../api/index"; +import * as core from "../../../../../core"; +import { ChatMessage } from "../../../../types/ChatMessage"; +import { ProviderApiKeys } from "../../../../types/ProviderApiKeys"; + +export const AgentsContinueCallStreamRequest: core.serialization.Schema< + serializers.AgentsContinueCallStreamRequest.Raw, + Humanloop.AgentsContinueCallStreamRequest +> = core.serialization.object({ + logId: core.serialization.property("log_id", core.serialization.string()), + messages: core.serialization.list(ChatMessage), + providerApiKeys: core.serialization.property("provider_api_keys", ProviderApiKeys.optional()), + includeTraceChildren: core.serialization.property( + "include_trace_children", + core.serialization.boolean().optional(), + ), +}); + +export declare namespace AgentsContinueCallStreamRequest { + export interface Raw { + log_id: string; + messages: ChatMessage.Raw[]; + provider_api_keys?: ProviderApiKeys.Raw | null; + include_trace_children?: boolean | null; + } +} diff --git a/src/serialization/resources/agents/client/requests/BodyDeserializeAgentsDeserializePost.ts b/src/serialization/resources/agents/client/requests/BodyDeserializeAgentsDeserializePost.ts new file mode 100644 index 00000000..5946cf97 --- /dev/null +++ b/src/serialization/resources/agents/client/requests/BodyDeserializeAgentsDeserializePost.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
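`AgentsContinueCallRequest` requires only `log_id` and `messages`, which supports resuming a paused agent turn by posting new messages against an existing log. A hedged sketch of that flow; the `client.agents.call` and `client.agents.continueCall` method names are inferred from the request names and are assumptions, as are the message contents:

```typescript
import { HumanloopClient } from "humanloop";

const client = new HumanloopClient({ apiKey: process.env.HUMANLOOP_API_KEY! });

// First turn: the agent may stop and wait for a client-side tool result.
const first = await client.agents.call({
    path: "agents/support-triage", // placeholder Agent path
    messages: [{ role: "user", content: "Where is my order #123?" }],
});

// Continue the same log by sending the tool result back with the log ID.
const resumed = await client.agents.continueCall({
    logId: first.logId ?? first.id, // maps to "log_id" on the wire
    messages: [{ role: "tool", content: '{"status": "shipped"}' }], // placeholder tool result
});
console.log(resumed.output);
```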
+ */ + +import * as serializers from "../../../../index"; +import * as Humanloop from "../../../../../api/index"; +import * as core from "../../../../../core"; + +export const BodyDeserializeAgentsDeserializePost: core.serialization.Schema< + serializers.BodyDeserializeAgentsDeserializePost.Raw, + Humanloop.BodyDeserializeAgentsDeserializePost +> = core.serialization.object({ + agent: core.serialization.string(), +}); + +export declare namespace BodyDeserializeAgentsDeserializePost { + export interface Raw { + agent: string; + } +} diff --git a/src/serialization/resources/agents/client/requests/UpdateAgentLogRequest.ts b/src/serialization/resources/agents/client/requests/UpdateAgentLogRequest.ts new file mode 100644 index 00000000..a6c3864b --- /dev/null +++ b/src/serialization/resources/agents/client/requests/UpdateAgentLogRequest.ts @@ -0,0 +1,32 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../index"; +import * as Humanloop from "../../../../../api/index"; +import * as core from "../../../../../core"; +import { ChatMessage } from "../../../../types/ChatMessage"; +import { LogStatus } from "../../../../types/LogStatus"; + +export const UpdateAgentLogRequest: core.serialization.Schema< + serializers.UpdateAgentLogRequest.Raw, + Humanloop.UpdateAgentLogRequest +> = core.serialization.object({ + messages: core.serialization.list(ChatMessage).optional(), + outputMessage: core.serialization.property("output_message", ChatMessage.optional()), + inputs: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + output: core.serialization.string().optional(), + error: core.serialization.string().optional(), + logStatus: core.serialization.property("log_status", LogStatus.optional()), +}); + +export declare namespace UpdateAgentLogRequest { + export interface Raw { + messages?: ChatMessage.Raw[] | null; + output_message?: ChatMessage.Raw | null; + inputs?: Record | null; + output?: string | null; + error?: string | null; + log_status?: LogStatus.Raw | null; + } +} diff --git a/src/serialization/resources/agents/client/requests/UpdateAgentRequest.ts b/src/serialization/resources/agents/client/requests/UpdateAgentRequest.ts new file mode 100644 index 00000000..5f95529a --- /dev/null +++ b/src/serialization/resources/agents/client/requests/UpdateAgentRequest.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../index"; +import * as Humanloop from "../../../../../api/index"; +import * as core from "../../../../../core"; + +export const UpdateAgentRequest: core.serialization.Schema< + serializers.UpdateAgentRequest.Raw, + Humanloop.UpdateAgentRequest +> = core.serialization.object({ + path: core.serialization.string().optional(), + name: core.serialization.string().optional(), + directoryId: core.serialization.property("directory_id", core.serialization.string().optional()), +}); + +export declare namespace UpdateAgentRequest { + export interface Raw { + path?: string | null; + name?: string | null; + directory_id?: string | null; + } +} diff --git a/src/serialization/resources/agents/client/requests/index.ts b/src/serialization/resources/agents/client/requests/index.ts new file mode 100644 index 00000000..80a83ab3 --- /dev/null +++ b/src/serialization/resources/agents/client/requests/index.ts @@ -0,0 +1,9 @@ +export { AgentLogRequest } from "./AgentLogRequest"; +export { UpdateAgentLogRequest } from "./UpdateAgentLogRequest"; +export { AgentsCallStreamRequest } from "./AgentsCallStreamRequest"; +export { AgentsCallRequest } from "./AgentsCallRequest"; +export { AgentsContinueCallStreamRequest } from "./AgentsContinueCallStreamRequest"; +export { AgentsContinueCallRequest } from "./AgentsContinueCallRequest"; +export { AgentRequest } from "./AgentRequest"; +export { UpdateAgentRequest } from "./UpdateAgentRequest"; +export { BodyDeserializeAgentsDeserializePost } from "./BodyDeserializeAgentsDeserializePost"; diff --git a/src/serialization/resources/agents/index.ts b/src/serialization/resources/agents/index.ts new file mode 100644 index 00000000..c9240f83 --- /dev/null +++ b/src/serialization/resources/agents/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./client"; diff --git a/src/serialization/resources/agents/types/AgentLogRequestToolChoice.ts b/src/serialization/resources/agents/types/AgentLogRequestToolChoice.ts new file mode 100644 index 00000000..ce6a353a --- /dev/null +++ b/src/serialization/resources/agents/types/AgentLogRequestToolChoice.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; +import { ToolChoice } from "../../../types/ToolChoice"; + +export const AgentLogRequestToolChoice: core.serialization.Schema< + serializers.AgentLogRequestToolChoice.Raw, + Humanloop.AgentLogRequestToolChoice +> = core.serialization.undiscriminatedUnion([ + core.serialization.stringLiteral("none"), + core.serialization.stringLiteral("auto"), + core.serialization.stringLiteral("required"), + ToolChoice, +]); + +export declare namespace AgentLogRequestToolChoice { + export type Raw = "none" | "auto" | "required" | ToolChoice.Raw; +} diff --git a/src/serialization/resources/agents/types/AgentRequestReasoningEffort.ts b/src/serialization/resources/agents/types/AgentRequestReasoningEffort.ts new file mode 100644 index 00000000..835dd02a --- /dev/null +++ b/src/serialization/resources/agents/types/AgentRequestReasoningEffort.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; +import { OpenAiReasoningEffort } from "../../../types/OpenAiReasoningEffort"; + +export const AgentRequestReasoningEffort: core.serialization.Schema< + serializers.AgentRequestReasoningEffort.Raw, + Humanloop.AgentRequestReasoningEffort +> = core.serialization.undiscriminatedUnion([OpenAiReasoningEffort, core.serialization.number()]); + +export declare namespace AgentRequestReasoningEffort { + export type Raw = OpenAiReasoningEffort.Raw | number; +} diff --git a/src/serialization/resources/agents/types/AgentRequestStop.ts b/src/serialization/resources/agents/types/AgentRequestStop.ts new file mode 100644 index 00000000..f04f812e --- /dev/null +++ b/src/serialization/resources/agents/types/AgentRequestStop.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; + +export const AgentRequestStop: core.serialization.Schema = + core.serialization.undiscriminatedUnion([ + core.serialization.string(), + core.serialization.list(core.serialization.string()), + ]); + +export declare namespace AgentRequestStop { + export type Raw = string | string[]; +} diff --git a/src/serialization/resources/agents/types/AgentRequestTemplate.ts b/src/serialization/resources/agents/types/AgentRequestTemplate.ts new file mode 100644 index 00000000..3f98e30c --- /dev/null +++ b/src/serialization/resources/agents/types/AgentRequestTemplate.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; +import { ChatMessage } from "../../../types/ChatMessage"; + +export const AgentRequestTemplate: core.serialization.Schema< + serializers.AgentRequestTemplate.Raw, + Humanloop.AgentRequestTemplate +> = core.serialization.undiscriminatedUnion([core.serialization.string(), core.serialization.list(ChatMessage)]); + +export declare namespace AgentRequestTemplate { + export type Raw = string | ChatMessage.Raw[]; +} diff --git a/src/serialization/resources/agents/types/AgentRequestToolsItem.ts b/src/serialization/resources/agents/types/AgentRequestToolsItem.ts new file mode 100644 index 00000000..12858ab0 --- /dev/null +++ b/src/serialization/resources/agents/types/AgentRequestToolsItem.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
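The three unions above (`AgentRequestTemplate`, `AgentRequestStop`, `AgentRequestReasoningEffort`) each accept more than one raw shape for the same field, and the first member that validates wins. For illustration, both of the following raw fragments would satisfy them; all values are placeholders:

```typescript
// Variant A: scalar forms.
const rawAgentFragmentA = {
    template: "You are a helpful assistant.", // plain string template
    stop: "\n\n",                             // single stop sequence
    reasoning_effort: "medium",               // OpenAI-style effort value
};

// Variant B: structured forms.
const rawAgentFragmentB = {
    template: [{ role: "system", content: "You are a helpful assistant." }], // ChatMessage[]
    stop: ["\n\n", "Observation:"],           // list of stop sequences
    reasoning_effort: 1024,                   // numeric value, per the union above
};
```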
+ */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; +import { AgentLinkedFileRequest } from "../../../types/AgentLinkedFileRequest"; +import { AgentInlineTool } from "../../../types/AgentInlineTool"; + +export const AgentRequestToolsItem: core.serialization.Schema< + serializers.AgentRequestToolsItem.Raw, + Humanloop.AgentRequestToolsItem +> = core.serialization.undiscriminatedUnion([AgentLinkedFileRequest, AgentInlineTool]); + +export declare namespace AgentRequestToolsItem { + export type Raw = AgentLinkedFileRequest.Raw | AgentInlineTool.Raw; +} diff --git a/src/serialization/resources/agents/types/AgentsCallRequestToolChoice.ts b/src/serialization/resources/agents/types/AgentsCallRequestToolChoice.ts new file mode 100644 index 00000000..e52eb45d --- /dev/null +++ b/src/serialization/resources/agents/types/AgentsCallRequestToolChoice.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; +import { ToolChoice } from "../../../types/ToolChoice"; + +export const AgentsCallRequestToolChoice: core.serialization.Schema< + serializers.AgentsCallRequestToolChoice.Raw, + Humanloop.AgentsCallRequestToolChoice +> = core.serialization.undiscriminatedUnion([ + core.serialization.stringLiteral("none"), + core.serialization.stringLiteral("auto"), + core.serialization.stringLiteral("required"), + ToolChoice, +]); + +export declare namespace AgentsCallRequestToolChoice { + export type Raw = "none" | "auto" | "required" | ToolChoice.Raw; +} diff --git a/src/serialization/resources/agents/types/AgentsCallStreamRequestToolChoice.ts b/src/serialization/resources/agents/types/AgentsCallStreamRequestToolChoice.ts new file mode 100644 index 00000000..2cdcb458 --- /dev/null +++ b/src/serialization/resources/agents/types/AgentsCallStreamRequestToolChoice.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; +import { ToolChoice } from "../../../types/ToolChoice"; + +export const AgentsCallStreamRequestToolChoice: core.serialization.Schema< + serializers.AgentsCallStreamRequestToolChoice.Raw, + Humanloop.AgentsCallStreamRequestToolChoice +> = core.serialization.undiscriminatedUnion([ + core.serialization.stringLiteral("none"), + core.serialization.stringLiteral("auto"), + core.serialization.stringLiteral("required"), + ToolChoice, +]); + +export declare namespace AgentsCallStreamRequestToolChoice { + export type Raw = "none" | "auto" | "required" | ToolChoice.Raw; +} diff --git a/src/serialization/resources/agents/types/index.ts b/src/serialization/resources/agents/types/index.ts new file mode 100644 index 00000000..8a8a004f --- /dev/null +++ b/src/serialization/resources/agents/types/index.ts @@ -0,0 +1,7 @@ +export * from "./AgentLogRequestToolChoice"; +export * from "./AgentsCallStreamRequestToolChoice"; +export * from "./AgentsCallRequestToolChoice"; +export * from "./AgentRequestTemplate"; +export * from "./AgentRequestStop"; +export * from "./AgentRequestReasoningEffort"; +export * from "./AgentRequestToolsItem"; diff --git a/src/serialization/resources/files/types/RetrieveByPathFilesRetrieveByPathPostResponse.ts b/src/serialization/resources/files/types/RetrieveByPathFilesRetrieveByPathPostResponse.ts index 64363bcb..42c7260a 100644 --- a/src/serialization/resources/files/types/RetrieveByPathFilesRetrieveByPathPostResponse.ts +++ b/src/serialization/resources/files/types/RetrieveByPathFilesRetrieveByPathPostResponse.ts @@ -16,6 +16,7 @@ export const RetrieveByPathFilesRetrieveByPathPostResponse: core.serialization.S DatasetResponse, core.serialization.lazyObject(() => serializers.EvaluatorResponse), core.serialization.lazyObject(() => serializers.FlowResponse), + core.serialization.lazyObject(() => serializers.AgentResponse), ]); export declare namespace RetrieveByPathFilesRetrieveByPathPostResponse { @@ -24,5 +25,6 @@ export declare namespace RetrieveByPathFilesRetrieveByPathPostResponse { | serializers.ToolResponse.Raw | DatasetResponse.Raw | serializers.EvaluatorResponse.Raw - | serializers.FlowResponse.Raw; + | serializers.FlowResponse.Raw + | serializers.AgentResponse.Raw; } diff --git a/src/serialization/resources/index.ts b/src/serialization/resources/index.ts index b7ffbeb8..f8e23c1c 100644 --- a/src/serialization/resources/index.ts +++ b/src/serialization/resources/index.ts @@ -4,6 +4,8 @@ export * as datasets from "./datasets"; export * from "./datasets/types"; export * as evaluators from "./evaluators"; export * from "./evaluators/types"; +export * as agents from "./agents"; +export * from "./agents/types"; export * as files from "./files"; export * from "./files/types"; export * as evaluations from "./evaluations"; @@ -16,6 +18,7 @@ export * from "./tools/client/requests"; export * from "./datasets/client/requests"; export * from "./evaluators/client/requests"; export * from "./flows/client/requests"; +export * from "./agents/client/requests"; export * from "./directories/client/requests"; export * from "./files/client/requests"; export * from "./evaluations/client/requests"; diff --git a/src/serialization/resources/prompts/client/requests/BodyDeserializePromptsDeserializePost.ts b/src/serialization/resources/prompts/client/requests/BodyDeserializePromptsDeserializePost.ts new file mode 100644 index 00000000..5fc3b396 --- 
/dev/null +++ b/src/serialization/resources/prompts/client/requests/BodyDeserializePromptsDeserializePost.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../index"; +import * as Humanloop from "../../../../../api/index"; +import * as core from "../../../../../core"; + +export const BodyDeserializePromptsDeserializePost: core.serialization.Schema< + serializers.BodyDeserializePromptsDeserializePost.Raw, + Humanloop.BodyDeserializePromptsDeserializePost +> = core.serialization.object({ + prompt: core.serialization.string(), +}); + +export declare namespace BodyDeserializePromptsDeserializePost { + export interface Raw { + prompt: string; + } +} diff --git a/src/serialization/resources/prompts/client/requests/PromptRequest.ts b/src/serialization/resources/prompts/client/requests/PromptRequest.ts index cda133db..261bfc95 100644 --- a/src/serialization/resources/prompts/client/requests/PromptRequest.ts +++ b/src/serialization/resources/prompts/client/requests/PromptRequest.ts @@ -11,7 +11,7 @@ import { TemplateLanguage } from "../../../../types/TemplateLanguage"; import { ModelProviders } from "../../../../types/ModelProviders"; import { PromptRequestStop } from "../../types/PromptRequestStop"; import { ResponseFormat } from "../../../../types/ResponseFormat"; -import { ReasoningEffort } from "../../../../types/ReasoningEffort"; +import { PromptRequestReasoningEffort } from "../../types/PromptRequestReasoningEffort"; import { ToolFunction } from "../../../../types/ToolFunction"; export const PromptRequest: core.serialization.Schema = @@ -32,7 +32,7 @@ export const PromptRequest: core.serialization.Schema | null; seed?: number | null; response_format?: ResponseFormat.Raw | null; - reasoning_effort?: ReasoningEffort.Raw | null; + reasoning_effort?: PromptRequestReasoningEffort.Raw | null; tools?: ToolFunction.Raw[] | null; linked_tools?: string[] | null; attributes?: Record | null; diff --git a/src/serialization/resources/prompts/client/requests/index.ts b/src/serialization/resources/prompts/client/requests/index.ts index 29e3fe12..717b2974 100644 --- a/src/serialization/resources/prompts/client/requests/index.ts +++ b/src/serialization/resources/prompts/client/requests/index.ts @@ -4,3 +4,4 @@ export { PromptsCallStreamRequest } from "./PromptsCallStreamRequest"; export { PromptsCallRequest } from "./PromptsCallRequest"; export { PromptRequest } from "./PromptRequest"; export { UpdatePromptRequest } from "./UpdatePromptRequest"; +export { BodyDeserializePromptsDeserializePost } from "./BodyDeserializePromptsDeserializePost"; diff --git a/src/serialization/resources/prompts/types/PromptRequestReasoningEffort.ts b/src/serialization/resources/prompts/types/PromptRequestReasoningEffort.ts new file mode 100644 index 00000000..644aea27 --- /dev/null +++ b/src/serialization/resources/prompts/types/PromptRequestReasoningEffort.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; +import { OpenAiReasoningEffort } from "../../../types/OpenAiReasoningEffort"; + +export const PromptRequestReasoningEffort: core.serialization.Schema< + serializers.PromptRequestReasoningEffort.Raw, + Humanloop.PromptRequestReasoningEffort +> = core.serialization.undiscriminatedUnion([OpenAiReasoningEffort, core.serialization.number()]); + +export declare namespace PromptRequestReasoningEffort { + export type Raw = OpenAiReasoningEffort.Raw | number; +} diff --git a/src/serialization/resources/prompts/types/index.ts b/src/serialization/resources/prompts/types/index.ts index a5d3901d..8265b2f3 100644 --- a/src/serialization/resources/prompts/types/index.ts +++ b/src/serialization/resources/prompts/types/index.ts @@ -4,3 +4,4 @@ export * from "./PromptsCallStreamRequestToolChoice"; export * from "./PromptsCallRequestToolChoice"; export * from "./PromptRequestTemplate"; export * from "./PromptRequestStop"; +export * from "./PromptRequestReasoningEffort"; diff --git a/src/serialization/resources/tools/client/addEnvironmentVariable.ts b/src/serialization/resources/tools/client/addEnvironmentVariable.ts new file mode 100644 index 00000000..cb9bccae --- /dev/null +++ b/src/serialization/resources/tools/client/addEnvironmentVariable.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; +import { FileEnvironmentVariableRequest } from "../../../types/FileEnvironmentVariableRequest"; + +export const Request: core.serialization.Schema< + serializers.tools.addEnvironmentVariable.Request.Raw, + Humanloop.FileEnvironmentVariableRequest[] +> = core.serialization.list(FileEnvironmentVariableRequest); + +export declare namespace Request { + export type Raw = FileEnvironmentVariableRequest.Raw[]; +} + +export const Response: core.serialization.Schema< + serializers.tools.addEnvironmentVariable.Response.Raw, + Humanloop.FileEnvironmentVariableRequest[] +> = core.serialization.list(FileEnvironmentVariableRequest); + +export declare namespace Response { + export type Raw = FileEnvironmentVariableRequest.Raw[]; +} diff --git a/src/serialization/resources/tools/client/deleteEnvironmentVariable.ts b/src/serialization/resources/tools/client/deleteEnvironmentVariable.ts new file mode 100644 index 00000000..24c69457 --- /dev/null +++ b/src/serialization/resources/tools/client/deleteEnvironmentVariable.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; +import { FileEnvironmentVariableRequest } from "../../../types/FileEnvironmentVariableRequest"; + +export const Response: core.serialization.Schema< + serializers.tools.deleteEnvironmentVariable.Response.Raw, + Humanloop.FileEnvironmentVariableRequest[] +> = core.serialization.list(FileEnvironmentVariableRequest); + +export declare namespace Response { + export type Raw = FileEnvironmentVariableRequest.Raw[]; +} diff --git a/src/serialization/resources/tools/client/getEnvironmentVariables.ts b/src/serialization/resources/tools/client/getEnvironmentVariables.ts new file mode 100644 index 00000000..c2f96cee --- /dev/null +++ b/src/serialization/resources/tools/client/getEnvironmentVariables.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Humanloop from "../../../../api/index"; +import * as core from "../../../../core"; +import { FileEnvironmentVariableRequest } from "../../../types/FileEnvironmentVariableRequest"; + +export const Response: core.serialization.Schema< + serializers.tools.getEnvironmentVariables.Response.Raw, + Humanloop.FileEnvironmentVariableRequest[] +> = core.serialization.list(FileEnvironmentVariableRequest); + +export declare namespace Response { + export type Raw = FileEnvironmentVariableRequest.Raw[]; +} diff --git a/src/serialization/resources/tools/client/index.ts b/src/serialization/resources/tools/client/index.ts index 0ab8d679..ca75a2d8 100644 --- a/src/serialization/resources/tools/client/index.ts +++ b/src/serialization/resources/tools/client/index.ts @@ -1,2 +1,5 @@ export * as listEnvironments from "./listEnvironments"; +export * as getEnvironmentVariables from "./getEnvironmentVariables"; +export * as addEnvironmentVariable from "./addEnvironmentVariable"; +export * as deleteEnvironmentVariable from "./deleteEnvironmentVariable"; export * from "./requests"; diff --git a/src/serialization/resources/tools/client/requests/ToolCallRequest.ts b/src/serialization/resources/tools/client/requests/ToolCallRequest.ts new file mode 100644 index 00000000..1c19f6ae --- /dev/null +++ b/src/serialization/resources/tools/client/requests/ToolCallRequest.ts @@ -0,0 +1,50 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
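Each new tools environment-variable module wraps a list of `FileEnvironmentVariableRequest`: `getEnvironmentVariables` and `deleteEnvironmentVariable` expose a `Response` schema, and `addEnvironmentVariable` additionally a `Request` schema. A sketch of decoding a wire response with Fern's usual `parse`/`MaybeValid` helpers; that helper surface, the import path, and the variable fields shown are assumptions, since `FileEnvironmentVariableRequest`'s exact shape is not part of this excerpt:

```typescript
import * as serializers from "./src/serialization/index"; // illustrative path into the SDK

// Raw list as it might arrive over the wire (field names are placeholders).
const rawBody: unknown = [{ name: "SLACK_TOKEN", value: "xoxb-example" }];

const result = serializers.tools.getEnvironmentVariables.Response.parse(rawBody);
if (result.ok) {
    // Typed as Humanloop.FileEnvironmentVariableRequest[] when parsing succeeds.
    console.log(`${result.value.length} environment variable(s)`);
} else {
    console.error(result.errors);
}
```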
+ */ + +import * as serializers from "../../../../index"; +import * as Humanloop from "../../../../../api/index"; +import * as core from "../../../../../core"; +import { ToolKernelRequest } from "../../../../types/ToolKernelRequest"; +import { LogStatus } from "../../../../types/LogStatus"; + +export const ToolCallRequest: core.serialization.Schema< + serializers.ToolCallRequest.Raw, + Omit +> = core.serialization.object({ + path: core.serialization.string().optional(), + id: core.serialization.string().optional(), + tool: ToolKernelRequest.optional(), + inputs: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + source: core.serialization.string().optional(), + metadata: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + startTime: core.serialization.property("start_time", core.serialization.date().optional()), + endTime: core.serialization.property("end_time", core.serialization.date().optional()), + logStatus: core.serialization.property("log_status", LogStatus.optional()), + sourceDatapointId: core.serialization.property("source_datapoint_id", core.serialization.string().optional()), + traceParentId: core.serialization.property("trace_parent_id", core.serialization.string().optional()), + user: core.serialization.string().optional(), + toolCallRequestEnvironment: core.serialization.property("environment", core.serialization.string().optional()), + save: core.serialization.boolean().optional(), + logId: core.serialization.property("log_id", core.serialization.string().optional()), +}); + +export declare namespace ToolCallRequest { + export interface Raw { + path?: string | null; + id?: string | null; + tool?: ToolKernelRequest.Raw | null; + inputs?: Record | null; + source?: string | null; + metadata?: Record | null; + start_time?: string | null; + end_time?: string | null; + log_status?: LogStatus.Raw | null; + source_datapoint_id?: string | null; + trace_parent_id?: string | null; + user?: string | null; + environment?: string | null; + save?: boolean | null; + log_id?: string | null; + } +} diff --git a/src/serialization/resources/tools/client/requests/ToolLogRequest.ts b/src/serialization/resources/tools/client/requests/ToolLogRequest.ts index 688ec79d..65f07074 100644 --- a/src/serialization/resources/tools/client/requests/ToolLogRequest.ts +++ b/src/serialization/resources/tools/client/requests/ToolLogRequest.ts @@ -5,8 +5,8 @@ import * as serializers from "../../../../index"; import * as Humanloop from "../../../../../api/index"; import * as core from "../../../../../core"; -import { LogStatus } from "../../../../types/LogStatus"; import { ToolKernelRequest } from "../../../../types/ToolKernelRequest"; +import { LogStatus } from "../../../../types/LogStatus"; export const ToolLogRequest: core.serialization.Schema< serializers.ToolLogRequest.Raw, @@ -14,6 +14,7 @@ export const ToolLogRequest: core.serialization.Schema< > = core.serialization.object({ path: core.serialization.string().optional(), id: core.serialization.string().optional(), + tool: ToolKernelRequest.optional(), startTime: core.serialization.property("start_time", core.serialization.date().optional()), endTime: core.serialization.property("end_time", core.serialization.date().optional()), output: core.serialization.string().optional(), @@ -39,13 +40,13 @@ export const ToolLogRequest: core.serialization.Schema< toolLogRequestEnvironment: core.serialization.property("environment", core.serialization.string().optional()), 
save: core.serialization.boolean().optional(), logId: core.serialization.property("log_id", core.serialization.string().optional()), - tool: ToolKernelRequest.optional(), }); export declare namespace ToolLogRequest { export interface Raw { path?: string | null; id?: string | null; + tool?: ToolKernelRequest.Raw | null; start_time?: string | null; end_time?: string | null; output?: string | null; @@ -65,6 +66,5 @@ export declare namespace ToolLogRequest { environment?: string | null; save?: boolean | null; log_id?: string | null; - tool?: ToolKernelRequest.Raw | null; } } diff --git a/src/serialization/resources/tools/client/requests/index.ts b/src/serialization/resources/tools/client/requests/index.ts index 3bab2624..542f22e3 100644 --- a/src/serialization/resources/tools/client/requests/index.ts +++ b/src/serialization/resources/tools/client/requests/index.ts @@ -1,3 +1,4 @@ +export { ToolCallRequest } from "./ToolCallRequest"; export { ToolLogRequest } from "./ToolLogRequest"; export { ToolLogUpdateRequest } from "./ToolLogUpdateRequest"; export { ToolRequest } from "./ToolRequest"; diff --git a/src/serialization/types/AgentCallResponse.ts b/src/serialization/types/AgentCallResponse.ts new file mode 100644 index 00000000..e92371e4 --- /dev/null +++ b/src/serialization/types/AgentCallResponse.ts @@ -0,0 +1,105 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { ChatMessage } from "./ChatMessage"; +import { AgentCallResponseToolChoice } from "./AgentCallResponseToolChoice"; +import { LogStatus } from "./LogStatus"; + +export const AgentCallResponse: core.serialization.ObjectSchema< + serializers.AgentCallResponse.Raw, + Humanloop.AgentCallResponse +> = core.serialization.object({ + outputMessage: core.serialization.property("output_message", ChatMessage.optional()), + promptTokens: core.serialization.property("prompt_tokens", core.serialization.number().optional()), + reasoningTokens: core.serialization.property("reasoning_tokens", core.serialization.number().optional()), + outputTokens: core.serialization.property("output_tokens", core.serialization.number().optional()), + promptCost: core.serialization.property("prompt_cost", core.serialization.number().optional()), + outputCost: core.serialization.property("output_cost", core.serialization.number().optional()), + finishReason: core.serialization.property("finish_reason", core.serialization.string().optional()), + messages: core.serialization.list(ChatMessage).optional(), + toolChoice: core.serialization.property("tool_choice", AgentCallResponseToolChoice.optional()), + agent: core.serialization.lazyObject(() => serializers.AgentResponse), + startTime: core.serialization.property("start_time", core.serialization.date().optional()), + endTime: core.serialization.property("end_time", core.serialization.date().optional()), + output: core.serialization.string().optional(), + createdAt: core.serialization.property("created_at", core.serialization.date().optional()), + error: core.serialization.string().optional(), + providerLatency: core.serialization.property("provider_latency", core.serialization.number().optional()), + stdout: core.serialization.string().optional(), + providerRequest: core.serialization.property( + "provider_request", + core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + ), + providerResponse: 
core.serialization.property( + "provider_response", + core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + ), + inputs: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + source: core.serialization.string().optional(), + metadata: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + logStatus: core.serialization.property("log_status", LogStatus.optional()), + sourceDatapointId: core.serialization.property("source_datapoint_id", core.serialization.string().optional()), + traceParentId: core.serialization.property("trace_parent_id", core.serialization.string().optional()), + batches: core.serialization.list(core.serialization.string()).optional(), + user: core.serialization.string().optional(), + environment: core.serialization.string().optional(), + save: core.serialization.boolean().optional(), + logId: core.serialization.property("log_id", core.serialization.string().optional()), + id: core.serialization.string(), + evaluatorLogs: core.serialization.property( + "evaluator_logs", + core.serialization.list(core.serialization.lazyObject(() => serializers.EvaluatorLogResponse)), + ), + traceFlowId: core.serialization.property("trace_flow_id", core.serialization.string().optional()), + traceId: core.serialization.property("trace_id", core.serialization.string().optional()), + traceChildren: core.serialization.property( + "trace_children", + core.serialization.list(core.serialization.lazy(() => serializers.LogResponse)).optional(), + ), + previousAgentMessage: core.serialization.property("previous_agent_message", ChatMessage.optional()), +}); + +export declare namespace AgentCallResponse { + export interface Raw { + output_message?: ChatMessage.Raw | null; + prompt_tokens?: number | null; + reasoning_tokens?: number | null; + output_tokens?: number | null; + prompt_cost?: number | null; + output_cost?: number | null; + finish_reason?: string | null; + messages?: ChatMessage.Raw[] | null; + tool_choice?: AgentCallResponseToolChoice.Raw | null; + agent: serializers.AgentResponse.Raw; + start_time?: string | null; + end_time?: string | null; + output?: string | null; + created_at?: string | null; + error?: string | null; + provider_latency?: number | null; + stdout?: string | null; + provider_request?: Record | null; + provider_response?: Record | null; + inputs?: Record | null; + source?: string | null; + metadata?: Record | null; + log_status?: LogStatus.Raw | null; + source_datapoint_id?: string | null; + trace_parent_id?: string | null; + batches?: string[] | null; + user?: string | null; + environment?: string | null; + save?: boolean | null; + log_id?: string | null; + id: string; + evaluator_logs: serializers.EvaluatorLogResponse.Raw[]; + trace_flow_id?: string | null; + trace_id?: string | null; + trace_children?: serializers.LogResponse.Raw[] | null; + previous_agent_message?: ChatMessage.Raw | null; + } +} diff --git a/src/serialization/types/AgentCallResponseToolChoice.ts b/src/serialization/types/AgentCallResponseToolChoice.ts new file mode 100644 index 00000000..45d00fcb --- /dev/null +++ b/src/serialization/types/AgentCallResponseToolChoice.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { ToolChoice } from "./ToolChoice"; + +export const AgentCallResponseToolChoice: core.serialization.Schema< + serializers.AgentCallResponseToolChoice.Raw, + Humanloop.AgentCallResponseToolChoice +> = core.serialization.undiscriminatedUnion([ + core.serialization.stringLiteral("none"), + core.serialization.stringLiteral("auto"), + core.serialization.stringLiteral("required"), + ToolChoice, +]); + +export declare namespace AgentCallResponseToolChoice { + export type Raw = "none" | "auto" | "required" | ToolChoice.Raw; +} diff --git a/src/serialization/types/AgentCallStreamResponse.ts b/src/serialization/types/AgentCallStreamResponse.ts new file mode 100644 index 00000000..15bb94ee --- /dev/null +++ b/src/serialization/types/AgentCallStreamResponse.ts @@ -0,0 +1,30 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { AgentCallStreamResponsePayload } from "./AgentCallStreamResponsePayload"; +import { EventType } from "./EventType"; + +export const AgentCallStreamResponse: core.serialization.ObjectSchema< + serializers.AgentCallStreamResponse.Raw, + Humanloop.AgentCallStreamResponse +> = core.serialization.object({ + logId: core.serialization.property("log_id", core.serialization.string()), + message: core.serialization.string(), + payload: AgentCallStreamResponsePayload.optional(), + type: EventType, + createdAt: core.serialization.property("created_at", core.serialization.date()), +}); + +export declare namespace AgentCallStreamResponse { + export interface Raw { + log_id: string; + message: string; + payload?: AgentCallStreamResponsePayload.Raw | null; + type: EventType.Raw; + created_at: string; + } +} diff --git a/src/serialization/types/AgentCallStreamResponsePayload.ts b/src/serialization/types/AgentCallStreamResponsePayload.ts new file mode 100644 index 00000000..7ed435ff --- /dev/null +++ b/src/serialization/types/AgentCallStreamResponsePayload.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { LogStreamResponse } from "./LogStreamResponse"; +import { ToolCall } from "./ToolCall"; + +export const AgentCallStreamResponsePayload: core.serialization.Schema< + serializers.AgentCallStreamResponsePayload.Raw, + Humanloop.AgentCallStreamResponsePayload +> = core.serialization.undiscriminatedUnion([ + LogStreamResponse, + core.serialization.lazy(() => serializers.LogResponse), + ToolCall, +]); + +export declare namespace AgentCallStreamResponsePayload { + export type Raw = LogStreamResponse.Raw | serializers.LogResponse.Raw | ToolCall.Raw; +} diff --git a/src/serialization/types/AgentContinueCallResponse.ts b/src/serialization/types/AgentContinueCallResponse.ts new file mode 100644 index 00000000..8ed5ff20 --- /dev/null +++ b/src/serialization/types/AgentContinueCallResponse.ts @@ -0,0 +1,105 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
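`AgentCallStreamResponse` defines the streaming envelope (`log_id`, `message`, `type`, `created_at`, plus an optional payload that is an undiscriminated union of `LogStreamResponse`, `LogResponse`, and `ToolCall`). A sketch of consuming it, assuming `client.agents.callStream` resolves to an async-iterable of these events and that the `ToolCall` member carries a `function` field; both are assumptions, not confirmed by this diff:

```typescript
import { Humanloop, HumanloopClient } from "humanloop";

const client = new HumanloopClient({ apiKey: process.env.HUMANLOOP_API_KEY! });

async function streamAgentCall(): Promise<void> {
    const stream = await client.agents.callStream({
        path: "agents/support-triage", // placeholder Agent path
        messages: [{ role: "user", content: "Hello there" }],
    });

    for await (const event of stream) {
        const e = event as Humanloop.AgentCallStreamResponse;
        if (e.payload && typeof e.payload === "object" && "function" in e.payload) {
            console.log("tool call payload:", e.payload); // assumed ToolCall branch
        } else {
            console.log(`[${e.type}] ${e.message}`);
        }
    }
}
```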
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { ChatMessage } from "./ChatMessage"; +import { AgentContinueCallResponseToolChoice } from "./AgentContinueCallResponseToolChoice"; +import { LogStatus } from "./LogStatus"; + +export const AgentContinueCallResponse: core.serialization.ObjectSchema< + serializers.AgentContinueCallResponse.Raw, + Humanloop.AgentContinueCallResponse +> = core.serialization.object({ + outputMessage: core.serialization.property("output_message", ChatMessage.optional()), + promptTokens: core.serialization.property("prompt_tokens", core.serialization.number().optional()), + reasoningTokens: core.serialization.property("reasoning_tokens", core.serialization.number().optional()), + outputTokens: core.serialization.property("output_tokens", core.serialization.number().optional()), + promptCost: core.serialization.property("prompt_cost", core.serialization.number().optional()), + outputCost: core.serialization.property("output_cost", core.serialization.number().optional()), + finishReason: core.serialization.property("finish_reason", core.serialization.string().optional()), + messages: core.serialization.list(ChatMessage).optional(), + toolChoice: core.serialization.property("tool_choice", AgentContinueCallResponseToolChoice.optional()), + agent: core.serialization.lazyObject(() => serializers.AgentResponse), + startTime: core.serialization.property("start_time", core.serialization.date().optional()), + endTime: core.serialization.property("end_time", core.serialization.date().optional()), + output: core.serialization.string().optional(), + createdAt: core.serialization.property("created_at", core.serialization.date().optional()), + error: core.serialization.string().optional(), + providerLatency: core.serialization.property("provider_latency", core.serialization.number().optional()), + stdout: core.serialization.string().optional(), + providerRequest: core.serialization.property( + "provider_request", + core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + ), + providerResponse: core.serialization.property( + "provider_response", + core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + ), + inputs: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + source: core.serialization.string().optional(), + metadata: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + logStatus: core.serialization.property("log_status", LogStatus.optional()), + sourceDatapointId: core.serialization.property("source_datapoint_id", core.serialization.string().optional()), + traceParentId: core.serialization.property("trace_parent_id", core.serialization.string().optional()), + batches: core.serialization.list(core.serialization.string()).optional(), + user: core.serialization.string().optional(), + environment: core.serialization.string().optional(), + save: core.serialization.boolean().optional(), + logId: core.serialization.property("log_id", core.serialization.string().optional()), + id: core.serialization.string(), + evaluatorLogs: core.serialization.property( + "evaluator_logs", + core.serialization.list(core.serialization.lazyObject(() => serializers.EvaluatorLogResponse)), + ), + traceFlowId: core.serialization.property("trace_flow_id", core.serialization.string().optional()), + traceId: 
core.serialization.property("trace_id", core.serialization.string().optional()), + traceChildren: core.serialization.property( + "trace_children", + core.serialization.list(core.serialization.lazy(() => serializers.LogResponse)).optional(), + ), + previousAgentMessage: core.serialization.property("previous_agent_message", ChatMessage.optional()), +}); + +export declare namespace AgentContinueCallResponse { + export interface Raw { + output_message?: ChatMessage.Raw | null; + prompt_tokens?: number | null; + reasoning_tokens?: number | null; + output_tokens?: number | null; + prompt_cost?: number | null; + output_cost?: number | null; + finish_reason?: string | null; + messages?: ChatMessage.Raw[] | null; + tool_choice?: AgentContinueCallResponseToolChoice.Raw | null; + agent: serializers.AgentResponse.Raw; + start_time?: string | null; + end_time?: string | null; + output?: string | null; + created_at?: string | null; + error?: string | null; + provider_latency?: number | null; + stdout?: string | null; + provider_request?: Record | null; + provider_response?: Record | null; + inputs?: Record | null; + source?: string | null; + metadata?: Record | null; + log_status?: LogStatus.Raw | null; + source_datapoint_id?: string | null; + trace_parent_id?: string | null; + batches?: string[] | null; + user?: string | null; + environment?: string | null; + save?: boolean | null; + log_id?: string | null; + id: string; + evaluator_logs: serializers.EvaluatorLogResponse.Raw[]; + trace_flow_id?: string | null; + trace_id?: string | null; + trace_children?: serializers.LogResponse.Raw[] | null; + previous_agent_message?: ChatMessage.Raw | null; + } +} diff --git a/src/serialization/types/AgentContinueCallResponseToolChoice.ts b/src/serialization/types/AgentContinueCallResponseToolChoice.ts new file mode 100644 index 00000000..834f6b27 --- /dev/null +++ b/src/serialization/types/AgentContinueCallResponseToolChoice.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { ToolChoice } from "./ToolChoice"; + +export const AgentContinueCallResponseToolChoice: core.serialization.Schema< + serializers.AgentContinueCallResponseToolChoice.Raw, + Humanloop.AgentContinueCallResponseToolChoice +> = core.serialization.undiscriminatedUnion([ + core.serialization.stringLiteral("none"), + core.serialization.stringLiteral("auto"), + core.serialization.stringLiteral("required"), + ToolChoice, +]); + +export declare namespace AgentContinueCallResponseToolChoice { + export type Raw = "none" | "auto" | "required" | ToolChoice.Raw; +} diff --git a/src/serialization/types/AgentContinueCallStreamResponse.ts b/src/serialization/types/AgentContinueCallStreamResponse.ts new file mode 100644 index 00000000..369dc502 --- /dev/null +++ b/src/serialization/types/AgentContinueCallStreamResponse.ts @@ -0,0 +1,30 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { AgentContinueCallStreamResponsePayload } from "./AgentContinueCallStreamResponsePayload"; +import { EventType } from "./EventType"; + +export const AgentContinueCallStreamResponse: core.serialization.ObjectSchema< + serializers.AgentContinueCallStreamResponse.Raw, + Humanloop.AgentContinueCallStreamResponse +> = core.serialization.object({ + logId: core.serialization.property("log_id", core.serialization.string()), + message: core.serialization.string(), + payload: AgentContinueCallStreamResponsePayload.optional(), + type: EventType, + createdAt: core.serialization.property("created_at", core.serialization.date()), +}); + +export declare namespace AgentContinueCallStreamResponse { + export interface Raw { + log_id: string; + message: string; + payload?: AgentContinueCallStreamResponsePayload.Raw | null; + type: EventType.Raw; + created_at: string; + } +} diff --git a/src/serialization/types/AgentContinueCallStreamResponsePayload.ts b/src/serialization/types/AgentContinueCallStreamResponsePayload.ts new file mode 100644 index 00000000..1612723e --- /dev/null +++ b/src/serialization/types/AgentContinueCallStreamResponsePayload.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { LogStreamResponse } from "./LogStreamResponse"; +import { ToolCall } from "./ToolCall"; + +export const AgentContinueCallStreamResponsePayload: core.serialization.Schema< + serializers.AgentContinueCallStreamResponsePayload.Raw, + Humanloop.AgentContinueCallStreamResponsePayload +> = core.serialization.undiscriminatedUnion([ + LogStreamResponse, + core.serialization.lazy(() => serializers.LogResponse), + ToolCall, +]); + +export declare namespace AgentContinueCallStreamResponsePayload { + export type Raw = LogStreamResponse.Raw | serializers.LogResponse.Raw | ToolCall.Raw; +} diff --git a/src/serialization/types/AgentInlineTool.ts b/src/serialization/types/AgentInlineTool.ts new file mode 100644 index 00000000..087826a4 --- /dev/null +++ b/src/serialization/types/AgentInlineTool.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { ToolFunction } from "./ToolFunction"; +import { OnAgentCallEnum } from "./OnAgentCallEnum"; + +export const AgentInlineTool: core.serialization.ObjectSchema< + serializers.AgentInlineTool.Raw, + Humanloop.AgentInlineTool +> = core.serialization.object({ + type: core.serialization.stringLiteral("inline"), + jsonSchema: core.serialization.property("json_schema", ToolFunction), + onAgentCall: core.serialization.property("on_agent_call", OnAgentCallEnum.optional()), +}); + +export declare namespace AgentInlineTool { + export interface Raw { + type: "inline"; + json_schema: ToolFunction.Raw; + on_agent_call?: OnAgentCallEnum.Raw | null; + } +} diff --git a/src/serialization/types/AgentKernelRequest.ts b/src/serialization/types/AgentKernelRequest.ts new file mode 100644 index 00000000..f1005570 --- /dev/null +++ b/src/serialization/types/AgentKernelRequest.ts @@ -0,0 +1,62 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */
+
+import * as serializers from "../index";
+import * as Humanloop from "../../api/index";
+import * as core from "../../core";
+import { ModelEndpoints } from "./ModelEndpoints";
+import { AgentKernelRequestTemplate } from "./AgentKernelRequestTemplate";
+import { TemplateLanguage } from "./TemplateLanguage";
+import { ModelProviders } from "./ModelProviders";
+import { AgentKernelRequestStop } from "./AgentKernelRequestStop";
+import { ResponseFormat } from "./ResponseFormat";
+import { AgentKernelRequestReasoningEffort } from "./AgentKernelRequestReasoningEffort";
+import { AgentKernelRequestToolsItem } from "./AgentKernelRequestToolsItem";
+
+export const AgentKernelRequest: core.serialization.ObjectSchema<
+    serializers.AgentKernelRequest.Raw,
+    Humanloop.AgentKernelRequest
+> = core.serialization.object({
+    model: core.serialization.string(),
+    endpoint: ModelEndpoints.optional(),
+    template: AgentKernelRequestTemplate.optional(),
+    templateLanguage: core.serialization.property("template_language", TemplateLanguage.optional()),
+    provider: ModelProviders.optional(),
+    maxTokens: core.serialization.property("max_tokens", core.serialization.number().optional()),
+    temperature: core.serialization.number().optional(),
+    topP: core.serialization.property("top_p", core.serialization.number().optional()),
+    stop: AgentKernelRequestStop.optional(),
+    presencePenalty: core.serialization.property("presence_penalty", core.serialization.number().optional()),
+    frequencyPenalty: core.serialization.property("frequency_penalty", core.serialization.number().optional()),
+    other: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
+    seed: core.serialization.number().optional(),
+    responseFormat: core.serialization.property("response_format", ResponseFormat.optional()),
+    reasoningEffort: core.serialization.property("reasoning_effort", AgentKernelRequestReasoningEffort.optional()),
+    tools: core.serialization.list(AgentKernelRequestToolsItem).optional(),
+    attributes: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
+    maxIterations: core.serialization.property("max_iterations", core.serialization.number().optional()),
+});
+
+export declare namespace AgentKernelRequest {
+    export interface Raw {
+        model: string;
+        endpoint?: ModelEndpoints.Raw | null;
+        template?: AgentKernelRequestTemplate.Raw | null;
+        template_language?: TemplateLanguage.Raw | null;
+        provider?: ModelProviders.Raw | null;
+        max_tokens?: number | null;
+        temperature?: number | null;
+        top_p?: number | null;
+        stop?: AgentKernelRequestStop.Raw | null;
+        presence_penalty?: number | null;
+        frequency_penalty?: number | null;
+        other?: Record<string, unknown> | null;
+        seed?: number | null;
+        response_format?: ResponseFormat.Raw | null;
+        reasoning_effort?: AgentKernelRequestReasoningEffort.Raw | null;
+        tools?: AgentKernelRequestToolsItem.Raw[] | null;
+        attributes?: Record<string, unknown> | null;
+        max_iterations?: number | null;
+    }
+}
diff --git a/src/serialization/types/AgentKernelRequestReasoningEffort.ts b/src/serialization/types/AgentKernelRequestReasoningEffort.ts
new file mode 100644
index 00000000..25dd48a0
--- /dev/null
+++ b/src/serialization/types/AgentKernelRequestReasoningEffort.ts
@@ -0,0 +1,17 @@
+/**
+ * This file was auto-generated by Fern from our API Definition.
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { OpenAiReasoningEffort } from "./OpenAiReasoningEffort"; + +export const AgentKernelRequestReasoningEffort: core.serialization.Schema< + serializers.AgentKernelRequestReasoningEffort.Raw, + Humanloop.AgentKernelRequestReasoningEffort +> = core.serialization.undiscriminatedUnion([OpenAiReasoningEffort, core.serialization.number()]); + +export declare namespace AgentKernelRequestReasoningEffort { + export type Raw = OpenAiReasoningEffort.Raw | number; +} diff --git a/src/serialization/types/AgentKernelRequestStop.ts b/src/serialization/types/AgentKernelRequestStop.ts new file mode 100644 index 00000000..1b7d0cdc --- /dev/null +++ b/src/serialization/types/AgentKernelRequestStop.ts @@ -0,0 +1,19 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; + +export const AgentKernelRequestStop: core.serialization.Schema< + serializers.AgentKernelRequestStop.Raw, + Humanloop.AgentKernelRequestStop +> = core.serialization.undiscriminatedUnion([ + core.serialization.string(), + core.serialization.list(core.serialization.string()), +]); + +export declare namespace AgentKernelRequestStop { + export type Raw = string | string[]; +} diff --git a/src/serialization/types/AgentKernelRequestTemplate.ts b/src/serialization/types/AgentKernelRequestTemplate.ts new file mode 100644 index 00000000..b3b07762 --- /dev/null +++ b/src/serialization/types/AgentKernelRequestTemplate.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { ChatMessage } from "./ChatMessage"; + +export const AgentKernelRequestTemplate: core.serialization.Schema< + serializers.AgentKernelRequestTemplate.Raw, + Humanloop.AgentKernelRequestTemplate +> = core.serialization.undiscriminatedUnion([core.serialization.string(), core.serialization.list(ChatMessage)]); + +export declare namespace AgentKernelRequestTemplate { + export type Raw = string | ChatMessage.Raw[]; +} diff --git a/src/serialization/types/AgentKernelRequestToolsItem.ts b/src/serialization/types/AgentKernelRequestToolsItem.ts new file mode 100644 index 00000000..7aaeb264 --- /dev/null +++ b/src/serialization/types/AgentKernelRequestToolsItem.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { AgentLinkedFileRequest } from "./AgentLinkedFileRequest"; +import { AgentInlineTool } from "./AgentInlineTool"; + +export const AgentKernelRequestToolsItem: core.serialization.Schema< + serializers.AgentKernelRequestToolsItem.Raw, + Humanloop.AgentKernelRequestToolsItem +> = core.serialization.undiscriminatedUnion([AgentLinkedFileRequest, AgentInlineTool]); + +export declare namespace AgentKernelRequestToolsItem { + export type Raw = AgentLinkedFileRequest.Raw | AgentInlineTool.Raw; +} diff --git a/src/serialization/types/AgentLinkedFileRequest.ts b/src/serialization/types/AgentLinkedFileRequest.ts new file mode 100644 index 00000000..7fbe8863 --- /dev/null +++ b/src/serialization/types/AgentLinkedFileRequest.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { LinkedFileRequest } from "./LinkedFileRequest"; +import { OnAgentCallEnum } from "./OnAgentCallEnum"; + +export const AgentLinkedFileRequest: core.serialization.ObjectSchema< + serializers.AgentLinkedFileRequest.Raw, + Humanloop.AgentLinkedFileRequest +> = core.serialization.object({ + type: core.serialization.stringLiteral("file"), + link: LinkedFileRequest, + onAgentCall: core.serialization.property("on_agent_call", OnAgentCallEnum.optional()), +}); + +export declare namespace AgentLinkedFileRequest { + export interface Raw { + type: "file"; + link: LinkedFileRequest.Raw; + on_agent_call?: OnAgentCallEnum.Raw | null; + } +} diff --git a/src/serialization/types/AgentLinkedFileResponse.ts b/src/serialization/types/AgentLinkedFileResponse.ts new file mode 100644 index 00000000..adc8a98f --- /dev/null +++ b/src/serialization/types/AgentLinkedFileResponse.ts @@ -0,0 +1,28 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { LinkedFileRequest } from "./LinkedFileRequest"; +import { OnAgentCallEnum } from "./OnAgentCallEnum"; + +export const AgentLinkedFileResponse: core.serialization.ObjectSchema< + serializers.AgentLinkedFileResponse.Raw, + Humanloop.AgentLinkedFileResponse +> = core.serialization.object({ + type: core.serialization.stringLiteral("file"), + link: LinkedFileRequest, + onAgentCall: core.serialization.property("on_agent_call", OnAgentCallEnum.optional()), + file: core.serialization.lazy(() => serializers.AgentLinkedFileResponseFile).optional(), +}); + +export declare namespace AgentLinkedFileResponse { + export interface Raw { + type: "file"; + link: LinkedFileRequest.Raw; + on_agent_call?: OnAgentCallEnum.Raw | null; + file?: serializers.AgentLinkedFileResponseFile.Raw | null; + } +} diff --git a/src/serialization/types/AgentLinkedFileResponseFile.ts b/src/serialization/types/AgentLinkedFileResponseFile.ts new file mode 100644 index 00000000..9c337bdc --- /dev/null +++ b/src/serialization/types/AgentLinkedFileResponseFile.ts @@ -0,0 +1,30 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { DatasetResponse } from "./DatasetResponse"; + +export const AgentLinkedFileResponseFile: core.serialization.Schema< + serializers.AgentLinkedFileResponseFile.Raw, + Humanloop.AgentLinkedFileResponseFile +> = core.serialization.undiscriminatedUnion([ + core.serialization.lazyObject(() => serializers.PromptResponse), + core.serialization.lazyObject(() => serializers.ToolResponse), + DatasetResponse, + core.serialization.lazyObject(() => serializers.EvaluatorResponse), + core.serialization.lazyObject(() => serializers.FlowResponse), + core.serialization.lazyObject(() => serializers.AgentResponse), +]); + +export declare namespace AgentLinkedFileResponseFile { + export type Raw = + | serializers.PromptResponse.Raw + | serializers.ToolResponse.Raw + | DatasetResponse.Raw + | serializers.EvaluatorResponse.Raw + | serializers.FlowResponse.Raw + | serializers.AgentResponse.Raw; +} diff --git a/src/serialization/types/AgentLogResponse.ts b/src/serialization/types/AgentLogResponse.ts new file mode 100644 index 00000000..6146f45d --- /dev/null +++ b/src/serialization/types/AgentLogResponse.ts @@ -0,0 +1,103 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { ChatMessage } from "./ChatMessage"; +import { AgentLogResponseToolChoice } from "./AgentLogResponseToolChoice"; +import { LogStatus } from "./LogStatus"; + +export const AgentLogResponse: core.serialization.ObjectSchema< + serializers.AgentLogResponse.Raw, + Humanloop.AgentLogResponse +> = core.serialization.object({ + outputMessage: core.serialization.property("output_message", ChatMessage.optional()), + promptTokens: core.serialization.property("prompt_tokens", core.serialization.number().optional()), + reasoningTokens: core.serialization.property("reasoning_tokens", core.serialization.number().optional()), + outputTokens: core.serialization.property("output_tokens", core.serialization.number().optional()), + promptCost: core.serialization.property("prompt_cost", core.serialization.number().optional()), + outputCost: core.serialization.property("output_cost", core.serialization.number().optional()), + finishReason: core.serialization.property("finish_reason", core.serialization.string().optional()), + messages: core.serialization.list(ChatMessage).optional(), + toolChoice: core.serialization.property("tool_choice", AgentLogResponseToolChoice.optional()), + agent: core.serialization.lazyObject(() => serializers.AgentResponse), + startTime: core.serialization.property("start_time", core.serialization.date().optional()), + endTime: core.serialization.property("end_time", core.serialization.date().optional()), + output: core.serialization.string().optional(), + createdAt: core.serialization.property("created_at", core.serialization.date().optional()), + error: core.serialization.string().optional(), + providerLatency: core.serialization.property("provider_latency", core.serialization.number().optional()), + stdout: core.serialization.string().optional(), + providerRequest: core.serialization.property( + "provider_request", + core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + ), + providerResponse: core.serialization.property( + "provider_response", + 
core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
+    ),
+    inputs: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
+    source: core.serialization.string().optional(),
+    metadata: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
+    logStatus: core.serialization.property("log_status", LogStatus.optional()),
+    sourceDatapointId: core.serialization.property("source_datapoint_id", core.serialization.string().optional()),
+    traceParentId: core.serialization.property("trace_parent_id", core.serialization.string().optional()),
+    batches: core.serialization.list(core.serialization.string()).optional(),
+    user: core.serialization.string().optional(),
+    environment: core.serialization.string().optional(),
+    save: core.serialization.boolean().optional(),
+    logId: core.serialization.property("log_id", core.serialization.string().optional()),
+    id: core.serialization.string(),
+    evaluatorLogs: core.serialization.property(
+        "evaluator_logs",
+        core.serialization.list(core.serialization.lazyObject(() => serializers.EvaluatorLogResponse)),
+    ),
+    traceFlowId: core.serialization.property("trace_flow_id", core.serialization.string().optional()),
+    traceId: core.serialization.property("trace_id", core.serialization.string().optional()),
+    traceChildren: core.serialization.property(
+        "trace_children",
+        core.serialization.list(core.serialization.lazy(() => serializers.LogResponse)).optional(),
+    ),
+});
+
+export declare namespace AgentLogResponse {
+    export interface Raw {
+        output_message?: ChatMessage.Raw | null;
+        prompt_tokens?: number | null;
+        reasoning_tokens?: number | null;
+        output_tokens?: number | null;
+        prompt_cost?: number | null;
+        output_cost?: number | null;
+        finish_reason?: string | null;
+        messages?: ChatMessage.Raw[] | null;
+        tool_choice?: AgentLogResponseToolChoice.Raw | null;
+        agent: serializers.AgentResponse.Raw;
+        start_time?: string | null;
+        end_time?: string | null;
+        output?: string | null;
+        created_at?: string | null;
+        error?: string | null;
+        provider_latency?: number | null;
+        stdout?: string | null;
+        provider_request?: Record<string, unknown> | null;
+        provider_response?: Record<string, unknown> | null;
+        inputs?: Record<string, unknown> | null;
+        source?: string | null;
+        metadata?: Record<string, unknown> | null;
+        log_status?: LogStatus.Raw | null;
+        source_datapoint_id?: string | null;
+        trace_parent_id?: string | null;
+        batches?: string[] | null;
+        user?: string | null;
+        environment?: string | null;
+        save?: boolean | null;
+        log_id?: string | null;
+        id: string;
+        evaluator_logs: serializers.EvaluatorLogResponse.Raw[];
+        trace_flow_id?: string | null;
+        trace_id?: string | null;
+        trace_children?: serializers.LogResponse.Raw[] | null;
+    }
+}
diff --git a/src/serialization/types/AgentLogResponseToolChoice.ts b/src/serialization/types/AgentLogResponseToolChoice.ts
new file mode 100644
index 00000000..d5938949
--- /dev/null
+++ b/src/serialization/types/AgentLogResponseToolChoice.ts
@@ -0,0 +1,22 @@
+/**
+ * This file was auto-generated by Fern from our API Definition.
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { ToolChoice } from "./ToolChoice"; + +export const AgentLogResponseToolChoice: core.serialization.Schema< + serializers.AgentLogResponseToolChoice.Raw, + Humanloop.AgentLogResponseToolChoice +> = core.serialization.undiscriminatedUnion([ + core.serialization.stringLiteral("none"), + core.serialization.stringLiteral("auto"), + core.serialization.stringLiteral("required"), + ToolChoice, +]); + +export declare namespace AgentLogResponseToolChoice { + export type Raw = "none" | "auto" | "required" | ToolChoice.Raw; +} diff --git a/src/serialization/types/AgentLogStreamResponse.ts b/src/serialization/types/AgentLogStreamResponse.ts new file mode 100644 index 00000000..0ca2621e --- /dev/null +++ b/src/serialization/types/AgentLogStreamResponse.ts @@ -0,0 +1,49 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { ChatMessage } from "./ChatMessage"; + +export const AgentLogStreamResponse: core.serialization.ObjectSchema< + serializers.AgentLogStreamResponse.Raw, + Humanloop.AgentLogStreamResponse +> = core.serialization.object({ + output: core.serialization.string().optional(), + createdAt: core.serialization.property("created_at", core.serialization.date().optional()), + error: core.serialization.string().optional(), + providerLatency: core.serialization.property("provider_latency", core.serialization.number().optional()), + stdout: core.serialization.string().optional(), + outputMessage: core.serialization.property("output_message", ChatMessage.optional()), + promptTokens: core.serialization.property("prompt_tokens", core.serialization.number().optional()), + reasoningTokens: core.serialization.property("reasoning_tokens", core.serialization.number().optional()), + outputTokens: core.serialization.property("output_tokens", core.serialization.number().optional()), + promptCost: core.serialization.property("prompt_cost", core.serialization.number().optional()), + outputCost: core.serialization.property("output_cost", core.serialization.number().optional()), + finishReason: core.serialization.property("finish_reason", core.serialization.string().optional()), + id: core.serialization.string(), + agentId: core.serialization.property("agent_id", core.serialization.string()), + versionId: core.serialization.property("version_id", core.serialization.string()), +}); + +export declare namespace AgentLogStreamResponse { + export interface Raw { + output?: string | null; + created_at?: string | null; + error?: string | null; + provider_latency?: number | null; + stdout?: string | null; + output_message?: ChatMessage.Raw | null; + prompt_tokens?: number | null; + reasoning_tokens?: number | null; + output_tokens?: number | null; + prompt_cost?: number | null; + output_cost?: number | null; + finish_reason?: string | null; + id: string; + agent_id: string; + version_id: string; + } +} diff --git a/src/serialization/types/AgentResponse.ts b/src/serialization/types/AgentResponse.ts new file mode 100644 index 00000000..f9b96ccf --- /dev/null +++ b/src/serialization/types/AgentResponse.ts @@ -0,0 +1,119 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */
+
+import * as serializers from "../index";
+import * as Humanloop from "../../api/index";
+import * as core from "../../core";
+import { ModelEndpoints } from "./ModelEndpoints";
+import { AgentResponseTemplate } from "./AgentResponseTemplate";
+import { TemplateLanguage } from "./TemplateLanguage";
+import { ModelProviders } from "./ModelProviders";
+import { AgentResponseStop } from "./AgentResponseStop";
+import { ResponseFormat } from "./ResponseFormat";
+import { AgentResponseReasoningEffort } from "./AgentResponseReasoningEffort";
+import { EnvironmentResponse } from "./EnvironmentResponse";
+import { UserResponse } from "./UserResponse";
+import { VersionStatus } from "./VersionStatus";
+import { InputResponse } from "./InputResponse";
+import { EvaluatorAggregate } from "./EvaluatorAggregate";
+
+export const AgentResponse: core.serialization.ObjectSchema<serializers.AgentResponse.Raw, Humanloop.AgentResponse> =
+    core.serialization.object({
+        path: core.serialization.string(),
+        id: core.serialization.string(),
+        directoryId: core.serialization.property("directory_id", core.serialization.string().optional()),
+        model: core.serialization.string(),
+        endpoint: ModelEndpoints.optional(),
+        template: AgentResponseTemplate.optional(),
+        templateLanguage: core.serialization.property("template_language", TemplateLanguage.optional()),
+        provider: ModelProviders.optional(),
+        maxTokens: core.serialization.property("max_tokens", core.serialization.number().optional()),
+        temperature: core.serialization.number().optional(),
+        topP: core.serialization.property("top_p", core.serialization.number().optional()),
+        stop: AgentResponseStop.optional(),
+        presencePenalty: core.serialization.property("presence_penalty", core.serialization.number().optional()),
+        frequencyPenalty: core.serialization.property("frequency_penalty", core.serialization.number().optional()),
+        other: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
+        seed: core.serialization.number().optional(),
+        responseFormat: core.serialization.property("response_format", ResponseFormat.optional()),
+        reasoningEffort: core.serialization.property("reasoning_effort", AgentResponseReasoningEffort.optional()),
+        tools: core.serialization.list(core.serialization.lazy(() => serializers.AgentResponseToolsItem)),
+        attributes: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
+        maxIterations: core.serialization.property("max_iterations", core.serialization.number().optional()),
+        versionName: core.serialization.property("version_name", core.serialization.string().optional()),
+        versionDescription: core.serialization.property("version_description", core.serialization.string().optional()),
+        description: core.serialization.string().optional(),
+        tags: core.serialization.list(core.serialization.string()).optional(),
+        readme: core.serialization.string().optional(),
+        name: core.serialization.string(),
+        schema: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
+        versionId: core.serialization.property("version_id", core.serialization.string()),
+        type: core.serialization.stringLiteral("agent").optional(),
+        environments: core.serialization.list(EnvironmentResponse).optional(),
+        createdAt: core.serialization.property("created_at", core.serialization.date()),
+        updatedAt: core.serialization.property("updated_at", core.serialization.date()),
+        createdBy: core.serialization.property("created_by", UserResponse.optional()),
+        committedBy: core.serialization.property("committed_by", UserResponse.optional()),
+        committedAt: core.serialization.property("committed_at", core.serialization.date().optional()),
+        status: VersionStatus,
+        lastUsedAt: core.serialization.property("last_used_at", core.serialization.date()),
+        versionLogsCount: core.serialization.property("version_logs_count", core.serialization.number()),
+        totalLogsCount: core.serialization.property("total_logs_count", core.serialization.number()),
+        inputs: core.serialization.list(InputResponse),
+        evaluators: core.serialization
+            .list(core.serialization.lazyObject(() => serializers.MonitoringEvaluatorResponse))
+            .optional(),
+        evaluatorAggregates: core.serialization.property(
+            "evaluator_aggregates",
+            core.serialization.list(EvaluatorAggregate).optional(),
+        ),
+    });
+
+export declare namespace AgentResponse {
+    export interface Raw {
+        path: string;
+        id: string;
+        directory_id?: string | null;
+        model: string;
+        endpoint?: ModelEndpoints.Raw | null;
+        template?: AgentResponseTemplate.Raw | null;
+        template_language?: TemplateLanguage.Raw | null;
+        provider?: ModelProviders.Raw | null;
+        max_tokens?: number | null;
+        temperature?: number | null;
+        top_p?: number | null;
+        stop?: AgentResponseStop.Raw | null;
+        presence_penalty?: number | null;
+        frequency_penalty?: number | null;
+        other?: Record<string, unknown> | null;
+        seed?: number | null;
+        response_format?: ResponseFormat.Raw | null;
+        reasoning_effort?: AgentResponseReasoningEffort.Raw | null;
+        tools: serializers.AgentResponseToolsItem.Raw[];
+        attributes?: Record<string, unknown> | null;
+        max_iterations?: number | null;
+        version_name?: string | null;
+        version_description?: string | null;
+        description?: string | null;
+        tags?: string[] | null;
+        readme?: string | null;
+        name: string;
+        schema?: Record<string, unknown> | null;
+        version_id: string;
+        type?: "agent" | null;
+        environments?: EnvironmentResponse.Raw[] | null;
+        created_at: string;
+        updated_at: string;
+        created_by?: (UserResponse.Raw | undefined) | null;
+        committed_by?: (UserResponse.Raw | undefined) | null;
+        committed_at?: string | null;
+        status: VersionStatus.Raw;
+        last_used_at: string;
+        version_logs_count: number;
+        total_logs_count: number;
+        inputs: InputResponse.Raw[];
+        evaluators?: serializers.MonitoringEvaluatorResponse.Raw[] | null;
+        evaluator_aggregates?: EvaluatorAggregate.Raw[] | null;
+    }
+}
diff --git a/src/serialization/types/AgentResponseReasoningEffort.ts b/src/serialization/types/AgentResponseReasoningEffort.ts
new file mode 100644
index 00000000..5a01ff8b
--- /dev/null
+++ b/src/serialization/types/AgentResponseReasoningEffort.ts
@@ -0,0 +1,17 @@
+/**
+ * This file was auto-generated by Fern from our API Definition.
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { OpenAiReasoningEffort } from "./OpenAiReasoningEffort"; + +export const AgentResponseReasoningEffort: core.serialization.Schema< + serializers.AgentResponseReasoningEffort.Raw, + Humanloop.AgentResponseReasoningEffort +> = core.serialization.undiscriminatedUnion([OpenAiReasoningEffort, core.serialization.number()]); + +export declare namespace AgentResponseReasoningEffort { + export type Raw = OpenAiReasoningEffort.Raw | number; +} diff --git a/src/serialization/types/AgentResponseStop.ts b/src/serialization/types/AgentResponseStop.ts new file mode 100644 index 00000000..43ae0f86 --- /dev/null +++ b/src/serialization/types/AgentResponseStop.ts @@ -0,0 +1,19 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; + +export const AgentResponseStop: core.serialization.Schema< + serializers.AgentResponseStop.Raw, + Humanloop.AgentResponseStop +> = core.serialization.undiscriminatedUnion([ + core.serialization.string(), + core.serialization.list(core.serialization.string()), +]); + +export declare namespace AgentResponseStop { + export type Raw = string | string[]; +} diff --git a/src/serialization/types/AgentResponseTemplate.ts b/src/serialization/types/AgentResponseTemplate.ts new file mode 100644 index 00000000..b553711d --- /dev/null +++ b/src/serialization/types/AgentResponseTemplate.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { ChatMessage } from "./ChatMessage"; + +export const AgentResponseTemplate: core.serialization.Schema< + serializers.AgentResponseTemplate.Raw, + Humanloop.AgentResponseTemplate +> = core.serialization.undiscriminatedUnion([core.serialization.string(), core.serialization.list(ChatMessage)]); + +export declare namespace AgentResponseTemplate { + export type Raw = string | ChatMessage.Raw[]; +} diff --git a/src/serialization/types/AgentResponseToolsItem.ts b/src/serialization/types/AgentResponseToolsItem.ts new file mode 100644 index 00000000..92fbf291 --- /dev/null +++ b/src/serialization/types/AgentResponseToolsItem.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { AgentInlineTool } from "./AgentInlineTool"; + +export const AgentResponseToolsItem: core.serialization.Schema< + serializers.AgentResponseToolsItem.Raw, + Humanloop.AgentResponseToolsItem +> = core.serialization.undiscriminatedUnion([ + core.serialization.lazyObject(() => serializers.AgentLinkedFileResponse), + AgentInlineTool, +]); + +export declare namespace AgentResponseToolsItem { + export type Raw = serializers.AgentLinkedFileResponse.Raw | AgentInlineTool.Raw; +} diff --git a/src/serialization/types/AnthropicRedactedThinkingContent.ts b/src/serialization/types/AnthropicRedactedThinkingContent.ts new file mode 100644 index 00000000..92aa3331 --- /dev/null +++ b/src/serialization/types/AnthropicRedactedThinkingContent.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; + +export const AnthropicRedactedThinkingContent: core.serialization.ObjectSchema< + serializers.AnthropicRedactedThinkingContent.Raw, + Humanloop.AnthropicRedactedThinkingContent +> = core.serialization.object({ + type: core.serialization.stringLiteral("redacted_thinking"), + data: core.serialization.string(), +}); + +export declare namespace AnthropicRedactedThinkingContent { + export interface Raw { + type: "redacted_thinking"; + data: string; + } +} diff --git a/src/serialization/types/AnthropicThinkingContent.ts b/src/serialization/types/AnthropicThinkingContent.ts new file mode 100644 index 00000000..e0aa3c51 --- /dev/null +++ b/src/serialization/types/AnthropicThinkingContent.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; + +export const AnthropicThinkingContent: core.serialization.ObjectSchema< + serializers.AnthropicThinkingContent.Raw, + Humanloop.AnthropicThinkingContent +> = core.serialization.object({ + type: core.serialization.stringLiteral("thinking"), + thinking: core.serialization.string(), + signature: core.serialization.string(), +}); + +export declare namespace AnthropicThinkingContent { + export interface Raw { + type: "thinking"; + thinking: string; + signature: string; + } +} diff --git a/src/serialization/types/ChatMessage.ts b/src/serialization/types/ChatMessage.ts index 29ea1a62..c7097645 100644 --- a/src/serialization/types/ChatMessage.ts +++ b/src/serialization/types/ChatMessage.ts @@ -8,6 +8,7 @@ import * as core from "../../core"; import { ChatMessageContent } from "./ChatMessageContent"; import { ChatRole } from "./ChatRole"; import { ToolCall } from "./ToolCall"; +import { ChatMessageThinkingItem } from "./ChatMessageThinkingItem"; export const ChatMessage: core.serialization.ObjectSchema = core.serialization.object({ @@ -16,6 +17,7 @@ export const ChatMessage: core.serialization.ObjectSchema = core.serialization.undiscriminatedUnion([AnthropicThinkingContent, AnthropicRedactedThinkingContent]); + +export declare namespace ChatMessageThinkingItem { + export type Raw = AnthropicThinkingContent.Raw | AnthropicRedactedThinkingContent.Raw; +} diff --git a/src/serialization/types/CreateAgentLogResponse.ts b/src/serialization/types/CreateAgentLogResponse.ts new file mode 100644 index 00000000..36721051 --- /dev/null +++ b/src/serialization/types/CreateAgentLogResponse.ts @@ -0,0 +1,27 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { LogStatus } from "./LogStatus"; + +export const CreateAgentLogResponse: core.serialization.ObjectSchema< + serializers.CreateAgentLogResponse.Raw, + Humanloop.CreateAgentLogResponse +> = core.serialization.object({ + id: core.serialization.string(), + agentId: core.serialization.property("agent_id", core.serialization.string()), + versionId: core.serialization.property("version_id", core.serialization.string()), + logStatus: core.serialization.property("log_status", LogStatus.optional()), +}); + +export declare namespace CreateAgentLogResponse { + export interface Raw { + id: string; + agent_id: string; + version_id: string; + log_status?: LogStatus.Raw | null; + } +} diff --git a/src/serialization/types/DatasetResponse.ts b/src/serialization/types/DatasetResponse.ts index 59b8c91f..83b2bc77 100644 --- a/src/serialization/types/DatasetResponse.ts +++ b/src/serialization/types/DatasetResponse.ts @@ -18,6 +18,7 @@ export const DatasetResponse: core.serialization.ObjectSchema< directoryId: core.serialization.property("directory_id", core.serialization.string().optional()), name: core.serialization.string(), description: core.serialization.string().optional(), + schema: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), readme: core.serialization.string().optional(), tags: core.serialization.list(core.serialization.string()).optional(), versionId: core.serialization.property("version_id", core.serialization.string()), @@ -41,6 +42,7 @@ export declare namespace DatasetResponse { directory_id?: string | null; name: string; description?: string | null; + schema?: Record | null; readme?: string | null; tags?: string[] | null; version_id: string; diff --git a/src/serialization/types/DirectoryWithParentsAndChildrenResponseFilesItem.ts b/src/serialization/types/DirectoryWithParentsAndChildrenResponseFilesItem.ts index a8765b7a..af558948 100644 --- a/src/serialization/types/DirectoryWithParentsAndChildrenResponseFilesItem.ts +++ b/src/serialization/types/DirectoryWithParentsAndChildrenResponseFilesItem.ts @@ -16,6 +16,7 @@ export const DirectoryWithParentsAndChildrenResponseFilesItem: core.serializatio core.serialization.lazyObject(() => serializers.EvaluatorResponse), DatasetResponse, core.serialization.lazyObject(() => serializers.FlowResponse), + core.serialization.lazyObject(() => serializers.AgentResponse), ]); export declare namespace DirectoryWithParentsAndChildrenResponseFilesItem { @@ -24,5 +25,6 @@ export declare namespace DirectoryWithParentsAndChildrenResponseFilesItem { | serializers.ToolResponse.Raw | serializers.EvaluatorResponse.Raw | DatasetResponse.Raw - | serializers.FlowResponse.Raw; + | serializers.FlowResponse.Raw + | serializers.AgentResponse.Raw; } diff --git a/src/serialization/types/EvaluatorResponse.ts b/src/serialization/types/EvaluatorResponse.ts index e4fdf121..fec7be8d 100644 --- a/src/serialization/types/EvaluatorResponse.ts +++ b/src/serialization/types/EvaluatorResponse.ts @@ -23,6 +23,7 @@ export const EvaluatorResponse: core.serialization.ObjectSchema< spec: EvaluatorResponseSpec, name: core.serialization.string(), description: core.serialization.string().optional(), + schema: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), readme: core.serialization.string().optional(), tags: 
core.serialization.list(core.serialization.string()).optional(),
     versionId: core.serialization.property("version_id", core.serialization.string()),
@@ -55,6 +56,7 @@ export declare namespace EvaluatorResponse {
         spec: EvaluatorResponseSpec.Raw;
         name: string;
         description?: string | null;
+        schema?: Record<string, unknown> | null;
         readme?: string | null;
         tags?: string[] | null;
         version_id: string;
diff --git a/src/serialization/types/EventType.ts b/src/serialization/types/EventType.ts
new file mode 100644
index 00000000..b7d38e57
--- /dev/null
+++ b/src/serialization/types/EventType.ts
@@ -0,0 +1,39 @@
+/**
+ * This file was auto-generated by Fern from our API Definition.
+ */
+
+import * as serializers from "../index";
+import * as Humanloop from "../../api/index";
+import * as core from "../../core";
+
+export const EventType: core.serialization.Schema<serializers.EventType.Raw, Humanloop.EventType> =
+    core.serialization.enum_([
+        "agent_turn_start",
+        "agent_turn_suspend",
+        "agent_turn_continue",
+        "agent_turn_end",
+        "agent_start",
+        "agent_update",
+        "agent_end",
+        "tool_start",
+        "tool_update",
+        "tool_end",
+        "error",
+        "agent_generation_error",
+    ]);
+
+export declare namespace EventType {
+    export type Raw =
+        | "agent_turn_start"
+        | "agent_turn_suspend"
+        | "agent_turn_continue"
+        | "agent_turn_end"
+        | "agent_start"
+        | "agent_update"
+        | "agent_end"
+        | "tool_start"
+        | "tool_update"
+        | "tool_end"
+        | "error"
+        | "agent_generation_error";
+}
diff --git a/src/serialization/types/FileEnvironmentResponseFile.ts b/src/serialization/types/FileEnvironmentResponseFile.ts
index 3d7f2228..805ce324 100644
--- a/src/serialization/types/FileEnvironmentResponseFile.ts
+++ b/src/serialization/types/FileEnvironmentResponseFile.ts
@@ -16,6 +16,7 @@ export const FileEnvironmentResponseFile: core.serialization.Schema<
     DatasetResponse,
     core.serialization.lazyObject(() => serializers.EvaluatorResponse),
     core.serialization.lazyObject(() => serializers.FlowResponse),
+    core.serialization.lazyObject(() => serializers.AgentResponse),
 ]);
 
 export declare namespace FileEnvironmentResponseFile {
@@ -24,5 +25,6 @@ export declare namespace FileEnvironmentResponseFile {
         | serializers.ToolResponse.Raw
         | DatasetResponse.Raw
         | serializers.EvaluatorResponse.Raw
-        | serializers.FlowResponse.Raw;
+        | serializers.FlowResponse.Raw
+        | serializers.AgentResponse.Raw;
 }
diff --git a/src/serialization/types/FileEnvironmentVariableRequest.ts b/src/serialization/types/FileEnvironmentVariableRequest.ts
new file mode 100644
index 00000000..4c3f3e47
--- /dev/null
+++ b/src/serialization/types/FileEnvironmentVariableRequest.ts
@@ -0,0 +1,22 @@
+/**
+ * This file was auto-generated by Fern from our API Definition.
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; + +export const FileEnvironmentVariableRequest: core.serialization.ObjectSchema< + serializers.FileEnvironmentVariableRequest.Raw, + Humanloop.FileEnvironmentVariableRequest +> = core.serialization.object({ + name: core.serialization.string(), + value: core.serialization.string(), +}); + +export declare namespace FileEnvironmentVariableRequest { + export interface Raw { + name: string; + value: string; + } +} diff --git a/src/serialization/types/FileType.ts b/src/serialization/types/FileType.ts index b26c954d..c426d30e 100644 --- a/src/serialization/types/FileType.ts +++ b/src/serialization/types/FileType.ts @@ -7,8 +7,8 @@ import * as Humanloop from "../../api/index"; import * as core from "../../core"; export const FileType: core.serialization.Schema = - core.serialization.enum_(["prompt", "tool", "dataset", "evaluator", "flow"]); + core.serialization.enum_(["prompt", "tool", "dataset", "evaluator", "flow", "agent"]); export declare namespace FileType { - export type Raw = "prompt" | "tool" | "dataset" | "evaluator" | "flow"; + export type Raw = "prompt" | "tool" | "dataset" | "evaluator" | "flow" | "agent"; } diff --git a/src/serialization/types/FilesToolType.ts b/src/serialization/types/FilesToolType.ts index 81dd79bb..3e3c684d 100644 --- a/src/serialization/types/FilesToolType.ts +++ b/src/serialization/types/FilesToolType.ts @@ -7,8 +7,8 @@ import * as Humanloop from "../../api/index"; import * as core from "../../core"; export const FilesToolType: core.serialization.Schema = - core.serialization.enum_(["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call"]); + core.serialization.enum_(["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call", "python"]); export declare namespace FilesToolType { - export type Raw = "pinecone_search" | "google" | "mock" | "snippet" | "json_schema" | "get_api_call"; + export type Raw = "pinecone_search" | "google" | "mock" | "snippet" | "json_schema" | "get_api_call" | "python"; } diff --git a/src/serialization/types/FlowResponse.ts b/src/serialization/types/FlowResponse.ts index 9f0a17b6..67bfda5a 100644 --- a/src/serialization/types/FlowResponse.ts +++ b/src/serialization/types/FlowResponse.ts @@ -19,6 +19,7 @@ export const FlowResponse: core.serialization.ObjectSchema | null; readme?: string | null; tags?: string[] | null; version_id: string; diff --git a/src/serialization/types/LinkedFileRequest.ts b/src/serialization/types/LinkedFileRequest.ts new file mode 100644 index 00000000..335b8404 --- /dev/null +++ b/src/serialization/types/LinkedFileRequest.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */
+
+import * as serializers from "../index";
+import * as Humanloop from "../../api/index";
+import * as core from "../../core";
+
+export const LinkedFileRequest: core.serialization.ObjectSchema<
+    serializers.LinkedFileRequest.Raw,
+    Humanloop.LinkedFileRequest
+> = core.serialization.object({
+    fileId: core.serialization.property("file_id", core.serialization.string()),
+    environmentId: core.serialization.property("environment_id", core.serialization.string().optional()),
+    versionId: core.serialization.property("version_id", core.serialization.string().optional()),
+});
+
+export declare namespace LinkedFileRequest {
+    export interface Raw {
+        file_id: string;
+        environment_id?: string | null;
+        version_id?: string | null;
+    }
+}
diff --git a/src/serialization/types/ListAgents.ts b/src/serialization/types/ListAgents.ts
new file mode 100644
index 00000000..2fe544a1
--- /dev/null
+++ b/src/serialization/types/ListAgents.ts
@@ -0,0 +1,18 @@
+/**
+ * This file was auto-generated by Fern from our API Definition.
+ */
+
+import * as serializers from "../index";
+import * as Humanloop from "../../api/index";
+import * as core from "../../core";
+
+export const ListAgents: core.serialization.ObjectSchema<serializers.ListAgents.Raw, Humanloop.ListAgents> =
+    core.serialization.object({
+        records: core.serialization.list(core.serialization.lazyObject(() => serializers.AgentResponse)),
+    });
+
+export declare namespace ListAgents {
+    export interface Raw {
+        records: serializers.AgentResponse.Raw[];
+    }
+}
diff --git a/src/serialization/types/LogResponse.ts b/src/serialization/types/LogResponse.ts
index 7d62b536..3d43469a 100644
--- a/src/serialization/types/LogResponse.ts
+++ b/src/serialization/types/LogResponse.ts
@@ -12,6 +12,7 @@ export const LogResponse: core.serialization.Schema<serializers.LogResponse.Raw, Humanloop.LogResponse> =
         core.serialization.lazyObject(() => serializers.ToolLogResponse),
         core.serialization.lazyObject(() => serializers.EvaluatorLogResponse),
         core.serialization.lazyObject(() => serializers.FlowLogResponse),
+        core.serialization.lazyObject(() => serializers.AgentLogResponse),
     ]);
 
 export declare namespace LogResponse {
@@ -19,5 +20,6 @@ export declare namespace LogResponse {
         | serializers.PromptLogResponse.Raw
         | serializers.ToolLogResponse.Raw
         | serializers.EvaluatorLogResponse.Raw
-        | serializers.FlowLogResponse.Raw;
+        | serializers.FlowLogResponse.Raw
+        | serializers.AgentLogResponse.Raw;
 }
diff --git a/src/serialization/types/LogStreamResponse.ts b/src/serialization/types/LogStreamResponse.ts
new file mode 100644
index 00000000..dc6696da
--- /dev/null
+++ b/src/serialization/types/LogStreamResponse.ts
@@ -0,0 +1,18 @@
+/**
+ * This file was auto-generated by Fern from our API Definition.
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { PromptCallStreamResponse } from "./PromptCallStreamResponse"; +import { AgentLogStreamResponse } from "./AgentLogStreamResponse"; + +export const LogStreamResponse: core.serialization.Schema< + serializers.LogStreamResponse.Raw, + Humanloop.LogStreamResponse +> = core.serialization.undiscriminatedUnion([PromptCallStreamResponse, AgentLogStreamResponse]); + +export declare namespace LogStreamResponse { + export type Raw = PromptCallStreamResponse.Raw | AgentLogStreamResponse.Raw; +} diff --git a/src/serialization/types/ModelProviders.ts b/src/serialization/types/ModelProviders.ts index 7d9258cf..c35a7e3b 100644 --- a/src/serialization/types/ModelProviders.ts +++ b/src/serialization/types/ModelProviders.ts @@ -8,28 +8,28 @@ import * as core from "../../core"; export const ModelProviders: core.serialization.Schema = core.serialization.enum_([ - "openai", - "openai_azure", - "mock", "anthropic", "bedrock", "cohere", - "replicate", + "deepseek", "google", "groq", - "deepseek", + "mock", + "openai", + "openai_azure", + "replicate", ]); export declare namespace ModelProviders { export type Raw = - | "openai" - | "openai_azure" - | "mock" | "anthropic" | "bedrock" | "cohere" - | "replicate" + | "deepseek" | "google" | "groq" - | "deepseek"; + | "mock" + | "openai" + | "openai_azure" + | "replicate"; } diff --git a/src/serialization/types/OnAgentCallEnum.ts b/src/serialization/types/OnAgentCallEnum.ts new file mode 100644 index 00000000..60b39aa7 --- /dev/null +++ b/src/serialization/types/OnAgentCallEnum.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; + +export const OnAgentCallEnum: core.serialization.Schema = + core.serialization.enum_(["stop", "continue"]); + +export declare namespace OnAgentCallEnum { + export type Raw = "stop" | "continue"; +} diff --git a/src/serialization/types/ReasoningEffort.ts b/src/serialization/types/OpenAiReasoningEffort.ts similarity index 50% rename from src/serialization/types/ReasoningEffort.ts rename to src/serialization/types/OpenAiReasoningEffort.ts index 17b327f4..64f74c71 100644 --- a/src/serialization/types/ReasoningEffort.ts +++ b/src/serialization/types/OpenAiReasoningEffort.ts @@ -6,9 +6,11 @@ import * as serializers from "../index"; import * as Humanloop from "../../api/index"; import * as core from "../../core"; -export const ReasoningEffort: core.serialization.Schema = - core.serialization.enum_(["high", "medium", "low"]); +export const OpenAiReasoningEffort: core.serialization.Schema< + serializers.OpenAiReasoningEffort.Raw, + Humanloop.OpenAiReasoningEffort +> = core.serialization.enum_(["high", "medium", "low"]); -export declare namespace ReasoningEffort { +export declare namespace OpenAiReasoningEffort { export type Raw = "high" | "medium" | "low"; } diff --git a/src/serialization/types/PaginatedDataAgentResponse.ts b/src/serialization/types/PaginatedDataAgentResponse.ts new file mode 100644 index 00000000..d6fe084c --- /dev/null +++ b/src/serialization/types/PaginatedDataAgentResponse.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; + +export const PaginatedDataAgentResponse: core.serialization.ObjectSchema< + serializers.PaginatedDataAgentResponse.Raw, + Humanloop.PaginatedDataAgentResponse +> = core.serialization.object({ + records: core.serialization.list(core.serialization.lazyObject(() => serializers.AgentResponse)), + page: core.serialization.number(), + size: core.serialization.number(), + total: core.serialization.number(), +}); + +export declare namespace PaginatedDataAgentResponse { + export interface Raw { + records: serializers.AgentResponse.Raw[]; + page: number; + size: number; + total: number; + } +} diff --git a/src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse.ts b/src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse.ts similarity index 62% rename from src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse.ts rename to src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse.ts index 1a62fe64..baab6b5c 100644 --- a/src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse.ts +++ b/src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse.ts @@ -5,23 +5,23 @@ import * as serializers from "../index"; import * as Humanloop from "../../api/index"; import * as core from "../../core"; -import { PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem } from "./PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem"; +import { PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem } from "./PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem"; -export const PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse: core.serialization.ObjectSchema< - serializers.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse.Raw, - Humanloop.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse +export const PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse: core.serialization.ObjectSchema< + serializers.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse.Raw, + Humanloop.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse > = core.serialization.object({ records: core.serialization.list( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem, ), page: core.serialization.number(), size: core.serialization.number(), total: core.serialization.number(), }); -export declare namespace PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse { +export declare namespace PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse { 
export interface Raw { - records: PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem.Raw[]; + records: PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem.Raw[]; page: number; size: number; total: number; diff --git a/src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem.ts b/src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem.ts similarity index 69% rename from src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem.ts rename to src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem.ts index 6013cb4b..c4c3188a 100644 --- a/src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem.ts +++ b/src/serialization/types/PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem.ts @@ -7,22 +7,24 @@ import * as Humanloop from "../../api/index"; import * as core from "../../core"; import { DatasetResponse } from "./DatasetResponse"; -export const PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem: core.serialization.Schema< - serializers.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem.Raw, - Humanloop.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem +export const PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem: core.serialization.Schema< + serializers.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem.Raw, + Humanloop.PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem > = core.serialization.undiscriminatedUnion([ core.serialization.lazyObject(() => serializers.PromptResponse), core.serialization.lazyObject(() => serializers.ToolResponse), DatasetResponse, core.serialization.lazyObject(() => serializers.EvaluatorResponse), core.serialization.lazyObject(() => serializers.FlowResponse), + core.serialization.lazyObject(() => serializers.AgentResponse), ]); -export declare namespace PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem { +export declare namespace PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem { export type Raw = | serializers.PromptResponse.Raw | serializers.ToolResponse.Raw | DatasetResponse.Raw | serializers.EvaluatorResponse.Raw - | serializers.FlowResponse.Raw; + | serializers.FlowResponse.Raw + | serializers.AgentResponse.Raw; } diff --git a/src/serialization/types/PopulateTemplateResponse.ts b/src/serialization/types/PopulateTemplateResponse.ts index 21a2dfd3..89574558 100644 --- a/src/serialization/types/PopulateTemplateResponse.ts +++ b/src/serialization/types/PopulateTemplateResponse.ts @@ -11,7 +11,7 @@ import { TemplateLanguage } from "./TemplateLanguage"; import { ModelProviders } from "./ModelProviders"; import { PopulateTemplateResponseStop } from "./PopulateTemplateResponseStop"; import { ResponseFormat 
} from "./ResponseFormat"; -import { ReasoningEffort } from "./ReasoningEffort"; +import { PopulateTemplateResponseReasoningEffort } from "./PopulateTemplateResponseReasoningEffort"; import { ToolFunction } from "./ToolFunction"; import { LinkedToolResponse } from "./LinkedToolResponse"; import { EnvironmentResponse } from "./EnvironmentResponse"; @@ -41,7 +41,10 @@ export const PopulateTemplateResponse: core.serialization.ObjectSchema< other: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), seed: core.serialization.number().optional(), responseFormat: core.serialization.property("response_format", ResponseFormat.optional()), - reasoningEffort: core.serialization.property("reasoning_effort", ReasoningEffort.optional()), + reasoningEffort: core.serialization.property( + "reasoning_effort", + PopulateTemplateResponseReasoningEffort.optional(), + ), tools: core.serialization.list(ToolFunction).optional(), linkedTools: core.serialization.property("linked_tools", core.serialization.list(LinkedToolResponse).optional()), attributes: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), @@ -51,6 +54,7 @@ export const PopulateTemplateResponse: core.serialization.ObjectSchema< tags: core.serialization.list(core.serialization.string()).optional(), readme: core.serialization.string().optional(), name: core.serialization.string(), + schema: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), versionId: core.serialization.property("version_id", core.serialization.string()), type: core.serialization.stringLiteral("prompt").optional(), environments: core.serialization.list(EnvironmentResponse).optional(), @@ -93,7 +97,7 @@ export declare namespace PopulateTemplateResponse { other?: Record | null; seed?: number | null; response_format?: ResponseFormat.Raw | null; - reasoning_effort?: ReasoningEffort.Raw | null; + reasoning_effort?: PopulateTemplateResponseReasoningEffort.Raw | null; tools?: ToolFunction.Raw[] | null; linked_tools?: LinkedToolResponse.Raw[] | null; attributes?: Record | null; @@ -103,6 +107,7 @@ export declare namespace PopulateTemplateResponse { tags?: string[] | null; readme?: string | null; name: string; + schema?: Record | null; version_id: string; type?: "prompt" | null; environments?: EnvironmentResponse.Raw[] | null; diff --git a/src/serialization/types/PopulateTemplateResponseReasoningEffort.ts b/src/serialization/types/PopulateTemplateResponseReasoningEffort.ts new file mode 100644 index 00000000..d462eda7 --- /dev/null +++ b/src/serialization/types/PopulateTemplateResponseReasoningEffort.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { OpenAiReasoningEffort } from "./OpenAiReasoningEffort"; + +export const PopulateTemplateResponseReasoningEffort: core.serialization.Schema< + serializers.PopulateTemplateResponseReasoningEffort.Raw, + Humanloop.PopulateTemplateResponseReasoningEffort +> = core.serialization.undiscriminatedUnion([OpenAiReasoningEffort, core.serialization.number()]); + +export declare namespace PopulateTemplateResponseReasoningEffort { + export type Raw = OpenAiReasoningEffort.Raw | number; +} diff --git a/src/serialization/types/PromptKernelRequest.ts b/src/serialization/types/PromptKernelRequest.ts index 9b69212c..39805926 100644 --- a/src/serialization/types/PromptKernelRequest.ts +++ b/src/serialization/types/PromptKernelRequest.ts @@ -11,7 +11,7 @@ import { TemplateLanguage } from "./TemplateLanguage"; import { ModelProviders } from "./ModelProviders"; import { PromptKernelRequestStop } from "./PromptKernelRequestStop"; import { ResponseFormat } from "./ResponseFormat"; -import { ReasoningEffort } from "./ReasoningEffort"; +import { PromptKernelRequestReasoningEffort } from "./PromptKernelRequestReasoningEffort"; import { ToolFunction } from "./ToolFunction"; export const PromptKernelRequest: core.serialization.ObjectSchema< @@ -32,7 +32,7 @@ export const PromptKernelRequest: core.serialization.ObjectSchema< other: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), seed: core.serialization.number().optional(), responseFormat: core.serialization.property("response_format", ResponseFormat.optional()), - reasoningEffort: core.serialization.property("reasoning_effort", ReasoningEffort.optional()), + reasoningEffort: core.serialization.property("reasoning_effort", PromptKernelRequestReasoningEffort.optional()), tools: core.serialization.list(ToolFunction).optional(), linkedTools: core.serialization.property( "linked_tools", @@ -57,7 +57,7 @@ export declare namespace PromptKernelRequest { other?: Record | null; seed?: number | null; response_format?: ResponseFormat.Raw | null; - reasoning_effort?: ReasoningEffort.Raw | null; + reasoning_effort?: PromptKernelRequestReasoningEffort.Raw | null; tools?: ToolFunction.Raw[] | null; linked_tools?: string[] | null; attributes?: Record | null; diff --git a/src/serialization/types/PromptKernelRequestReasoningEffort.ts b/src/serialization/types/PromptKernelRequestReasoningEffort.ts new file mode 100644 index 00000000..89806f2e --- /dev/null +++ b/src/serialization/types/PromptKernelRequestReasoningEffort.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { OpenAiReasoningEffort } from "./OpenAiReasoningEffort"; + +export const PromptKernelRequestReasoningEffort: core.serialization.Schema< + serializers.PromptKernelRequestReasoningEffort.Raw, + Humanloop.PromptKernelRequestReasoningEffort +> = core.serialization.undiscriminatedUnion([OpenAiReasoningEffort, core.serialization.number()]); + +export declare namespace PromptKernelRequestReasoningEffort { + export type Raw = OpenAiReasoningEffort.Raw | number; +} diff --git a/src/serialization/types/PromptResponse.ts b/src/serialization/types/PromptResponse.ts index e6a5cdc5..04764187 100644 --- a/src/serialization/types/PromptResponse.ts +++ b/src/serialization/types/PromptResponse.ts @@ -11,7 +11,7 @@ import { TemplateLanguage } from "./TemplateLanguage"; import { ModelProviders } from "./ModelProviders"; import { PromptResponseStop } from "./PromptResponseStop"; import { ResponseFormat } from "./ResponseFormat"; -import { ReasoningEffort } from "./ReasoningEffort"; +import { PromptResponseReasoningEffort } from "./PromptResponseReasoningEffort"; import { ToolFunction } from "./ToolFunction"; import { LinkedToolResponse } from "./LinkedToolResponse"; import { EnvironmentResponse } from "./EnvironmentResponse"; @@ -38,7 +38,7 @@ export const PromptResponse: core.serialization.ObjectSchema | null; seed?: number | null; response_format?: ResponseFormat.Raw | null; - reasoning_effort?: ReasoningEffort.Raw | null; + reasoning_effort?: PromptResponseReasoningEffort.Raw | null; tools?: ToolFunction.Raw[] | null; linked_tools?: LinkedToolResponse.Raw[] | null; attributes?: Record | null; @@ -99,6 +100,7 @@ export declare namespace PromptResponse { tags?: string[] | null; readme?: string | null; name: string; + schema?: Record | null; version_id: string; type?: "prompt" | null; environments?: EnvironmentResponse.Raw[] | null; diff --git a/src/serialization/types/PromptResponseReasoningEffort.ts b/src/serialization/types/PromptResponseReasoningEffort.ts new file mode 100644 index 00000000..7c602915 --- /dev/null +++ b/src/serialization/types/PromptResponseReasoningEffort.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { OpenAiReasoningEffort } from "./OpenAiReasoningEffort"; + +export const PromptResponseReasoningEffort: core.serialization.Schema< + serializers.PromptResponseReasoningEffort.Raw, + Humanloop.PromptResponseReasoningEffort +> = core.serialization.undiscriminatedUnion([OpenAiReasoningEffort, core.serialization.number()]); + +export declare namespace PromptResponseReasoningEffort { + export type Raw = OpenAiReasoningEffort.Raw | number; +} diff --git a/src/serialization/types/RunVersionResponse.ts b/src/serialization/types/RunVersionResponse.ts index 68b48141..b5010bae 100644 --- a/src/serialization/types/RunVersionResponse.ts +++ b/src/serialization/types/RunVersionResponse.ts @@ -14,6 +14,7 @@ export const RunVersionResponse: core.serialization.Schema< core.serialization.lazyObject(() => serializers.ToolResponse), core.serialization.lazyObject(() => serializers.EvaluatorResponse), core.serialization.lazyObject(() => serializers.FlowResponse), + core.serialization.lazyObject(() => serializers.AgentResponse), ]); export declare namespace RunVersionResponse { @@ -21,5 +22,6 @@ export declare namespace RunVersionResponse { | serializers.PromptResponse.Raw | serializers.ToolResponse.Raw | serializers.EvaluatorResponse.Raw - | serializers.FlowResponse.Raw; + | serializers.FlowResponse.Raw + | serializers.AgentResponse.Raw; } diff --git a/src/serialization/types/ToolCallResponse.ts b/src/serialization/types/ToolCallResponse.ts new file mode 100644 index 00000000..ff6eaa55 --- /dev/null +++ b/src/serialization/types/ToolCallResponse.ts @@ -0,0 +1,83 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../index"; +import * as Humanloop from "../../api/index"; +import * as core from "../../core"; +import { LogStatus } from "./LogStatus"; + +export const ToolCallResponse: core.serialization.ObjectSchema< + serializers.ToolCallResponse.Raw, + Humanloop.ToolCallResponse +> = core.serialization.object({ + startTime: core.serialization.property("start_time", core.serialization.date().optional()), + endTime: core.serialization.property("end_time", core.serialization.date().optional()), + tool: core.serialization.lazyObject(() => serializers.ToolResponse), + output: core.serialization.string().optional(), + createdAt: core.serialization.property("created_at", core.serialization.date().optional()), + error: core.serialization.string().optional(), + providerLatency: core.serialization.property("provider_latency", core.serialization.number().optional()), + stdout: core.serialization.string().optional(), + providerRequest: core.serialization.property( + "provider_request", + core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + ), + providerResponse: core.serialization.property( + "provider_response", + core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + ), + inputs: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + source: core.serialization.string().optional(), + metadata: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), + logStatus: core.serialization.property("log_status", LogStatus.optional()), + sourceDatapointId: core.serialization.property("source_datapoint_id", core.serialization.string().optional()), + traceParentId: core.serialization.property("trace_parent_id", core.serialization.string().optional()), + batches: core.serialization.list(core.serialization.string()).optional(), + user: core.serialization.string().optional(), + environment: core.serialization.string().optional(), + save: core.serialization.boolean().optional(), + logId: core.serialization.property("log_id", core.serialization.string().optional()), + id: core.serialization.string(), + evaluatorLogs: core.serialization.property( + "evaluator_logs", + core.serialization.list(core.serialization.lazyObject(() => serializers.EvaluatorLogResponse)), + ), + traceFlowId: core.serialization.property("trace_flow_id", core.serialization.string().optional()), + traceId: core.serialization.property("trace_id", core.serialization.string().optional()), + traceChildren: core.serialization.property( + "trace_children", + core.serialization.list(core.serialization.lazy(() => serializers.LogResponse)).optional(), + ), +}); + +export declare namespace ToolCallResponse { + export interface Raw { + start_time?: string | null; + end_time?: string | null; + tool: serializers.ToolResponse.Raw; + output?: string | null; + created_at?: string | null; + error?: string | null; + provider_latency?: number | null; + stdout?: string | null; + provider_request?: Record | null; + provider_response?: Record | null; + inputs?: Record | null; + source?: string | null; + metadata?: Record | null; + log_status?: LogStatus.Raw | null; + source_datapoint_id?: string | null; + trace_parent_id?: string | null; + batches?: string[] | null; + user?: string | null; + environment?: string | null; + save?: boolean | null; + log_id?: string | null; + id: string; + evaluator_logs: serializers.EvaluatorLogResponse.Raw[]; + trace_flow_id?: string | 
null; + trace_id?: string | null; + trace_children?: serializers.LogResponse.Raw[] | null; + } +} diff --git a/src/serialization/types/ToolLogResponse.ts b/src/serialization/types/ToolLogResponse.ts index 8fe4e521..53d2d62c 100644 --- a/src/serialization/types/ToolLogResponse.ts +++ b/src/serialization/types/ToolLogResponse.ts @@ -6,6 +6,7 @@ import * as serializers from "../index"; import * as Humanloop from "../../api/index"; import * as core from "../../core"; import { LogStatus } from "./LogStatus"; +import { ChatMessage } from "./ChatMessage"; export const ToolLogResponse: core.serialization.ObjectSchema< serializers.ToolLogResponse.Raw, @@ -49,6 +50,7 @@ export const ToolLogResponse: core.serialization.ObjectSchema< core.serialization.list(core.serialization.lazy(() => serializers.LogResponse)).optional(), ), tool: core.serialization.lazyObject(() => serializers.ToolResponse), + outputMessage: core.serialization.property("output_message", ChatMessage.optional()), }); export declare namespace ToolLogResponse { @@ -79,5 +81,6 @@ export declare namespace ToolLogResponse { trace_id?: string | null; trace_children?: serializers.LogResponse.Raw[] | null; tool: serializers.ToolResponse.Raw; + output_message?: ChatMessage.Raw | null; } } diff --git a/src/serialization/types/VersionDeploymentResponseFile.ts b/src/serialization/types/VersionDeploymentResponseFile.ts index 3ca1d69b..4e77d802 100644 --- a/src/serialization/types/VersionDeploymentResponseFile.ts +++ b/src/serialization/types/VersionDeploymentResponseFile.ts @@ -16,6 +16,7 @@ export const VersionDeploymentResponseFile: core.serialization.Schema< DatasetResponse, core.serialization.lazyObject(() => serializers.EvaluatorResponse), core.serialization.lazyObject(() => serializers.FlowResponse), + core.serialization.lazyObject(() => serializers.AgentResponse), ]); export declare namespace VersionDeploymentResponseFile { @@ -24,5 +25,6 @@ export declare namespace VersionDeploymentResponseFile { | serializers.ToolResponse.Raw | DatasetResponse.Raw | serializers.EvaluatorResponse.Raw - | serializers.FlowResponse.Raw; + | serializers.FlowResponse.Raw + | serializers.AgentResponse.Raw; } diff --git a/src/serialization/types/VersionIdResponseVersion.ts b/src/serialization/types/VersionIdResponseVersion.ts index 45b95dda..8c4e93ed 100644 --- a/src/serialization/types/VersionIdResponseVersion.ts +++ b/src/serialization/types/VersionIdResponseVersion.ts @@ -16,6 +16,7 @@ export const VersionIdResponseVersion: core.serialization.Schema< DatasetResponse, core.serialization.lazyObject(() => serializers.EvaluatorResponse), core.serialization.lazyObject(() => serializers.FlowResponse), + core.serialization.lazyObject(() => serializers.AgentResponse), ]); export declare namespace VersionIdResponseVersion { @@ -24,5 +25,6 @@ export declare namespace VersionIdResponseVersion { | serializers.ToolResponse.Raw | DatasetResponse.Raw | serializers.EvaluatorResponse.Raw - | serializers.FlowResponse.Raw; + | serializers.FlowResponse.Raw + | serializers.AgentResponse.Raw; } diff --git a/src/serialization/types/index.ts b/src/serialization/types/index.ts index 02032449..50b0fe8b 100644 --- a/src/serialization/types/index.ts +++ b/src/serialization/types/index.ts @@ -1,9 +1,38 @@ +export * from "./AgentCallResponseToolChoice"; +export * from "./AgentCallResponse"; +export * from "./AgentCallStreamResponsePayload"; +export * from "./AgentCallStreamResponse"; +export * from "./AgentContinueCallResponseToolChoice"; +export * from "./AgentContinueCallResponse"; 
+export * from "./AgentContinueCallStreamResponsePayload"; +export * from "./AgentContinueCallStreamResponse"; +export * from "./AgentInlineTool"; +export * from "./AgentKernelRequestTemplate"; +export * from "./AgentKernelRequestStop"; +export * from "./AgentKernelRequestReasoningEffort"; +export * from "./AgentKernelRequestToolsItem"; +export * from "./AgentKernelRequest"; +export * from "./AgentLinkedFileRequest"; +export * from "./AgentLinkedFileResponseFile"; +export * from "./AgentLinkedFileResponse"; +export * from "./AgentLogResponseToolChoice"; +export * from "./AgentLogResponse"; +export * from "./AgentLogStreamResponse"; +export * from "./AgentResponseTemplate"; +export * from "./AgentResponseStop"; +export * from "./AgentResponseReasoningEffort"; +export * from "./AgentResponseToolsItem"; +export * from "./AgentResponse"; +export * from "./AnthropicRedactedThinkingContent"; +export * from "./AnthropicThinkingContent"; export * from "./BooleanEvaluatorStatsResponse"; export * from "./ChatMessageContentItem"; export * from "./ChatMessageContent"; +export * from "./ChatMessageThinkingItem"; export * from "./ChatMessage"; export * from "./ChatRole"; export * from "./CodeEvaluatorRequest"; +export * from "./CreateAgentLogResponse"; export * from "./CreateDatapointRequestTargetValue"; export * from "./CreateDatapointRequest"; export * from "./CreateEvaluatorLogResponse"; @@ -43,9 +72,11 @@ export * from "./EvaluatorResponseSpec"; export * from "./EvaluatorResponse"; export * from "./EvaluatorReturnTypeEnum"; export * from "./EvaluatorVersionId"; +export * from "./EventType"; export * from "./ExternalEvaluatorRequest"; export * from "./FileEnvironmentResponseFile"; export * from "./FileEnvironmentResponse"; +export * from "./FileEnvironmentVariableRequest"; export * from "./FileId"; export * from "./FilePath"; export * from "./FileRequest"; @@ -63,7 +94,9 @@ export * from "./ImageUrlDetail"; export * from "./ImageUrl"; export * from "./InputResponse"; export * from "./LlmEvaluatorRequest"; +export * from "./LinkedFileRequest"; export * from "./LinkedToolResponse"; +export * from "./ListAgents"; export * from "./ListDatasets"; export * from "./ListEvaluators"; export * from "./ListFlows"; @@ -71,6 +104,7 @@ export * from "./ListPrompts"; export * from "./ListTools"; export * from "./LogResponse"; export * from "./LogStatus"; +export * from "./LogStreamResponse"; export * from "./ModelEndpoints"; export * from "./ModelProviders"; export * from "./MonitoringEvaluatorEnvironmentRequest"; @@ -79,7 +113,10 @@ export * from "./MonitoringEvaluatorState"; export * from "./MonitoringEvaluatorVersionRequest"; export * from "./NumericEvaluatorStatsResponse"; export * from "./ObservabilityStatus"; +export * from "./OnAgentCallEnum"; +export * from "./OpenAiReasoningEffort"; export * from "./OverallStats"; +export * from "./PaginatedDataAgentResponse"; export * from "./PaginatedDatapointResponse"; export * from "./PaginatedDatasetResponse"; export * from "./PaginatedDataEvaluationLogResponse"; @@ -89,11 +126,12 @@ export * from "./PaginatedDataFlowResponse"; export * from "./PaginatedDataLogResponse"; export * from "./PaginatedDataPromptResponse"; export * from "./PaginatedDataToolResponse"; -export * from "./PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem"; -export * from "./PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse"; +export * from 
"./PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem"; +export * from "./PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse"; export * from "./PlatformAccessEnum"; export * from "./PopulateTemplateResponseTemplate"; export * from "./PopulateTemplateResponseStop"; +export * from "./PopulateTemplateResponseReasoningEffort"; export * from "./PopulateTemplateResponsePopulatedTemplate"; export * from "./PopulateTemplateResponse"; export * from "./ProjectSortBy"; @@ -103,14 +141,15 @@ export * from "./PromptCallResponse"; export * from "./PromptCallStreamResponse"; export * from "./PromptKernelRequestTemplate"; export * from "./PromptKernelRequestStop"; +export * from "./PromptKernelRequestReasoningEffort"; export * from "./PromptKernelRequest"; export * from "./PromptLogResponseToolChoice"; export * from "./PromptLogResponse"; export * from "./PromptResponseTemplate"; export * from "./PromptResponseStop"; +export * from "./PromptResponseReasoningEffort"; export * from "./PromptResponse"; export * from "./ProviderApiKeys"; -export * from "./ReasoningEffort"; export * from "./ResponseFormatType"; export * from "./ResponseFormat"; export * from "./RunStatsResponseEvaluatorStatsItem"; @@ -123,6 +162,7 @@ export * from "./TextChatContent"; export * from "./TextEvaluatorStatsResponse"; export * from "./TimeUnit"; export * from "./ToolCall"; +export * from "./ToolCallResponse"; export * from "./ToolChoice"; export * from "./ToolFunction"; export * from "./ToolKernelRequest"; diff --git a/src/version.ts b/src/version.ts index fa30d486..e0c1405e 100644 --- a/src/version.ts +++ b/src/version.ts @@ -1 +1 @@ -export const SDK_VERSION = "0.8.20"; +export const SDK_VERSION = "0.8.21-beta1"; diff --git a/tests/integration/decorators.test.ts b/tests/integration/decorators.test.ts new file mode 100644 index 00000000..0cddc948 --- /dev/null +++ b/tests/integration/decorators.test.ts @@ -0,0 +1,502 @@ +import OpenAI from "openai"; + +import { PromptRequest } from "../../src/api"; +import { HumanloopRuntimeError } from "../../src/error"; +import { + CleanupResources, + TestPrompt, + TestSetup, + cleanupTestEnvironment, + setupTestEnvironment, +} from "./fixtures"; + +// Long timeout per test +jest.setTimeout(30 * 1000); + +// process.stdout.moveCursor is undefined in jest; mocking it since STDOUT is not relevant +if (typeof process.stdout.moveCursor !== "function") { + process.stdout.moveCursor = ( + dx: number, + dy: number, + callback?: () => void, + ): boolean => { + if (callback) callback(); + return true; + }; +} + +/** + * Creates a test prompt in the specified test environment + */ +async function createTestPrompt( + setup: TestSetup, + name: string = "test_prompt", + customConfig?: Partial, +): Promise { + const promptPath = `${setup.sdkTestDir.path}/${name}`; + const config = customConfig + ? 
{ ...setup.testPromptConfig, ...customConfig } + : setup.testPromptConfig; + + const promptResponse = await setup.humanloopClient.prompts.upsert({ + path: promptPath, + ...config, + }); + + return { + id: promptResponse.id, + path: promptPath, + response: promptResponse, + }; +} + +/** + * Creates a base function for LLM calls that can be decorated + */ +function createBaseLLMFunction(setup: TestSetup, model: string = "gpt-4o-mini") { + return async (question: string): Promise => { + const openaiClient = new OpenAI({ apiKey: setup.openaiApiKey }); + + const response = await openaiClient.chat.completions.create({ + model: model, + messages: [{ role: "user", content: question }], + }); + + return response.choices[0].message.content || ""; + }; +} + +/** + * Applies the prompt decorator to a function and tests it + */ +async function testPromptDecorator( + setup: TestSetup, + prompt: TestPrompt, + input: string = "What is the capital of the France?", + expectedSubstring: string = "paris", +): Promise { + // Create the base function + const myPromptBase = createBaseLLMFunction(setup); + + // Apply the higher-order function instead of decorator + const myPrompt = setup.humanloopClient.prompt({ + path: prompt.path, + callable: myPromptBase, + }); + + // Call the decorated function + const result = await myPrompt(input); + if (result) { + expect(result.toLowerCase()).toContain(expectedSubstring.toLowerCase()); + } else { + throw new Error("Expected result to be defined"); + } + + // Wait for 5 seconds for the log to be created + await new Promise((resolve) => setTimeout(resolve, 5000)); +} + +describe("decorators", () => { + it("should create a prompt log when using the decorator", async () => { + let testSetup: TestSetup | undefined = undefined; + let testPrompt: TestPrompt | undefined = undefined; + + try { + testSetup = await setupTestEnvironment("test_prompt_call_decorator"); + // Create test prompt + testPrompt = await createTestPrompt(testSetup); + + // Check initial version count + const promptVersionsResponse = + await testSetup.humanloopClient.prompts.listVersions(testPrompt.id); + expect(promptVersionsResponse.records.length).toBe(1); + + // Test the prompt decorator + await testPromptDecorator(testSetup, testPrompt); + + // Verify a new version was created + const updatedPromptVersionsResponse = + await testSetup.humanloopClient.prompts.listVersions(testPrompt.id); + expect(updatedPromptVersionsResponse.records.length).toBe(2); + + // Verify logs were created + const logsResponse = await testSetup.humanloopClient.logs.list({ + fileId: testPrompt.id, + page: 1, + size: 50, + }); + expect(logsResponse.data.length).toBe(1); + } catch (error) { + // Make sure to clean up if the test fails + const cleanupResources: CleanupResources[] = []; + if (testPrompt) { + cleanupResources.push({ + type: "prompt", + id: testPrompt.id, + }); + } + if (testSetup) { + await cleanupTestEnvironment(testSetup, cleanupResources); + } + throw error; + } + }); + + it("should create logs with proper tracing when using prompt in flow decorator", async () => { + let testSetup: TestSetup | undefined = undefined; + let flowId: string | null = null; + let promptId: string | null = null; + + try { + // Create test flow and prompt paths + testSetup = await setupTestEnvironment("test_flow_decorator"); + const flowPath = `${testSetup.sdkTestDir.path}/test_flow`; + const promptPath = `${testSetup.sdkTestDir.path}/test_prompt`; + + // Create the prompt + const promptResponse = await 
testSetup.humanloopClient.prompts.upsert({ + path: promptPath, + provider: "openai", + model: "gpt-4o-mini", + temperature: 0, + }); + promptId = promptResponse.id; + + // Define the flow callable function with the correct type signature + const flowCallable = async (question: { + question: string; + }): Promise<string> => { + const response = await testSetup!.humanloopClient.prompts.call({ + path: promptPath, + messages: [{ role: "user", content: question.question }], + providerApiKeys: { openai: testSetup!.openaiApiKey }, + }); + + const output = response.logs?.[0]?.output; + expect(output).not.toBeNull(); + return output || ""; + }; + + // Apply the flow decorator + const myFlow = testSetup.humanloopClient.flow({ + path: flowPath, + callable: flowCallable, + }); + + // Call the flow with the expected input format + const result = await myFlow({ + question: "What is the capital of the France?", + }); + expect(result?.toLowerCase()).toContain("paris"); + + // Wait for logs to be created + await new Promise((resolve) => setTimeout(resolve, 5000)); + + // Verify prompt logs + const promptRetrieveResponse = + await testSetup.humanloopClient.files.retrieveByPath({ + path: promptPath, + }); + expect(promptRetrieveResponse).not.toBeNull(); + const promptLogsResponse = await testSetup.humanloopClient.logs.list({ + fileId: promptRetrieveResponse.id, + page: 1, + size: 50, + }); + expect(promptLogsResponse.data.length).toBe(1); + const promptLog = promptLogsResponse.data[0]; + + // Verify flow logs + const flowRetrieveResponse = + await testSetup.humanloopClient.files.retrieveByPath({ + path: flowPath, + }); + expect(flowRetrieveResponse).not.toBeNull(); + flowId = flowRetrieveResponse.id; + const flowLogsResponse = await testSetup.humanloopClient.logs.list({ + fileId: flowRetrieveResponse.id, + page: 1, + size: 50, + }); + expect(flowLogsResponse.data.length).toBe(1); + const flowLog = flowLogsResponse.data[0]; + + // Verify tracing between logs + expect(promptLog.traceParentId).toBe(flowLog.id); + } finally { + // Clean up resources + const cleanupResources: CleanupResources[] = []; + if (flowId) { + cleanupResources.push({ + type: "flow", + id: flowId, + }); + } + if (promptId) { + cleanupResources.push({ + type: "prompt", + id: promptId, + }); + } + if (testSetup) { + await cleanupTestEnvironment(testSetup, cleanupResources); + } + } + }); + + it("should log exceptions when using the flow decorator", async () => { + let testSetup: TestSetup | undefined = undefined; + let flowId: string | null = null; + + try { + // Create test flow path + testSetup = await setupTestEnvironment("test_flow_decorator"); + const flowPath = `${testSetup.sdkTestDir.path}/test_flow_log_error`; + + // Define a flow callable that throws an error + const flowCallable = async ({ + question, + }: { + question: string; + }): Promise<string> => { + throw new Error("This is a test exception"); + }; + + // Apply the flow decorator + const myFlow = testSetup.humanloopClient.flow({ + path: flowPath, + callable: flowCallable, + }); + + // Call the flow and expect it to throw + try { + await myFlow({ question: "test" }); + // If we get here, the test should fail + throw new Error("Expected flow to throw an error but it didn't"); + } catch (error) { + // Expected error + expect(error).toBeDefined(); + } + + // Wait for logs to be created + await new Promise((resolve) => setTimeout(resolve, 5000)); + + // Verify flow logs + const flowRetrieveResponse = + await testSetup.humanloopClient.files.retrieveByPath({ + path: flowPath, + }); +
expect(flowRetrieveResponse).not.toBeNull(); + flowId = flowRetrieveResponse.id; + + const flowLogsResponse = await testSetup.humanloopClient.logs.list({ + fileId: flowRetrieveResponse.id, + page: 1, + size: 50, + }); + expect(flowLogsResponse.data.length).toBe(1); + + const flowLog = flowLogsResponse.data[0]; + expect(flowLog.error).not.toBeUndefined(); + expect(flowLog.output).toBeUndefined(); + } finally { + if (testSetup) { + await cleanupTestEnvironment( + testSetup, + flowId + ? [ + { + type: "flow", + id: flowId, + }, + ] + : [], + ); + } + } + }); + + it("should populate outputMessage when flow returns chat message format", async () => { + let testSetup: TestSetup | undefined = undefined; + let flowId: string | null = null; + + try { + // Create test flow path + testSetup = await setupTestEnvironment("test_flow_decorator"); + const flowPath = `${testSetup.sdkTestDir.path}/test_flow_log_output_message`; + + // Define a flow callable that returns a chat message format + const flowCallable = async ({ question }: { question: string }) => { + return { + role: "user", + content: question, + }; + }; + + // Apply the flow decorator + const myFlow = testSetup.humanloopClient.flow({ + path: flowPath, + callable: flowCallable, + }); + + // Call the flow and check the returned message + const result = await myFlow({ + question: "What is the capital of the France?", + }); + expect(result?.content.toLowerCase()).toContain("france"); + + // Wait for logs to be created + await new Promise((resolve) => setTimeout(resolve, 5000)); + + // Verify flow logs + const flowRetrieveResponse = + await testSetup.humanloopClient.files.retrieveByPath({ + path: flowPath, + }); + expect(flowRetrieveResponse).not.toBeNull(); + flowId = flowRetrieveResponse.id; + + const flowLogsResponse = await testSetup.humanloopClient.logs.list({ + fileId: flowRetrieveResponse.id, + page: 1, + size: 50, + }); + expect(flowLogsResponse.data.length).toBe(1); + + const flowLog = flowLogsResponse.data[0]; + expect(flowLog.outputMessage).not.toBeUndefined(); + expect(flowLog.output).toBeUndefined(); + expect(flowLog.error).toBeUndefined(); + } finally { + // Clean up resources + if (flowId) { + await testSetup!.humanloopClient.flows.delete(flowId); + } + if (testSetup) { + await cleanupTestEnvironment( + testSetup, + flowId + ? 
[ + { + type: "flow", + id: flowId, + }, + ] + : [], + ); + } + } + }); + + it("should run evaluations on a flow decorator", async () => { + let testSetup: TestSetup | undefined = undefined; + let flowId: string | null = null; + + try { + // Use fixtures from testSetup + testSetup = await setupTestEnvironment("eval-flow-decorator"); + if (!testSetup.evalDataset || !testSetup.outputNotNullEvaluator) { + throw new Error("Required fixtures are not initialized"); + } + + // Create test flow path + const flowPath = `${testSetup.sdkTestDir.path}/test_flow_evaluate`; + + // Define flow decorated function + const myFlow = testSetup.humanloopClient.flow({ + path: flowPath, + callable: async (inputs: { question: string }) => { + return "paris"; + }, + }); + + // Run evaluation on the flow + await testSetup.humanloopClient.evaluations.run({ + name: "Evaluate Flow Decorator", + file: { + path: flowPath, + callable: myFlow, + type: "flow", + }, + dataset: { + path: testSetup.evalDataset.path, + }, + evaluators: [ + { + path: testSetup.outputNotNullEvaluator.path, + }, + ], + }); + + // Get the flow ID for cleanup + const flowResponse = await testSetup.humanloopClient.files.retrieveByPath({ + path: flowPath, + }); + flowId = flowResponse.id; + } finally { + if (testSetup) { + await cleanupTestEnvironment( + testSetup, + flowId + ? [ + { + type: "flow", + id: flowId, + }, + ] + : [], + ); + } + } + }); + + it("should throw error when using non-existent file ID instead of path", async () => { + // Use fixtures from testSetup + let testSetup: TestSetup | undefined = undefined; + try { + testSetup = await setupTestEnvironment("eval-flow-decorator"); + if (!testSetup.evalDataset || !testSetup.outputNotNullEvaluator) { + throw new Error("Required fixtures are not initialized"); + } + // Define a simple callable + const simpleCallable = (x: any) => x; + + // Expect the evaluation to throw an error with a non-existent file ID + try { + await testSetup.humanloopClient.evaluations.run({ + name: "Evaluate Flow Decorator", + file: { + id: "non-existent-file-id", + type: "flow", + version: { + attributes: { + foo: "bar", + }, + }, + callable: simpleCallable, + }, + dataset: { + path: testSetup.evalDataset.path, + }, + evaluators: [ + { + path: testSetup.outputNotNullEvaluator.path, + }, + ], + }); + + // If we get here, the test should fail + throw new Error("Expected HumanloopRuntimeError but none was thrown"); + } catch (error) { + expect(error).toBeInstanceOf(HumanloopRuntimeError); + expect((error as HumanloopRuntimeError).message).toContain( + "File does not exist on Humanloop. 
Please provide a `file.path` and a version to create a new version.", + ); + } + } finally { + if (testSetup) { + await cleanupTestEnvironment(testSetup); + } + } + }); +}); diff --git a/tests/integration/evals.test.ts b/tests/integration/evals.test.ts new file mode 100644 index 00000000..17d8a399 --- /dev/null +++ b/tests/integration/evals.test.ts @@ -0,0 +1,577 @@ +import { FlowResponse } from "../../src/api"; +import { HumanloopRuntimeError } from "../../src/error"; +import { HumanloopClient } from "../../src/humanloop.client"; +import { + cleanupTestEnvironment, + readEnvironment, + setupTestEnvironment, +} from "./fixtures"; + +// process.stdout.moveCursor is undefined in jest; mocking it since STDOUT is not relevant +if (typeof process.stdout.moveCursor !== "function") { + process.stdout.moveCursor = ( + dx: number, + dy: number, + callback?: () => void, + ): boolean => { + if (callback) callback(); + return true; + }; +} + +// Long timeout per test; evals might take a while to run +jest.setTimeout(30 * 1000); + +interface TestIdentifiers { + id: string; + path: string; +} + +interface TestSetup { + sdkTestDir: TestIdentifiers; + outputNotNullEvaluator: TestIdentifiers; + evalDataset: TestIdentifiers; + evalPrompt: TestIdentifiers; + stagingEnvironmentId: string; +} + +describe("Evals", () => { + let humanloopClient: HumanloopClient; + let openaiApiKey: string; + + beforeAll(async () => { + readEnvironment(); + if (!process.env.HUMANLOOP_API_KEY) { + throw new Error("HUMANLOOP_API_KEY is not set"); + } + if (!process.env.OPENAI_API_KEY) { + throw new Error("OPENAI_API_KEY is not set for integration tests"); + } + openaiApiKey = process.env.OPENAI_API_KEY; + humanloopClient = new HumanloopClient({ + apiKey: process.env.HUMANLOOP_API_KEY, + }); + }); + + it("should be able to import HumanloopClient", async () => { + const client = new HumanloopClient({ apiKey: process.env.HUMANLOOP_API_KEY }); + expect(client).toBeDefined(); + }); + + it("should run evaluation on online files", async () => { + // Setup test-specific environment + const setup = await setupTestEnvironment("online_files"); + + try { + await humanloopClient.evaluations.run({ + file: { + path: setup.evalPrompt.path, + type: "prompt", + }, + dataset: { + path: setup.evalDataset.path, + }, + name: "test_eval_run", + evaluators: [ + { + path: setup.outputNotNullEvaluator.path, + }, + ], + }); + + // Wait for evaluation to complete + await new Promise((resolve) => setTimeout(resolve, 5000)); + + const evalResponse = await humanloopClient.evaluations.list({ + fileId: setup.evalPrompt.id, + }); + expect(evalResponse.data.length).toBe(1); + + const evaluationId = evalResponse.data[0].id; + const runsResponse = + await humanloopClient.evaluations.listRunsForEvaluation(evaluationId); + expect(runsResponse.runs[0].status).toBe("completed"); + } finally { + // Clean up test-specific resources + await cleanupTestEnvironment(setup); + } + }); + + it("should run evaluation with version_id", async () => { + // Setup test-specific environment + const setup = await setupTestEnvironment("version_id"); + + try { + // Create a new prompt version + const newPromptVersionResponse = await humanloopClient.prompts.upsert({ + path: setup.evalPrompt.path, + provider: "openai", + model: "gpt-4o-mini", + temperature: 0, + template: [ + { + role: "system", + content: + "You are a helpful assistant. 
You must answer the user's question truthfully and at the level of a 5th grader.", + }, + { + role: "user", + content: "{{question}}", + }, + ], + }); + + // Run evaluation with version_id + await humanloopClient.evaluations.run({ + file: { + id: newPromptVersionResponse.id, + versionId: newPromptVersionResponse.versionId, + type: "prompt", + }, + dataset: { + path: setup.evalDataset.path, + }, + name: "test_eval_run", + evaluators: [ + { + path: setup.outputNotNullEvaluator.path, + }, + ], + }); + + // Verify evaluation + const evaluationsResponse = await humanloopClient.evaluations.list({ + fileId: newPromptVersionResponse.id, + }); + expect(evaluationsResponse.data.length).toBe(1); + + const evaluationId = evaluationsResponse.data[0].id; + const runsResponse = + await humanloopClient.evaluations.listRunsForEvaluation(evaluationId); + expect(runsResponse.runs[0].status).toBe("completed"); + if (runsResponse.runs[0].version) { + expect(runsResponse.runs[0].version.versionId).toBe( + newPromptVersionResponse.versionId, + ); + } + + // Verify version is not the default + const response = await humanloopClient.prompts.get( + newPromptVersionResponse.id, + ); + expect(response.versionId).not.toBe(newPromptVersionResponse.versionId); + } finally { + // Clean up test-specific resources + await cleanupTestEnvironment(setup); + } + }); + + it("should run evaluation with environment", async () => { + // Setup test-specific environment + const setup = await setupTestEnvironment("environment"); + + try { + // Create a new prompt version and deploy to staging + const newPromptVersionResponse = await humanloopClient.prompts.upsert({ + path: setup.evalPrompt.path, + provider: "openai", + model: "gpt-4o-mini", + temperature: 0, + template: [ + { + role: "system", + content: + "You are a helpful assistant. 
You must answer the user's question truthfully and at the level of a 5th grader.", + }, + { + role: "user", + content: "{{question}}", + }, + ], + }); + + await humanloopClient.prompts.setDeployment( + newPromptVersionResponse.id, + setup.stagingEnvironmentId, + { + versionId: newPromptVersionResponse.versionId, + }, + ); + + // Run evaluation with environment + await humanloopClient.evaluations.run({ + file: { + id: newPromptVersionResponse.id, + type: "prompt", + environment: "staging", + }, + dataset: { + path: setup.evalDataset.path, + }, + name: "test_eval_run", + evaluators: [ + { + path: setup.outputNotNullEvaluator.path, + }, + ], + }); + + // Verify evaluation + const evaluationsResponse = await humanloopClient.evaluations.list({ + fileId: newPromptVersionResponse.id, + }); + expect(evaluationsResponse.data.length).toBe(1); + + const evaluationId = evaluationsResponse.data[0].id; + const runsResponse = + await humanloopClient.evaluations.listRunsForEvaluation(evaluationId); + expect(runsResponse.runs[0].status).toBe("completed"); + if (runsResponse.runs[0].version) { + expect(runsResponse.runs[0].version.versionId).toBe( + newPromptVersionResponse.versionId, + ); + } + + const defaultPromptVersionResponse = await humanloopClient.prompts.get( + newPromptVersionResponse.id, + ); + expect(defaultPromptVersionResponse.versionId).not.toBe( + newPromptVersionResponse.versionId, + ); + } finally { + // Clean up test-specific resources + await cleanupTestEnvironment(setup); + } + }); + + it("should fail when using version_id with path", async () => { + // Setup test-specific environment + const setup = await setupTestEnvironment("fail_with_version_id"); + + try { + try { + await humanloopClient.evaluations.run({ + file: { + path: setup.evalPrompt.path, + type: "prompt", + versionId: "will_not_work", + }, + dataset: { + path: setup.evalDataset.path, + }, + name: "test_eval_run", + evaluators: [ + { + path: setup.outputNotNullEvaluator.path, + }, + ], + }); + // If we got here, the test failed + throw new Error("Expected runtime error but none was thrown"); + } catch (error: any) { + if (error instanceof HumanloopRuntimeError) { + expect(error.message).toContain( + "You must provide the `file.id` when addressing a file by version ID or environment", + ); + } else { + throw new Error( + `Expected test to fail for version_id but got ${error}`, + ); + } + } + } finally { + // Clean up test-specific resources + await cleanupTestEnvironment(setup); + } + }); + + it("should fail when using environment with path", async () => { + // Setup test-specific environment + const setup = await setupTestEnvironment("fail_with_environment"); + + try { + await humanloopClient.evaluations.run({ + file: { + path: setup.evalPrompt.path, + type: "prompt", + environment: "staging", + }, + dataset: { + path: setup.evalDataset.path, + }, + name: "test_eval_run", + evaluators: [ + { + path: setup.outputNotNullEvaluator.path, + }, + ], + }); + // If we got here, the test failed + throw new Error("Expected runtime error but none was thrown"); + } catch (error: any) { + if (error instanceof HumanloopRuntimeError) { + expect(error.message).toContain( + "You must provide the `file.id` when addressing a file by version ID or environment", + ); + } else { + throw new Error( + `Expected test to fail for environment but got ${error}`, + ); + } + } finally { + // Clean up test-specific resources + await cleanupTestEnvironment(setup); + } + }); + + it("should run evaluation with version upsert", async () => { + // Setup 
test-specific environment + const setup = await setupTestEnvironment("version_upsert"); + + try { + await humanloopClient.evaluations.run({ + file: { + path: setup.evalPrompt.path, + type: "prompt", + version: { + provider: "openai", + model: "gpt-4o-mini", + temperature: 1, + template: [ + { + role: "system", + content: + "You are a helpful assistant. You must answer the user's question truthfully and at the level of a 5th grader.", + }, + { + role: "user", + content: "{{question}}", + }, + ], + }, + }, + dataset: { + path: setup.evalDataset.path, + }, + name: "test_eval_run", + evaluators: [ + { + path: setup.outputNotNullEvaluator.path, + }, + ], + }); + + // Verify evaluation + const evaluationsResponse = await humanloopClient.evaluations.list({ + fileId: setup.evalPrompt.id, + }); + expect(evaluationsResponse.data.length).toBe(1); + + const evaluationId = evaluationsResponse.data[0].id; + const runsResponse = + await humanloopClient.evaluations.listRunsForEvaluation(evaluationId); + expect(runsResponse.runs[0].status).toBe("completed"); + + // Verify version upsert + const listPromptVersionsResponse = + await humanloopClient.prompts.listVersions(setup.evalPrompt.id); + expect(listPromptVersionsResponse.records.length).toBe(2); + } finally { + // Clean up test-specific resources + await cleanupTestEnvironment(setup); + } + }); + + it("should fail flow eval without callable", async () => { + // Setup test-specific environment + const setup = await setupTestEnvironment("flow_fail_without_callable"); + + try { + try { + await humanloopClient.evaluations.run({ + file: { + path: "Test Flow", + type: "flow", + version: { + attributes: { + foo: "bar", + }, + }, + }, + dataset: { + path: setup.evalDataset.path, + }, + name: "test_eval_run", + evaluators: [ + { + path: setup.outputNotNullEvaluator.path, + }, + ], + }); + // If we got here, the test failed + fail("Expected runtime error but none was thrown"); + } catch (error: any) { + expect(error.message).toContain( + "You must provide a `callable` for your Flow `file` to run a local eval.", + ); + } + } finally { + // Clean up test-specific resources + await cleanupTestEnvironment(setup); + } + }); + + it("should run flow eval with callable", async () => { + // Setup test-specific environment + const setup = await setupTestEnvironment("flow_with_callable"); + + try { + const flowPath = `${setup.sdkTestDir.path}/Test Flow`; + + // Create flow + const flowResponse = await humanloopClient.flows.upsert({ + path: flowPath, + attributes: { + foo: "bar", + }, + }); + + try { + const flow = await humanloopClient.flows.upsert({ + path: flowPath, + attributes: { + foo: "bar", + }, + }); + + // Run evaluation with flow + await humanloopClient.evaluations.run({ + file: { + id: flow.id, + type: "flow", + callable: ({ question }) => + "It's complicated don't worry about it", + version: { + attributes: { + foo: "bar", + }, + }, + }, + dataset: { + path: setup.evalDataset.path, + }, + name: "test_eval_run", + evaluators: [ + { + path: setup.outputNotNullEvaluator.path, + }, + ], + }); + + // Verify evaluation + const evaluationsResponse = await humanloopClient.evaluations.list({ + fileId: flow.id, + }); + expect(evaluationsResponse.data.length).toBe(1); + + const evaluationId = evaluationsResponse.data[0].id; + const runsResponse = + await humanloopClient.evaluations.listRunsForEvaluation( + evaluationId, + ); + expect(runsResponse.runs[0].status).toBe("completed"); + } finally { + await humanloopClient.flows.delete(flowResponse.id); + } + } finally { + // 
Clean up test-specific resources + await cleanupTestEnvironment(setup); + } + }); + + it("should not allow evaluating agent with callable", async () => { + // Setup test-specific environment + const setup = await setupTestEnvironment("agent_with_callable"); + + try { + try { + await humanloopClient.evaluations.run({ + file: { + path: "Test Agent", + type: "agent", + callable: (inputs: any) => "bar", + }, + dataset: { + path: setup.evalDataset.path, + }, + name: "test_eval_run", + evaluators: [ + { + path: setup.outputNotNullEvaluator.path, + }, + ], + }); + // If we got here, the test failed + fail("Expected ValueError but none was thrown"); + } catch (error: any) { + expect(error.message).toBe( + "Agent evaluation is only possible on the Humanloop runtime, do not provide a `callable`.", + ); + } + } finally { + // Clean up test-specific resources + await cleanupTestEnvironment(setup); + } + }); + + it("should resolve to default flow version when callable is provided without version", async () => { + // Setup test-specific environment + const setup = await setupTestEnvironment("flow_with_callable_without_version"); + let flowResponse: FlowResponse; + try { + const flowPath = `${setup.sdkTestDir.path}/Test Flow`; + + // Create flow + flowResponse = await humanloopClient.flows.upsert({ + path: flowPath, + attributes: { + foo: "bar", + }, + }); + + // Run evaluation with flow + await humanloopClient.evaluations.run({ + file: { + id: flowResponse.id, + type: "flow", + callable: ({ question }) => "It's complicated don't worry about it", + }, + dataset: { + path: setup.evalDataset.path, + }, + name: "test_eval_run", + evaluators: [ + { + path: setup.outputNotNullEvaluator.path, + }, + ], + }); + + // Verify evaluation + const evaluationsResponse = await humanloopClient.evaluations.list({ + fileId: flowResponse.id, + }); + expect(evaluationsResponse.data.length).toBe(1); + + const evaluationId = evaluationsResponse.data[0].id; + const runsResponse = + await humanloopClient.evaluations.listRunsForEvaluation(evaluationId); + expect(runsResponse.runs[0].status).toBe("completed"); + } finally { + // Clean up test-specific resources + await cleanupTestEnvironment(setup, [ + { id: flowResponse!.id, type: "flow" }, + ]); + } + }); +}); diff --git a/tests/integration/fixtures.ts b/tests/integration/fixtures.ts new file mode 100644 index 00000000..41ccf486 --- /dev/null +++ b/tests/integration/fixtures.ts @@ -0,0 +1,246 @@ +import dotenv from "dotenv"; +import { OpenAI } from "openai"; +import { v4 as uuidv4 } from "uuid"; + +import { FileType, PromptRequest, PromptResponse } from "../../src/api"; +import { HumanloopClient } from "../../src/humanloop.client"; + +export interface TestIdentifiers { + id: string; + path: string; +} + +export interface TestPrompt { + id: string; + path: string; + response: PromptResponse; +} + +export interface TestSetup { + sdkTestDir: TestIdentifiers; + testPromptConfig: PromptRequest; + openaiApiKey: string; + humanloopClient: HumanloopClient; + evalDataset: TestIdentifiers; + evalPrompt: TestIdentifiers; + stagingEnvironmentId: string; + outputNotNullEvaluator: TestIdentifiers; +} + +export interface CleanupResources { + type: FileType; + id: string; +} + +export function readEnvironment(): void { + if (![process.env.HUMANLOOP_API_KEY, process.env.OPENAI_API_KEY].every(Boolean)) { + // Testing locally not in CI, running dotenv.config() would override the secrets set for GitHub Action + dotenv.config({}); + } + if (!process.env.HUMANLOOP_API_KEY) { + throw new 
Error("HUMANLOOP_API_KEY is not set"); + } + if (!process.env.OPENAI_API_KEY) { + throw new Error("OPENAI_API_KEY is not set for integration tests"); + } +} + +export function getSubclient(client: HumanloopClient, type: FileType) { + switch (type) { + case "prompt": + return client.prompts; + case "tool": + return client.tools; + case "flow": + return client.flows; + case "agent": + return client.agents; + case "dataset": + return client.datasets; + case "evaluator": + return client.evaluators; + default: + throw new Error(`Unsupported file type: ${type}`); + } +} + +export async function setupTestEnvironment(testName: string): Promise { + readEnvironment(); + + const openaiApiKey = process.env.OPENAI_API_KEY!; + const humanloopClient = new HumanloopClient({ + apiKey: process.env.HUMANLOOP_API_KEY, + instrumentProviders: { + OpenAI: OpenAI, + }, + }); + + // Create a test directory + const directoryPath = `SDK_TEST_${testName}_${uuidv4()}`; + const response = await humanloopClient.directories.create({ + path: directoryPath, + }); + + const sdkTestDir = { + id: response.id, + path: response.path, + }; + + // Create test prompt config + const testPromptConfig: PromptRequest = { + provider: "openai", + model: "gpt-4o-mini", + temperature: 0.5, + template: [ + { + role: "system", + content: "You are a helpful assistant. Answer concisely.", + }, + { + role: "user", + content: "{{question}}", + }, + ], + }; + + // Create evaluator for testing + const evaluatorPath = `${sdkTestDir.path}/output_not_null_evaluator`; + const evaluatorResponse = await humanloopClient.evaluators.upsert({ + path: evaluatorPath, + spec: { + argumentsType: "target_required", + returnType: "boolean", + code: ` +def output_not_null(log: dict) -> bool: + return log["output"] is not None + `, + evaluatorType: "python", + }, + }); + const outputNotNullEvaluator = { + id: evaluatorResponse.id, + path: evaluatorPath, + }; + + // Create dataset for testing + const datasetPath = `${sdkTestDir.path}/eval_dataset`; + const datasetResponse = await humanloopClient.datasets.upsert({ + path: datasetPath, + datapoints: [ + { + inputs: { question: "What is the capital of the France?" }, + target: { output: "Paris" }, + }, + { + inputs: { question: "What is the capital of the Germany?" }, + target: { output: "Berlin" }, + }, + { + inputs: { question: "What is 2+2?" 
}, + target: { output: "4" }, + }, + ], + }); + const evalDataset = { + id: datasetResponse.id, + path: datasetResponse.path, + }; + + // Create prompt + const promptPath = `${sdkTestDir.path}/eval_prompt`; + const promptResponse = await humanloopClient.prompts.upsert({ + path: promptPath, + ...(testPromptConfig as PromptRequest), + }); + const evalPrompt = { + id: promptResponse.id, + path: promptResponse.path, + }; + + // Get staging environment ID + const environmentsResponse = await humanloopClient.prompts.listEnvironments( + evalPrompt.id, + ); + let stagingEnvironmentId = ""; + for (const environment of environmentsResponse) { + if (environment.name === "staging") { + stagingEnvironmentId = environment.id; + break; + } + } + if (!stagingEnvironmentId) { + throw new Error("Staging environment not found"); + } + + return { + testPromptConfig, + openaiApiKey, + humanloopClient, + sdkTestDir, + outputNotNullEvaluator, + evalDataset, + evalPrompt, + stagingEnvironmentId, + }; +} + +/** + * Cleans up all test resources + * @param setup The test setup containing the resources + * @param resources Additional resources to clean up + */ +export async function cleanupTestEnvironment( + setup: TestSetup, + resources?: CleanupResources[], +): Promise { + try { + // First clean up any additional resources + if (resources) { + for (const resource of resources) { + const subclient = getSubclient(setup.humanloopClient, resource.type); + if (resource.id) { + await subclient.delete(resource.id); + } + } + } + + // Clean up fixed test resources + if (setup.outputNotNullEvaluator?.id) { + try { + await setup.humanloopClient.evaluators.delete( + setup.outputNotNullEvaluator.id, + ); + } catch (error) { + console.warn( + `Failed to delete evaluator ${setup.outputNotNullEvaluator.id}:`, + error, + ); + } + } + + if (setup.evalDataset?.id) { + try { + await setup.humanloopClient.datasets.delete(setup.evalDataset.id); + } catch (error) { + console.warn( + `Failed to delete dataset ${setup.evalDataset.id}:`, + error, + ); + } + } + + // Finally, clean up the test directory + if (setup.sdkTestDir.id) { + try { + await setup.humanloopClient.directories.delete(setup.sdkTestDir.id); + } catch (error) { + console.warn( + `Failed to delete directory ${setup.sdkTestDir.id}:`, + error, + ); + } + } + } catch (error) { + console.error("Error during cleanup:", error); + } +} diff --git a/yarn.lock b/yarn.lock index 96d36d5b..d6cee5a6 100644 --- a/yarn.lock +++ b/yarn.lock @@ -79,45 +79,45 @@ "@smithy/util-utf8" "^2.0.0" tslib "^2.6.2" -"@aws-sdk/client-cognito-identity@3.787.0": - version "3.787.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-cognito-identity/-/client-cognito-identity-3.787.0.tgz#15e631b425fb32865e0479b9e47138125b7dcd43" - integrity sha512-7v6nywZ5wcQxX7qdZ5M1ld15QdkzLU6fAKiEqbvJKu4dM8cFW6As+DbS990Mg46pp1xM/yvme+51xZDTfTfJZA== +"@aws-sdk/client-cognito-identity@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-cognito-identity/-/client-cognito-identity-3.799.0.tgz#80fd73c4e664427e86026f9a302f6b646d935d46" + integrity sha512-gg1sncxYDpYWetey3v/nw9zSkL/Vj2potpeO9sYWY2brcm8SbGh106I6IM/gX6KnY9Y2Bre8xb+JoZGz6ntcnw== dependencies: "@aws-crypto/sha256-browser" "5.2.0" "@aws-crypto/sha256-js" "5.2.0" - "@aws-sdk/core" "3.775.0" - "@aws-sdk/credential-provider-node" "3.787.0" + "@aws-sdk/core" "3.799.0" + "@aws-sdk/credential-provider-node" "3.799.0" "@aws-sdk/middleware-host-header" "3.775.0" "@aws-sdk/middleware-logger" "3.775.0" 
"@aws-sdk/middleware-recursion-detection" "3.775.0" - "@aws-sdk/middleware-user-agent" "3.787.0" + "@aws-sdk/middleware-user-agent" "3.799.0" "@aws-sdk/region-config-resolver" "3.775.0" "@aws-sdk/types" "3.775.0" "@aws-sdk/util-endpoints" "3.787.0" "@aws-sdk/util-user-agent-browser" "3.775.0" - "@aws-sdk/util-user-agent-node" "3.787.0" + "@aws-sdk/util-user-agent-node" "3.799.0" "@smithy/config-resolver" "^4.1.0" - "@smithy/core" "^3.2.0" + "@smithy/core" "^3.3.0" "@smithy/fetch-http-handler" "^5.0.2" "@smithy/hash-node" "^4.0.2" "@smithy/invalid-dependency" "^4.0.2" "@smithy/middleware-content-length" "^4.0.2" - "@smithy/middleware-endpoint" "^4.1.0" - "@smithy/middleware-retry" "^4.1.0" + "@smithy/middleware-endpoint" "^4.1.1" + "@smithy/middleware-retry" "^4.1.1" "@smithy/middleware-serde" "^4.0.3" "@smithy/middleware-stack" "^4.0.2" "@smithy/node-config-provider" "^4.0.2" "@smithy/node-http-handler" "^4.0.4" "@smithy/protocol-http" "^5.1.0" - "@smithy/smithy-client" "^4.2.0" + "@smithy/smithy-client" "^4.2.1" "@smithy/types" "^4.2.0" "@smithy/url-parser" "^4.0.2" "@smithy/util-base64" "^4.0.0" "@smithy/util-body-length-browser" "^4.0.0" "@smithy/util-body-length-node" "^4.0.0" - "@smithy/util-defaults-mode-browser" "^4.0.8" - "@smithy/util-defaults-mode-node" "^4.0.8" + "@smithy/util-defaults-mode-browser" "^4.0.9" + "@smithy/util-defaults-mode-node" "^4.0.9" "@smithy/util-endpoints" "^3.0.2" "@smithy/util-middleware" "^4.0.2" "@smithy/util-retry" "^4.0.2" @@ -125,44 +125,44 @@ tslib "^2.6.2" "@aws-sdk/client-sagemaker@^3.583.0": - version "3.792.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-sagemaker/-/client-sagemaker-3.792.0.tgz#394b01a59c9328744609736989a526b0008b804a" - integrity sha512-5brGHGc01Vsgy7bn+uNCoSbho4NsFJGj8qjY+eVkXrILaMMzGplHUPZMuJHiHWGgk4uqzRvTrxzCC/3VvOdiaQ== + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-sagemaker/-/client-sagemaker-3.799.0.tgz#b6b4481f707c7e5d2536fe307e6f341f995a5827" + integrity sha512-So85e7gS7VW64ePgeVJNCxbSMU2tINQk/f3TRe7yKfdxQVvyq53jx88AwJgk2WUZYJxlDgEK6fBDvimzwFKJDA== dependencies: "@aws-crypto/sha256-browser" "5.2.0" "@aws-crypto/sha256-js" "5.2.0" - "@aws-sdk/core" "3.775.0" - "@aws-sdk/credential-provider-node" "3.787.0" + "@aws-sdk/core" "3.799.0" + "@aws-sdk/credential-provider-node" "3.799.0" "@aws-sdk/middleware-host-header" "3.775.0" "@aws-sdk/middleware-logger" "3.775.0" "@aws-sdk/middleware-recursion-detection" "3.775.0" - "@aws-sdk/middleware-user-agent" "3.787.0" + "@aws-sdk/middleware-user-agent" "3.799.0" "@aws-sdk/region-config-resolver" "3.775.0" "@aws-sdk/types" "3.775.0" "@aws-sdk/util-endpoints" "3.787.0" "@aws-sdk/util-user-agent-browser" "3.775.0" - "@aws-sdk/util-user-agent-node" "3.787.0" + "@aws-sdk/util-user-agent-node" "3.799.0" "@smithy/config-resolver" "^4.1.0" - "@smithy/core" "^3.2.0" + "@smithy/core" "^3.3.0" "@smithy/fetch-http-handler" "^5.0.2" "@smithy/hash-node" "^4.0.2" "@smithy/invalid-dependency" "^4.0.2" "@smithy/middleware-content-length" "^4.0.2" - "@smithy/middleware-endpoint" "^4.1.0" - "@smithy/middleware-retry" "^4.1.0" + "@smithy/middleware-endpoint" "^4.1.1" + "@smithy/middleware-retry" "^4.1.1" "@smithy/middleware-serde" "^4.0.3" "@smithy/middleware-stack" "^4.0.2" "@smithy/node-config-provider" "^4.0.2" "@smithy/node-http-handler" "^4.0.4" "@smithy/protocol-http" "^5.1.0" - "@smithy/smithy-client" "^4.2.0" + "@smithy/smithy-client" "^4.2.1" "@smithy/types" "^4.2.0" "@smithy/url-parser" "^4.0.2" "@smithy/util-base64" "^4.0.0" 
"@smithy/util-body-length-browser" "^4.0.0" "@smithy/util-body-length-node" "^4.0.0" - "@smithy/util-defaults-mode-browser" "^4.0.8" - "@smithy/util-defaults-mode-node" "^4.0.8" + "@smithy/util-defaults-mode-browser" "^4.0.9" + "@smithy/util-defaults-mode-node" "^4.0.9" "@smithy/util-endpoints" "^3.0.2" "@smithy/util-middleware" "^4.0.2" "@smithy/util-retry" "^4.0.2" @@ -172,117 +172,117 @@ tslib "^2.6.2" uuid "^9.0.1" -"@aws-sdk/client-sso@3.787.0": - version "3.787.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-sso/-/client-sso-3.787.0.tgz#39f1182296b586cb957b449b5f0dabd8f378cf1a" - integrity sha512-L8R+Mh258G0DC73ktpSVrG4TT9i2vmDLecARTDR/4q5sRivdDQSL5bUp3LKcK80Bx+FRw3UETIlX6mYMLL9PJQ== +"@aws-sdk/client-sso@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-sso/-/client-sso-3.799.0.tgz#4e1e0831100a93147e9cfb8b29bcee88344effa0" + integrity sha512-/i/LG7AiWPmPxKCA2jnR2zaf7B3HYSTbxaZI21ElIz9wASlNAsKr8CnLY7qb50kOyXiNfQ834S5Q3Gl8dX9o3Q== dependencies: "@aws-crypto/sha256-browser" "5.2.0" "@aws-crypto/sha256-js" "5.2.0" - "@aws-sdk/core" "3.775.0" + "@aws-sdk/core" "3.799.0" "@aws-sdk/middleware-host-header" "3.775.0" "@aws-sdk/middleware-logger" "3.775.0" "@aws-sdk/middleware-recursion-detection" "3.775.0" - "@aws-sdk/middleware-user-agent" "3.787.0" + "@aws-sdk/middleware-user-agent" "3.799.0" "@aws-sdk/region-config-resolver" "3.775.0" "@aws-sdk/types" "3.775.0" "@aws-sdk/util-endpoints" "3.787.0" "@aws-sdk/util-user-agent-browser" "3.775.0" - "@aws-sdk/util-user-agent-node" "3.787.0" + "@aws-sdk/util-user-agent-node" "3.799.0" "@smithy/config-resolver" "^4.1.0" - "@smithy/core" "^3.2.0" + "@smithy/core" "^3.3.0" "@smithy/fetch-http-handler" "^5.0.2" "@smithy/hash-node" "^4.0.2" "@smithy/invalid-dependency" "^4.0.2" "@smithy/middleware-content-length" "^4.0.2" - "@smithy/middleware-endpoint" "^4.1.0" - "@smithy/middleware-retry" "^4.1.0" + "@smithy/middleware-endpoint" "^4.1.1" + "@smithy/middleware-retry" "^4.1.1" "@smithy/middleware-serde" "^4.0.3" "@smithy/middleware-stack" "^4.0.2" "@smithy/node-config-provider" "^4.0.2" "@smithy/node-http-handler" "^4.0.4" "@smithy/protocol-http" "^5.1.0" - "@smithy/smithy-client" "^4.2.0" + "@smithy/smithy-client" "^4.2.1" "@smithy/types" "^4.2.0" "@smithy/url-parser" "^4.0.2" "@smithy/util-base64" "^4.0.0" "@smithy/util-body-length-browser" "^4.0.0" "@smithy/util-body-length-node" "^4.0.0" - "@smithy/util-defaults-mode-browser" "^4.0.8" - "@smithy/util-defaults-mode-node" "^4.0.8" + "@smithy/util-defaults-mode-browser" "^4.0.9" + "@smithy/util-defaults-mode-node" "^4.0.9" "@smithy/util-endpoints" "^3.0.2" "@smithy/util-middleware" "^4.0.2" "@smithy/util-retry" "^4.0.2" "@smithy/util-utf8" "^4.0.0" tslib "^2.6.2" -"@aws-sdk/core@3.775.0": - version "3.775.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/core/-/core-3.775.0.tgz#5d22ba78f07c07b48fb4d5b18172b9a896c0cbd0" - integrity sha512-8vpW4WihVfz0DX+7WnnLGm3GuQER++b0IwQG35JlQMlgqnc44M//KbJPsIHA0aJUJVwJAEShgfr5dUbY8WUzaA== +"@aws-sdk/core@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/core/-/core-3.799.0.tgz#383f903ede137df108dcd5f817074515d2b1242e" + integrity sha512-hkKF3Zpc6+H8GI1rlttYVRh9uEE77cqAzLmLpY3iu7sql8cZgPERRBfaFct8p1SaDyrksLNiboD1vKW58mbsYg== dependencies: "@aws-sdk/types" "3.775.0" - "@smithy/core" "^3.2.0" + "@smithy/core" "^3.3.0" "@smithy/node-config-provider" "^4.0.2" "@smithy/property-provider" "^4.0.2" "@smithy/protocol-http" "^5.1.0" - "@smithy/signature-v4" "^5.0.2" - "@smithy/smithy-client" 
"^4.2.0" + "@smithy/signature-v4" "^5.1.0" + "@smithy/smithy-client" "^4.2.1" "@smithy/types" "^4.2.0" "@smithy/util-middleware" "^4.0.2" fast-xml-parser "4.4.1" tslib "^2.6.2" -"@aws-sdk/credential-provider-cognito-identity@3.787.0": - version "3.787.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-cognito-identity/-/credential-provider-cognito-identity-3.787.0.tgz#3c19fe9eb001d2b20adb793776a9fedc68684d39" - integrity sha512-nF5XjgvZHFuyttOeTjMgfEsg6slZPQ6uI34yzq12Kq4icFgcD4bQsijnQClMN7A0u5qR8Ad8kume4b7+I2++Ig== +"@aws-sdk/credential-provider-cognito-identity@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-cognito-identity/-/credential-provider-cognito-identity-3.799.0.tgz#eba1a19bb7bacd37371e2e0dbd3f126145ef9d88" + integrity sha512-qHOqGsvt/z1bvjJRzndW8VaRfbGBhoETZpoRYNbfCbrNH2IRM98KRUlYH1EJ1wFFkT0gUDJr+oIOUCvRlgRW1Q== dependencies: - "@aws-sdk/client-cognito-identity" "3.787.0" + "@aws-sdk/client-cognito-identity" "3.799.0" "@aws-sdk/types" "3.775.0" "@smithy/property-provider" "^4.0.2" "@smithy/types" "^4.2.0" tslib "^2.6.2" -"@aws-sdk/credential-provider-env@3.775.0": - version "3.775.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-env/-/credential-provider-env-3.775.0.tgz#b8c81818f4c62d89b5f04dc410ab9b48e954f22c" - integrity sha512-6ESVxwCbGm7WZ17kY1fjmxQud43vzJFoLd4bmlR+idQSWdqlzGDYdcfzpjDKTcivdtNrVYmFvcH1JBUwCRAZhw== +"@aws-sdk/credential-provider-env@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-env/-/credential-provider-env-3.799.0.tgz#d933265b54b18ef1232762c318ff0d75bc7785f9" + integrity sha512-vT/SSWtbUIOW/U21qgEySmmO44SFWIA7WeQPX1OrI8WJ5n7OEI23JWLHjLvHTkYmuZK6z1rPcv7HzRgmuGRibA== dependencies: - "@aws-sdk/core" "3.775.0" + "@aws-sdk/core" "3.799.0" "@aws-sdk/types" "3.775.0" "@smithy/property-provider" "^4.0.2" "@smithy/types" "^4.2.0" tslib "^2.6.2" -"@aws-sdk/credential-provider-http@3.775.0": - version "3.775.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-http/-/credential-provider-http-3.775.0.tgz#0fbc7f4e6cada37fc9b647de0d7c12a42a44bcc6" - integrity sha512-PjDQeDH/J1S0yWV32wCj2k5liRo0ssXMseCBEkCsD3SqsU8o5cU82b0hMX4sAib/RkglCSZqGO0xMiN0/7ndww== +"@aws-sdk/credential-provider-http@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-http/-/credential-provider-http-3.799.0.tgz#9286235bb30c4f22fbeac0ecf2fe5e5f99aaa282" + integrity sha512-2CjBpOWmhaPAExOgHnIB5nOkS5ef+mfRlJ1JC4nsnjAx0nrK4tk0XRE0LYz11P3+ue+a86cU8WTmBo+qjnGxPQ== dependencies: - "@aws-sdk/core" "3.775.0" + "@aws-sdk/core" "3.799.0" "@aws-sdk/types" "3.775.0" "@smithy/fetch-http-handler" "^5.0.2" "@smithy/node-http-handler" "^4.0.4" "@smithy/property-provider" "^4.0.2" "@smithy/protocol-http" "^5.1.0" - "@smithy/smithy-client" "^4.2.0" + "@smithy/smithy-client" "^4.2.1" "@smithy/types" "^4.2.0" "@smithy/util-stream" "^4.2.0" tslib "^2.6.2" -"@aws-sdk/credential-provider-ini@3.787.0": - version "3.787.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.787.0.tgz#906ece004141462ae695504b6c07d1200688fd6c" - integrity sha512-hc2taRoDlXn2uuNuHWDJljVWYrp3r9JF1a/8XmOAZhVUNY+ImeeStylHXhXXKEA4JOjW+5PdJj0f1UDkVCHJiQ== - dependencies: - "@aws-sdk/core" "3.775.0" - "@aws-sdk/credential-provider-env" "3.775.0" - "@aws-sdk/credential-provider-http" "3.775.0" - "@aws-sdk/credential-provider-process" "3.775.0" - "@aws-sdk/credential-provider-sso" 
"3.787.0" - "@aws-sdk/credential-provider-web-identity" "3.787.0" - "@aws-sdk/nested-clients" "3.787.0" +"@aws-sdk/credential-provider-ini@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.799.0.tgz#89ed328e40d2bf0c37453c26b1dd74201c61da2c" + integrity sha512-M9ubILFxerqw4QJwk83MnjtZyoA2eNCiea5V+PzZeHlwk2PON/EnawKqy65x9/hMHGoSvvNuby7iMAmPptu7yw== + dependencies: + "@aws-sdk/core" "3.799.0" + "@aws-sdk/credential-provider-env" "3.799.0" + "@aws-sdk/credential-provider-http" "3.799.0" + "@aws-sdk/credential-provider-process" "3.799.0" + "@aws-sdk/credential-provider-sso" "3.799.0" + "@aws-sdk/credential-provider-web-identity" "3.799.0" + "@aws-sdk/nested-clients" "3.799.0" "@aws-sdk/types" "3.775.0" "@smithy/credential-provider-imds" "^4.0.2" "@smithy/property-provider" "^4.0.2" @@ -290,17 +290,17 @@ "@smithy/types" "^4.2.0" tslib "^2.6.2" -"@aws-sdk/credential-provider-node@3.787.0": - version "3.787.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-node/-/credential-provider-node-3.787.0.tgz#3e5cdafb0fecca25b7430f848cbca85000b25c33" - integrity sha512-JioVi44B1vDMaK2CdzqimwvJD3uzvzbQhaEWXsGMBcMcNHajXAXf08EF50JG3ZhLrhhUsT1ObXpbTaPINOhh+g== - dependencies: - "@aws-sdk/credential-provider-env" "3.775.0" - "@aws-sdk/credential-provider-http" "3.775.0" - "@aws-sdk/credential-provider-ini" "3.787.0" - "@aws-sdk/credential-provider-process" "3.775.0" - "@aws-sdk/credential-provider-sso" "3.787.0" - "@aws-sdk/credential-provider-web-identity" "3.787.0" +"@aws-sdk/credential-provider-node@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-node/-/credential-provider-node-3.799.0.tgz#45e646a24f105782dbaf3c55951dbae32ae73074" + integrity sha512-nd9fSJc0wUlgKUkIr2ldJhcIIrzJFS29AGZoyY22J3xih63nNDv61eTGVMsDZzHlV21XzMlPEljTR7axiimckg== + dependencies: + "@aws-sdk/credential-provider-env" "3.799.0" + "@aws-sdk/credential-provider-http" "3.799.0" + "@aws-sdk/credential-provider-ini" "3.799.0" + "@aws-sdk/credential-provider-process" "3.799.0" + "@aws-sdk/credential-provider-sso" "3.799.0" + "@aws-sdk/credential-provider-web-identity" "3.799.0" "@aws-sdk/types" "3.775.0" "@smithy/credential-provider-imds" "^4.0.2" "@smithy/property-provider" "^4.0.2" @@ -308,63 +308,63 @@ "@smithy/types" "^4.2.0" tslib "^2.6.2" -"@aws-sdk/credential-provider-process@3.775.0": - version "3.775.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-process/-/credential-provider-process-3.775.0.tgz#7ab90383f12461c5d20546e933924e654660542b" - integrity sha512-A6k68H9rQp+2+7P7SGO90Csw6nrUEm0Qfjpn9Etc4EboZhhCLs9b66umUsTsSBHus4FDIe5JQxfCUyt1wgNogg== +"@aws-sdk/credential-provider-process@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-process/-/credential-provider-process-3.799.0.tgz#34e8b3d7c889bbb87dfe7c171255a8b99a34df25" + integrity sha512-g8jmNs2k98WNHMYcea1YKA+7ao2Ma4w0P42Dz4YpcI155pQHxHx25RwbOG+rsAKuo3bKwkW53HVE/ZTKhcWFgw== dependencies: - "@aws-sdk/core" "3.775.0" + "@aws-sdk/core" "3.799.0" "@aws-sdk/types" "3.775.0" "@smithy/property-provider" "^4.0.2" "@smithy/shared-ini-file-loader" "^4.0.2" "@smithy/types" "^4.2.0" tslib "^2.6.2" -"@aws-sdk/credential-provider-sso@3.787.0": - version "3.787.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.787.0.tgz#77ab6c01e4497d7ff2e6c7f081f3d8695744884b" - integrity 
sha512-fHc08bsvwm4+dEMEQKnQ7c1irEQmmxbgS+Fq41y09pPvPh31nAhoMcjBSTWAaPHvvsRbTYvmP4Mf12ZGr8/nfg== +"@aws-sdk/credential-provider-sso@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.799.0.tgz#535dd1d1abe5f2567551514444f18b79993ac92e" + integrity sha512-lQv27QkNU9FJFZqEf5DIEN3uXEN409Iaym9WJzhOouGtxvTIAWiD23OYh1u8PvBdrordJGS2YddfQvhcmq9akw== dependencies: - "@aws-sdk/client-sso" "3.787.0" - "@aws-sdk/core" "3.775.0" - "@aws-sdk/token-providers" "3.787.0" + "@aws-sdk/client-sso" "3.799.0" + "@aws-sdk/core" "3.799.0" + "@aws-sdk/token-providers" "3.799.0" "@aws-sdk/types" "3.775.0" "@smithy/property-provider" "^4.0.2" "@smithy/shared-ini-file-loader" "^4.0.2" "@smithy/types" "^4.2.0" tslib "^2.6.2" -"@aws-sdk/credential-provider-web-identity@3.787.0": - version "3.787.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.787.0.tgz#d492d1f4a90b70f3a71a65f11b8d3ef79fb2759e" - integrity sha512-SobmCwNbk6TfEsF283mZPQEI5vV2j6eY5tOCj8Er4Lzraxu9fBPADV+Bib2A8F6jlB1lMPJzOuDCbEasSt/RIw== +"@aws-sdk/credential-provider-web-identity@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.799.0.tgz#ddf6c4e6f692289ba9e5db3ba9c63564742e5533" + integrity sha512-8k1i9ut+BEg0QZ+I6UQMxGNR1T8paLmAOAZXU+nLQR0lcxS6lr8v+dqofgzQPuHLBkWNCr1Av1IKeL3bJjgU7g== dependencies: - "@aws-sdk/core" "3.775.0" - "@aws-sdk/nested-clients" "3.787.0" + "@aws-sdk/core" "3.799.0" + "@aws-sdk/nested-clients" "3.799.0" "@aws-sdk/types" "3.775.0" "@smithy/property-provider" "^4.0.2" "@smithy/types" "^4.2.0" tslib "^2.6.2" "@aws-sdk/credential-providers@^3.583.0": - version "3.787.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-providers/-/credential-providers-3.787.0.tgz#3ec6d1e17b7f468393f738317350ca572ebf79b0" - integrity sha512-kR3RtI7drOc9pho13vWbUC2Bvrx9A0G4iizBDGmTs08NOdg4w3c1I4kdLG9tyPiIMeVnH+wYrsli5CM7xIfqiA== - dependencies: - "@aws-sdk/client-cognito-identity" "3.787.0" - "@aws-sdk/core" "3.775.0" - "@aws-sdk/credential-provider-cognito-identity" "3.787.0" - "@aws-sdk/credential-provider-env" "3.775.0" - "@aws-sdk/credential-provider-http" "3.775.0" - "@aws-sdk/credential-provider-ini" "3.787.0" - "@aws-sdk/credential-provider-node" "3.787.0" - "@aws-sdk/credential-provider-process" "3.775.0" - "@aws-sdk/credential-provider-sso" "3.787.0" - "@aws-sdk/credential-provider-web-identity" "3.787.0" - "@aws-sdk/nested-clients" "3.787.0" + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-providers/-/credential-providers-3.799.0.tgz#f86cff0bdaef9762b56132977186c4ec1e3249cd" + integrity sha512-Gk10skoEri6zsCPxn34Zpu6Z1B5R3RLwqDw1krNl+B1P749gB6i7XULXZUOotqpum0T0q4euOwAB8XWuTOkKew== + dependencies: + "@aws-sdk/client-cognito-identity" "3.799.0" + "@aws-sdk/core" "3.799.0" + "@aws-sdk/credential-provider-cognito-identity" "3.799.0" + "@aws-sdk/credential-provider-env" "3.799.0" + "@aws-sdk/credential-provider-http" "3.799.0" + "@aws-sdk/credential-provider-ini" "3.799.0" + "@aws-sdk/credential-provider-node" "3.799.0" + "@aws-sdk/credential-provider-process" "3.799.0" + "@aws-sdk/credential-provider-sso" "3.799.0" + "@aws-sdk/credential-provider-web-identity" "3.799.0" + "@aws-sdk/nested-clients" "3.799.0" "@aws-sdk/types" "3.775.0" "@smithy/config-resolver" "^4.1.0" - "@smithy/core" "^3.2.0" + "@smithy/core" "^3.3.0" "@smithy/credential-provider-imds" 
"^4.0.2" "@smithy/node-config-provider" "^4.0.2" "@smithy/property-provider" "^4.0.2" @@ -400,57 +400,57 @@ "@smithy/types" "^4.2.0" tslib "^2.6.2" -"@aws-sdk/middleware-user-agent@3.787.0": - version "3.787.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.787.0.tgz#3d657c0ba1aec72bca079f4691ba20f25569fcfc" - integrity sha512-Lnfj8SmPLYtrDFthNIaNj66zZsBCam+E4XiUDr55DIHTGstH6qZ/q6vg0GfbukxwSmUcGMwSR4Qbn8rb8yd77g== +"@aws-sdk/middleware-user-agent@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.799.0.tgz#e120e6e1341bcba5427cee0385172170e4615186" + integrity sha512-TropQZanbOTxa+p+Nl4fWkzlRhgFwDfW+Wb6TR3jZN7IXHNlPpgGFpdrgvBExhW/RBhqr+94OsR8Ou58lp3hhA== dependencies: - "@aws-sdk/core" "3.775.0" + "@aws-sdk/core" "3.799.0" "@aws-sdk/types" "3.775.0" "@aws-sdk/util-endpoints" "3.787.0" - "@smithy/core" "^3.2.0" + "@smithy/core" "^3.3.0" "@smithy/protocol-http" "^5.1.0" "@smithy/types" "^4.2.0" tslib "^2.6.2" -"@aws-sdk/nested-clients@3.787.0": - version "3.787.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/nested-clients/-/nested-clients-3.787.0.tgz#e8a5a6e7d0b599a7f9f15b900d3223ad080b0a81" - integrity sha512-xk03q1xpKNHgbuo+trEf1dFrI239kuMmjKKsqLEsHlAZbuFq4yRGMlHBrVMnKYOPBhVFDS/VineM991XI52fKg== +"@aws-sdk/nested-clients@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/nested-clients/-/nested-clients-3.799.0.tgz#a3b223cfa22f809cee28eedea2ce1f30175665f9" + integrity sha512-zILlWh7asrcQG9JYMYgnvEQBfwmWKfED0yWCf3UNAmQcfS9wkCAWCgicNy/y5KvNvEYnHidsU117STtyuUNG5g== dependencies: "@aws-crypto/sha256-browser" "5.2.0" "@aws-crypto/sha256-js" "5.2.0" - "@aws-sdk/core" "3.775.0" + "@aws-sdk/core" "3.799.0" "@aws-sdk/middleware-host-header" "3.775.0" "@aws-sdk/middleware-logger" "3.775.0" "@aws-sdk/middleware-recursion-detection" "3.775.0" - "@aws-sdk/middleware-user-agent" "3.787.0" + "@aws-sdk/middleware-user-agent" "3.799.0" "@aws-sdk/region-config-resolver" "3.775.0" "@aws-sdk/types" "3.775.0" "@aws-sdk/util-endpoints" "3.787.0" "@aws-sdk/util-user-agent-browser" "3.775.0" - "@aws-sdk/util-user-agent-node" "3.787.0" + "@aws-sdk/util-user-agent-node" "3.799.0" "@smithy/config-resolver" "^4.1.0" - "@smithy/core" "^3.2.0" + "@smithy/core" "^3.3.0" "@smithy/fetch-http-handler" "^5.0.2" "@smithy/hash-node" "^4.0.2" "@smithy/invalid-dependency" "^4.0.2" "@smithy/middleware-content-length" "^4.0.2" - "@smithy/middleware-endpoint" "^4.1.0" - "@smithy/middleware-retry" "^4.1.0" + "@smithy/middleware-endpoint" "^4.1.1" + "@smithy/middleware-retry" "^4.1.1" "@smithy/middleware-serde" "^4.0.3" "@smithy/middleware-stack" "^4.0.2" "@smithy/node-config-provider" "^4.0.2" "@smithy/node-http-handler" "^4.0.4" "@smithy/protocol-http" "^5.1.0" - "@smithy/smithy-client" "^4.2.0" + "@smithy/smithy-client" "^4.2.1" "@smithy/types" "^4.2.0" "@smithy/url-parser" "^4.0.2" "@smithy/util-base64" "^4.0.0" "@smithy/util-body-length-browser" "^4.0.0" "@smithy/util-body-length-node" "^4.0.0" - "@smithy/util-defaults-mode-browser" "^4.0.8" - "@smithy/util-defaults-mode-node" "^4.0.8" + "@smithy/util-defaults-mode-browser" "^4.0.9" + "@smithy/util-defaults-mode-node" "^4.0.9" "@smithy/util-endpoints" "^3.0.2" "@smithy/util-middleware" "^4.0.2" "@smithy/util-retry" "^4.0.2" @@ -485,12 +485,12 @@ "@smithy/signature-v4" "^1.0.1" tslib "^2.5.0" -"@aws-sdk/token-providers@3.787.0": - version "3.787.0" - resolved 
"https://registry.yarnpkg.com/@aws-sdk/token-providers/-/token-providers-3.787.0.tgz#18c761fb21ee25c8c3a35703876f0c733b4ae743" - integrity sha512-d7/NIqxq308Zg0RPMNrmn0QvzniL4Hx8Qdwzr6YZWLYAbUSvZYS2ppLR3BFWSkV6SsTJUx8BuDaj3P8vttkrog== +"@aws-sdk/token-providers@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/token-providers/-/token-providers-3.799.0.tgz#7b2cc6aa5b1a1058490b780ff975de29218ef3a0" + integrity sha512-/8iDjnsJs/D8AhGbDAmdF5oSHzE4jsDsM2RIIxmBAKTZXkaaclQBNX9CmAqLKQmO3IUMZsDH2KENHLVAk/N/mw== dependencies: - "@aws-sdk/nested-clients" "3.787.0" + "@aws-sdk/nested-clients" "3.799.0" "@aws-sdk/types" "3.775.0" "@smithy/property-provider" "^4.0.2" "@smithy/shared-ini-file-loader" "^4.0.2" @@ -532,12 +532,12 @@ bowser "^2.11.0" tslib "^2.6.2" -"@aws-sdk/util-user-agent-node@3.787.0": - version "3.787.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.787.0.tgz#58e63e99586cde1c1314f74b94596780321442f5" - integrity sha512-mG7Lz8ydfG4SF9e8WSXiPQ/Lsn3n8A5B5jtPROidafi06I3ckV2WxyMLdwG14m919NoS6IOfWHyRGSqWIwbVKA== +"@aws-sdk/util-user-agent-node@3.799.0": + version "3.799.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.799.0.tgz#8d0794add4efc79830143277f5faa27f16531c7a" + integrity sha512-iXBk38RbIWPF5Nq9O4AnktORAzXovSVqWYClvS1qbE7ILsnTLJbagU9HlU25O2iV5COVh1qZkwuP5NHQ2yTEyw== dependencies: - "@aws-sdk/middleware-user-agent" "3.787.0" + "@aws-sdk/middleware-user-agent" "3.799.0" "@aws-sdk/types" "3.775.0" "@smithy/node-config-provider" "^4.0.2" "@smithy/types" "^4.2.0" @@ -550,35 +550,35 @@ dependencies: tslib "^2.3.1" -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.22.13", "@babel/code-frame@^7.26.2": - version "7.26.2" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.26.2.tgz#4b5fab97d33338eff916235055f0ebc21e573a85" - integrity sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ== +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.22.13", "@babel/code-frame@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.27.1.tgz#200f715e66d52a23b221a9435534a91cc13ad5be" + integrity sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg== dependencies: - "@babel/helper-validator-identifier" "^7.25.9" + "@babel/helper-validator-identifier" "^7.27.1" js-tokens "^4.0.0" - picocolors "^1.0.0" + picocolors "^1.1.1" -"@babel/compat-data@^7.26.8": - version "7.26.8" - resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.26.8.tgz#821c1d35641c355284d4a870b8a4a7b0c141e367" - integrity sha512-oH5UPLMWR3L2wEFLnFJ1TZXqHufiTKAiLfqw5zkhS4dKXLJ10yVztfil/twG8EDTA4F/tvVNw9nOl4ZMslB8rQ== +"@babel/compat-data@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.27.1.tgz#db7cf122745e0a332c44e847ddc4f5e5221a43f6" + integrity sha512-Q+E+rd/yBzNQhXkG+zQnF58e4zoZfBedaxwzPmicKsiK3nt8iJYrSrDbjwFFDGC4f+rPafqRaPH6TsDoSvMf7A== "@babel/core@^7.11.6", "@babel/core@^7.12.3", "@babel/core@^7.23.9": - version "7.26.10" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.26.10.tgz#5c876f83c8c4dcb233ee4b670c0606f2ac3000f9" - integrity sha512-vMqyb7XCDMPvJFFOaT9kxtiRh42GwlZEg1/uIgtZshS5a/8OaduUfCi7kynKgc3Tw/6Uo2D+db9qBttghhmxwQ== + version "7.27.1" + resolved 
"https://registry.yarnpkg.com/@babel/core/-/core-7.27.1.tgz#89de51e86bd12246003e3524704c49541b16c3e6" + integrity sha512-IaaGWsQqfsQWVLqMn9OB92MNN7zukfVA4s7KKAI0KfrrDsZ0yhi5uV4baBuLuN7n3vsZpwP8asPPcVwApxvjBQ== dependencies: "@ampproject/remapping" "^2.2.0" - "@babel/code-frame" "^7.26.2" - "@babel/generator" "^7.26.10" - "@babel/helper-compilation-targets" "^7.26.5" - "@babel/helper-module-transforms" "^7.26.0" - "@babel/helpers" "^7.26.10" - "@babel/parser" "^7.26.10" - "@babel/template" "^7.26.9" - "@babel/traverse" "^7.26.10" - "@babel/types" "^7.26.10" + "@babel/code-frame" "^7.27.1" + "@babel/generator" "^7.27.1" + "@babel/helper-compilation-targets" "^7.27.1" + "@babel/helper-module-transforms" "^7.27.1" + "@babel/helpers" "^7.27.1" + "@babel/parser" "^7.27.1" + "@babel/template" "^7.27.1" + "@babel/traverse" "^7.27.1" + "@babel/types" "^7.27.1" convert-source-map "^2.0.0" debug "^4.1.0" gensync "^1.0.0-beta.2" @@ -594,24 +594,24 @@ jsesc "^2.5.1" source-map "^0.5.0" -"@babel/generator@^7.23.0", "@babel/generator@^7.26.10", "@babel/generator@^7.27.0", "@babel/generator@^7.7.2": - version "7.27.0" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.27.0.tgz#764382b5392e5b9aff93cadb190d0745866cbc2c" - integrity sha512-VybsKvpiN1gU1sdMZIp7FcqphVVKEwcuj02x73uvcHE0PTihx1nlBcowYWhDwjpoAXRv43+gDzyggGnn1XZhVw== +"@babel/generator@^7.23.0", "@babel/generator@^7.27.1", "@babel/generator@^7.7.2": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.27.1.tgz#862d4fad858f7208edd487c28b58144036b76230" + integrity sha512-UnJfnIpc/+JO0/+KRVQNGU+y5taA5vCbwN8+azkX6beii/ZF+enZJSOKo11ZSzGJjlNfJHfQtmQT8H+9TXPG2w== dependencies: - "@babel/parser" "^7.27.0" - "@babel/types" "^7.27.0" + "@babel/parser" "^7.27.1" + "@babel/types" "^7.27.1" "@jridgewell/gen-mapping" "^0.3.5" "@jridgewell/trace-mapping" "^0.3.25" jsesc "^3.0.2" -"@babel/helper-compilation-targets@^7.26.5": - version "7.27.0" - resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.0.tgz#de0c753b1cd1d9ab55d473c5a5cf7170f0a81880" - integrity sha512-LVk7fbXml0H2xH34dFzKQ7TDZ2G4/rVTOrq9V+icbbadjbVxxeFeDsNHv2SrZeWoA+6ZiTyWYWtScEIW07EAcA== +"@babel/helper-compilation-targets@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.1.tgz#eac1096c7374f161e4f33fc8ae38f4ddf122087a" + integrity sha512-2YaDd/Rd9E598B5+WIc8wJPmWETiiJXFYVE60oX8FDohv7rAUU3CQj+A1MgeEmcsk2+dQuEjIe/GDvig0SqL4g== dependencies: - "@babel/compat-data" "^7.26.8" - "@babel/helper-validator-option" "^7.25.9" + "@babel/compat-data" "^7.27.1" + "@babel/helper-validator-option" "^7.27.1" browserslist "^4.24.0" lru-cache "^5.1.1" semver "^6.3.1" @@ -638,27 +638,27 @@ dependencies: "@babel/types" "^7.24.7" -"@babel/helper-module-imports@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz#e7f8d20602ebdbf9ebbea0a0751fb0f2a4141715" - integrity sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw== +"@babel/helper-module-imports@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz#7ef769a323e2655e126673bb6d2d6913bbead204" + integrity sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w== dependencies: - "@babel/traverse" "^7.25.9" - "@babel/types" "^7.25.9" + 
"@babel/traverse" "^7.27.1" + "@babel/types" "^7.27.1" -"@babel/helper-module-transforms@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz#8ce54ec9d592695e58d84cd884b7b5c6a2fdeeae" - integrity sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw== +"@babel/helper-module-transforms@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.27.1.tgz#e1663b8b71d2de948da5c4fb2a20ca4f3ec27a6f" + integrity sha512-9yHn519/8KvTU5BjTVEEeIM3w9/2yXNKoD82JifINImhpKkARMJKPP59kLo+BafpdN5zgNeIcS4jsGDmd3l58g== dependencies: - "@babel/helper-module-imports" "^7.25.9" - "@babel/helper-validator-identifier" "^7.25.9" - "@babel/traverse" "^7.25.9" + "@babel/helper-module-imports" "^7.27.1" + "@babel/helper-validator-identifier" "^7.27.1" + "@babel/traverse" "^7.27.1" -"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.25.9", "@babel/helper-plugin-utils@^7.8.0": - version "7.26.5" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.26.5.tgz#18580d00c9934117ad719392c4f6585c9333cc35" - integrity sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg== +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.27.1", "@babel/helper-plugin-utils@^7.8.0": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz#ddb2f876534ff8013e6c2b299bf4d39b3c51d44c" + integrity sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw== "@babel/helper-split-export-declaration@^7.22.6": version "7.24.7" @@ -667,35 +667,35 @@ dependencies: "@babel/types" "^7.24.7" -"@babel/helper-string-parser@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz#1aabb72ee72ed35789b4bbcad3ca2862ce614e8c" - integrity sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA== +"@babel/helper-string-parser@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz#54da796097ab19ce67ed9f88b47bb2ec49367687" + integrity sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA== -"@babel/helper-validator-identifier@^7.16.7", "@babel/helper-validator-identifier@^7.25.9": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz#24b64e2c3ec7cd3b3c547729b8d16871f22cbdc7" - integrity sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ== +"@babel/helper-validator-identifier@^7.16.7", "@babel/helper-validator-identifier@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz#a7054dcc145a967dd4dc8fee845a57c1316c9df8" + integrity sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow== -"@babel/helper-validator-option@^7.25.9": - version "7.25.9" - resolved 
"https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz#86e45bd8a49ab7e03f276577f96179653d41da72" - integrity sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw== +"@babel/helper-validator-option@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz#fa52f5b1e7db1ab049445b421c4471303897702f" + integrity sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg== -"@babel/helpers@^7.26.10": - version "7.27.0" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.27.0.tgz#53d156098defa8243eab0f32fa17589075a1b808" - integrity sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg== +"@babel/helpers@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.27.1.tgz#ffc27013038607cdba3288e692c3611c06a18aa4" + integrity sha512-FCvFTm0sWV8Fxhpp2McP5/W53GPllQ9QeQ7SiqGWjMf/LVG07lFa5+pgK05IRhVwtvafT22KF+ZSnM9I545CvQ== dependencies: - "@babel/template" "^7.27.0" - "@babel/types" "^7.27.0" + "@babel/template" "^7.27.1" + "@babel/types" "^7.27.1" -"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.5", "@babel/parser@^7.20.7", "@babel/parser@^7.23.0", "@babel/parser@^7.23.9", "@babel/parser@^7.26.10", "@babel/parser@^7.27.0": - version "7.27.0" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.27.0.tgz#3d7d6ee268e41d2600091cbd4e145ffee85a44ec" - integrity sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg== +"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.5", "@babel/parser@^7.20.7", "@babel/parser@^7.23.0", "@babel/parser@^7.23.9", "@babel/parser@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.27.1.tgz#c55d5bed74449d1223701f1869b9ee345cc94cc9" + integrity sha512-I0dZ3ZpCrJ1c04OqlNsQcKiZlsrXf/kkE4FXzID9rIOYICsAbA8mMDzhW/luRNAHdCNt7os/u8wenklZDlUVUQ== dependencies: - "@babel/types" "^7.27.0" + "@babel/types" "^7.27.1" "@babel/plugin-syntax-async-generators@^7.8.4": version "7.8.4" @@ -726,11 +726,11 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-import-attributes@^7.24.7": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.26.0.tgz#3b1412847699eea739b4f2602c74ce36f6b0b0f7" - integrity sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A== + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz#34c017d54496f9b11b61474e7ea3dfd5563ffe07" + integrity sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" "@babel/plugin-syntax-import-meta@^7.10.4": version "7.10.4" @@ -747,11 +747,11 @@ "@babel/helper-plugin-utils" "^7.8.0" "@babel/plugin-syntax-jsx@^7.7.2": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.25.9.tgz#a34313a178ea56f1951599b929c1ceacee719290" - integrity sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA== + version "7.27.1" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz#2f9beb5eff30fa507c5532d107daac7b888fa34c" + integrity sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" "@babel/plugin-syntax-logical-assignment-operators@^7.10.4": version "7.10.4" @@ -810,20 +810,20 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-typescript@^7.7.2": - version "7.25.9" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.9.tgz#67dda2b74da43727cf21d46cf9afef23f4365399" - integrity sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ== + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz#5147d29066a793450f220c63fa3a9431b7e6dd18" + integrity sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ== dependencies: - "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-plugin-utils" "^7.27.1" -"@babel/template@^7.24.7", "@babel/template@^7.26.9", "@babel/template@^7.27.0", "@babel/template@^7.3.3": - version "7.27.0" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.27.0.tgz#b253e5406cc1df1c57dcd18f11760c2dbf40c0b4" - integrity sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA== +"@babel/template@^7.24.7", "@babel/template@^7.27.1", "@babel/template@^7.3.3": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.27.1.tgz#b9e4f55c17a92312774dfbdde1b3c01c547bbae2" + integrity sha512-Fyo3ghWMqkHHpHQCoBs2VnYjR4iWFFjguTDEqA5WgZDOrFesVjMhMM2FSqTKSoUSDO1VQtavj8NFpdRBEvJTtg== dependencies: - "@babel/code-frame" "^7.26.2" - "@babel/parser" "^7.27.0" - "@babel/types" "^7.27.0" + "@babel/code-frame" "^7.27.1" + "@babel/parser" "^7.27.1" + "@babel/types" "^7.27.1" "@babel/traverse@7.23.2": version "7.23.2" @@ -841,16 +841,16 @@ debug "^4.1.0" globals "^11.1.0" -"@babel/traverse@^7.25.9", "@babel/traverse@^7.26.10": - version "7.27.0" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.27.0.tgz#11d7e644779e166c0442f9a07274d02cd91d4a70" - integrity sha512-19lYZFzYVQkkHkl4Cy4WrAVcqBkgvV2YM2TU3xG6DIwO7O3ecbDPfW3yM3bjAGcqcQHi+CCtjMR3dIEHxsd6bA== - dependencies: - "@babel/code-frame" "^7.26.2" - "@babel/generator" "^7.27.0" - "@babel/parser" "^7.27.0" - "@babel/template" "^7.27.0" - "@babel/types" "^7.27.0" +"@babel/traverse@^7.27.1": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.27.1.tgz#4db772902b133bbddd1c4f7a7ee47761c1b9f291" + integrity sha512-ZCYtZciz1IWJB4U61UPu4KEaqyfj+r5T1Q5mqPo+IBpcG9kHv30Z0aD8LXPgC1trYa6rK0orRyAhqUgk4MjmEg== + dependencies: + "@babel/code-frame" "^7.27.1" + "@babel/generator" "^7.27.1" + "@babel/parser" "^7.27.1" + "@babel/template" "^7.27.1" + "@babel/types" "^7.27.1" debug "^4.3.1" globals "^11.1.0" @@ -862,13 +862,13 @@ "@babel/helper-validator-identifier" "^7.16.7" to-fast-properties "^2.0.0" -"@babel/types@^7.0.0", "@babel/types@^7.17.0", "@babel/types@^7.20.7", "@babel/types@^7.23.0", "@babel/types@^7.24.7", "@babel/types@^7.25.9", "@babel/types@^7.26.10", "@babel/types@^7.27.0", "@babel/types@^7.3.3": - version "7.27.0" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.27.0.tgz#ef9acb6b06c3173f6632d993ecb6d4ae470b4559" - integrity 
sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg== +"@babel/types@^7.0.0", "@babel/types@^7.17.0", "@babel/types@^7.20.7", "@babel/types@^7.23.0", "@babel/types@^7.24.7", "@babel/types@^7.27.1", "@babel/types@^7.3.3": + version "7.27.1" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.27.1.tgz#9defc53c16fc899e46941fc6901a9eea1c9d8560" + integrity sha512-+EzkxvLNfiUeKMgy/3luqfsCWFRXLb7U6wNQTk60tovuckwB15B191tJWvpp4HjiQWdJkCxO3Wbvc6jlk3Xb2Q== dependencies: - "@babel/helper-string-parser" "^7.25.9" - "@babel/helper-validator-identifier" "^7.25.9" + "@babel/helper-string-parser" "^7.27.1" + "@babel/helper-validator-identifier" "^7.27.1" "@bcoe/v8-coverage@^0.2.3": version "0.2.3" @@ -1240,10 +1240,10 @@ "@smithy/util-middleware" "^4.0.2" tslib "^2.6.2" -"@smithy/core@^3.2.0": - version "3.2.0" - resolved "https://registry.yarnpkg.com/@smithy/core/-/core-3.2.0.tgz#613b15f76eab9a6be396b1d5453b6bc8f22ba99c" - integrity sha512-k17bgQhVZ7YmUvA8at4af1TDpl0NDMBuBKJl8Yg0nrefwmValU+CnA5l/AriVdQNthU/33H3nK71HrLgqOPr1Q== +"@smithy/core@^3.3.0": + version "3.3.0" + resolved "https://registry.yarnpkg.com/@smithy/core/-/core-3.3.0.tgz#a6b141733fa530cb2f9b49a8e70ae98169c92cf0" + integrity sha512-r6gvs5OfRq/w+9unPm7B3po4rmWaGh0CIL/OwHntGGux7+RhOOZLGuurbeMgWV6W55ZuyMTypJLeH0vn/ZRaWQ== dependencies: "@smithy/middleware-serde" "^4.0.3" "@smithy/protocol-http" "^5.1.0" @@ -1334,12 +1334,12 @@ "@smithy/types" "^4.2.0" tslib "^2.6.2" -"@smithy/middleware-endpoint@^4.1.0": - version "4.1.0" - resolved "https://registry.yarnpkg.com/@smithy/middleware-endpoint/-/middleware-endpoint-4.1.0.tgz#cbfe47c5632942c960dbcf71fb02fd0d9985444d" - integrity sha512-xhLimgNCbCzsUppRTGXWkZywksuTThxaIB0HwbpsVLY5sceac4e1TZ/WKYqufQLaUy+gUSJGNdwD2jo3cXL0iA== +"@smithy/middleware-endpoint@^4.1.1": + version "4.1.1" + resolved "https://registry.yarnpkg.com/@smithy/middleware-endpoint/-/middleware-endpoint-4.1.1.tgz#d210cac102a645ea35541c17fda52c73f0b56304" + integrity sha512-z5RmcHxjvScL+LwEDU2mTNCOhgUs4lu5PGdF1K36IPRmUHhNFxNxgenSB7smyDiYD4vdKQ7CAZtG5cUErqib9w== dependencies: - "@smithy/core" "^3.2.0" + "@smithy/core" "^3.3.0" "@smithy/middleware-serde" "^4.0.3" "@smithy/node-config-provider" "^4.0.2" "@smithy/shared-ini-file-loader" "^4.0.2" @@ -1348,15 +1348,15 @@ "@smithy/util-middleware" "^4.0.2" tslib "^2.6.2" -"@smithy/middleware-retry@^4.1.0": - version "4.1.0" - resolved "https://registry.yarnpkg.com/@smithy/middleware-retry/-/middleware-retry-4.1.0.tgz#338ac1e025bbc6fd7b008152c4efa8bc0591acc9" - integrity sha512-2zAagd1s6hAaI/ap6SXi5T3dDwBOczOMCSkkYzktqN1+tzbk1GAsHNAdo/1uzxz3Ky02jvZQwbi/vmDA6z4Oyg== +"@smithy/middleware-retry@^4.1.1": + version "4.1.1" + resolved "https://registry.yarnpkg.com/@smithy/middleware-retry/-/middleware-retry-4.1.1.tgz#8c65dec6fca1f4883a10f724f9d6cafea19d0ba4" + integrity sha512-mBJOxn9aUYwcBUPQpKv9ifzrCn4EbhPUFguEZv3jB57YOMh0caS4P8HoLvUeNUI1nx4bIVH2SIbogbDfFI9DUA== dependencies: "@smithy/node-config-provider" "^4.0.2" "@smithy/protocol-http" "^5.1.0" "@smithy/service-error-classification" "^4.0.2" - "@smithy/smithy-client" "^4.2.0" + "@smithy/smithy-client" "^4.2.1" "@smithy/types" "^4.2.0" "@smithy/util-middleware" "^4.0.2" "@smithy/util-retry" "^4.0.2" @@ -1470,10 +1470,10 @@ "@smithy/util-utf8" "^1.1.0" tslib "^2.5.0" -"@smithy/signature-v4@^5.0.2": - version "5.0.2" - resolved "https://registry.yarnpkg.com/@smithy/signature-v4/-/signature-v4-5.0.2.tgz#363854e946fbc5bc206ff82e79ada5d5c14be640" - integrity 
sha512-Mz+mc7okA73Lyz8zQKJNyr7lIcHLiPYp0+oiqiMNc/t7/Kf2BENs5d63pEj7oPqdjaum6g0Fc8wC78dY1TgtXw== +"@smithy/signature-v4@^5.1.0": + version "5.1.0" + resolved "https://registry.yarnpkg.com/@smithy/signature-v4/-/signature-v4-5.1.0.tgz#2c56e5b278482b04383d84ea2c07b7f0a8eb8f63" + integrity sha512-4t5WX60sL3zGJF/CtZsUQTs3UrZEDO2P7pEaElrekbLqkWPYkgqNW1oeiNYC6xXifBnT9dVBOnNQRvOE9riU9w== dependencies: "@smithy/is-array-buffer" "^4.0.0" "@smithy/protocol-http" "^5.1.0" @@ -1484,13 +1484,13 @@ "@smithy/util-utf8" "^4.0.0" tslib "^2.6.2" -"@smithy/smithy-client@^4.2.0": - version "4.2.0" - resolved "https://registry.yarnpkg.com/@smithy/smithy-client/-/smithy-client-4.2.0.tgz#0c64cae4fb5bb4f26386e9b2c33fc9a3c24c9df3" - integrity sha512-Qs65/w30pWV7LSFAez9DKy0Koaoh3iHhpcpCCJ4waj/iqwsuSzJna2+vYwq46yBaqO5ZbP9TjUsATUNxrKeBdw== +"@smithy/smithy-client@^4.2.1": + version "4.2.1" + resolved "https://registry.yarnpkg.com/@smithy/smithy-client/-/smithy-client-4.2.1.tgz#21055bc038824de93aee778d040cdf9864e6114d" + integrity sha512-fbniZef60QdsBc4ZY0iyI8xbFHIiC/QRtPi66iE4ufjiE/aaz7AfUXzcWMkpO8r+QhLeNRIfmPchIG+3/QDZ6g== dependencies: - "@smithy/core" "^3.2.0" - "@smithy/middleware-endpoint" "^4.1.0" + "@smithy/core" "^3.3.0" + "@smithy/middleware-endpoint" "^4.1.1" "@smithy/middleware-stack" "^4.0.2" "@smithy/protocol-http" "^5.1.0" "@smithy/types" "^4.2.0" @@ -1574,27 +1574,27 @@ dependencies: tslib "^2.6.2" -"@smithy/util-defaults-mode-browser@^4.0.8": - version "4.0.8" - resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-4.0.8.tgz#77bc4590cdc928901b80f3482e79607a2cbcb150" - integrity sha512-ZTypzBra+lI/LfTYZeop9UjoJhhGRTg3pxrNpfSTQLd3AJ37r2z4AXTKpq1rFXiiUIJsYyFgNJdjWRGP/cbBaQ== +"@smithy/util-defaults-mode-browser@^4.0.9": + version "4.0.9" + resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-4.0.9.tgz#b70915229126eee4c1df18cd8f1e8edabade9c41" + integrity sha512-B8j0XsElvyhv6+5hlFf6vFV/uCSyLKcInpeXOGnOImX2mGXshE01RvPoGipTlRpIk53e6UfYj7WdDdgbVfXDZw== dependencies: "@smithy/property-provider" "^4.0.2" - "@smithy/smithy-client" "^4.2.0" + "@smithy/smithy-client" "^4.2.1" "@smithy/types" "^4.2.0" bowser "^2.11.0" tslib "^2.6.2" -"@smithy/util-defaults-mode-node@^4.0.8": - version "4.0.8" - resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-4.0.8.tgz#123b517efe6434977139b341d1f64b5f1e743aac" - integrity sha512-Rgk0Jc/UDfRTzVthye/k2dDsz5Xxs9LZaKCNPgJTRyoyBoeiNCnHsYGOyu1PKN+sDyPnJzMOz22JbwxzBp9NNA== +"@smithy/util-defaults-mode-node@^4.0.9": + version "4.0.9" + resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-4.0.9.tgz#2d50bcb178a214878a86563616a0b3499550a9d2" + integrity sha512-wTDU8P/zdIf9DOpV5qm64HVgGRXvqjqB/fJZTEQbrz3s79JHM/E7XkMm/876Oq+ZLHJQgnXM9QHDo29dlM62eA== dependencies: "@smithy/config-resolver" "^4.1.0" "@smithy/credential-provider-imds" "^4.0.2" "@smithy/node-config-provider" "^4.0.2" "@smithy/property-provider" "^4.0.2" - "@smithy/smithy-client" "^4.2.0" + "@smithy/smithy-client" "^4.2.1" "@smithy/types" "^4.2.0" tslib "^2.6.2" @@ -1868,7 +1868,7 @@ "@types/tough-cookie" "*" parse5 "^7.0.0" -"@types/json-schema@*", "@types/json-schema@^7.0.9": +"@types/json-schema@*", "@types/json-schema@^7.0.15", "@types/json-schema@^7.0.9": version "7.0.15" resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841" integrity 
sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== @@ -1887,16 +1887,16 @@ form-data "^4.0.0" "@types/node@*": - version "22.14.1" - resolved "https://registry.yarnpkg.com/@types/node/-/node-22.14.1.tgz#53b54585cec81c21eee3697521e31312d6ca1e6f" - integrity sha512-u0HuPQwe/dHrItgHHpmw3N2fYCR6x4ivMNbPHRkBVP4CvN+kiRrKHWk3i8tXiO/joPwXLMYvF9TTF0eqgHIuOw== + version "22.15.3" + resolved "https://registry.yarnpkg.com/@types/node/-/node-22.15.3.tgz#b7fb9396a8ec5b5dfb1345d8ac2502060e9af68b" + integrity sha512-lX7HFZeHf4QG/J7tBZqrCAXwz9J5RD56Y6MpP0eJkka8p+K0RY/yBTW7CYFJ4VGCclxqOLKmiGP5juQc6MKgcw== dependencies: undici-types "~6.21.0" "@types/node@^18.11.18", "@types/node@^18.19.70": - version "18.19.86" - resolved "https://registry.yarnpkg.com/@types/node/-/node-18.19.86.tgz#a7e1785289c343155578b9d84a0e3e924deb948b" - integrity sha512-fifKayi175wLyKyc5qUfyENhQ1dCNI1UNjp653d8kuYcPQN5JhX3dGuP/XmvPTg/xRBn1VTLpbmi+H/Mr7tLfQ== + version "18.19.87" + resolved "https://registry.yarnpkg.com/@types/node/-/node-18.19.87.tgz#690f000cc51e3c7f48bc00f7e86fac6eb550b709" + integrity sha512-OIAAu6ypnVZHmsHCeJ+7CCSub38QNBS9uceMQeg7K5Ur0Jr+wG9wEOEvvMbhp09pxD5czIUy/jND7s7Tb6Nw7A== dependencies: undici-types "~5.26.4" @@ -2374,9 +2374,9 @@ camelcase@^6.2.0: integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== caniuse-lite@^1.0.30001688: - version "1.0.30001715" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001715.tgz#bd325a37ad366e3fe90827d74062807a34fbaeb2" - integrity sha512-7ptkFGMm2OAOgvZpwgA4yjQ5SQbrNVGdRjzH0pBdy1Fasvcr+KAeECmbCAECzTuDuoX0FCY8KzUxjf9+9kfZEw== + version "1.0.30001716" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001716.tgz#39220dfbc58c85d9d4519e7090b656aa11ca4b85" + integrity sha512-49/c1+x3Kwz7ZIWt+4DvK3aMJy9oYXXG6/97JKsnjdCk/6n9vVyWL8NAwVt95Lwt9eigI10Hl782kDfZUUlRXw== chalk@^4.0.0, chalk@^4.0.2, chalk@^4.1.0: version "4.1.2" @@ -2607,9 +2607,9 @@ ejs@^3.1.10: jake "^10.8.5" electron-to-chromium@^1.5.73: - version "1.5.140" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.140.tgz#91d9279fe72963f22c5784cc7f3461b5fed34786" - integrity sha512-o82Rj+ONp4Ip7Cl1r7lrqx/pXhbp/lh9DpKcMNscFJdh8ebyRofnc7Sh01B4jx403RI0oqTBvlZ7OBIZLMr2+Q== + version "1.5.145" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.145.tgz#abd50700ac2c809e40a4694584f66711ee937fb6" + integrity sha512-pZ5EcTWRq/055MvSBgoFEyKf2i4apwfoqJbK/ak2jnFq8oHjZ+vzc3AhRcz37Xn+ZJfL58R666FLJx0YOK9yTw== emittery@^0.13.1: version "0.13.1" @@ -3754,9 +3754,9 @@ minimatch@^5.0.1: brace-expansion "^2.0.1" module-details-from-path@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/module-details-from-path/-/module-details-from-path-1.0.3.tgz#114c949673e2a8a35e9d35788527aa37b679da2b" - integrity sha512-ySViT69/76t8VhE1xXHK6Ch4NcDd26gx0MzKXLO+F7NOtnqH68d9zF94nT8ZWSxXh8ELOERsnJO/sWt1xZYw5A== + version "1.0.4" + resolved "https://registry.yarnpkg.com/module-details-from-path/-/module-details-from-path-1.0.4.tgz#b662fdcd93f6c83d3f25289da0ce81c8d9685b94" + integrity sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w== ms@^2.0.0, ms@^2.1.3: version "2.1.3" @@ -3832,9 +3832,9 @@ onetime@^5.1.2: mimic-fn "^2.1.0" openai@^4.74.0: - version "4.95.1" - resolved "https://registry.yarnpkg.com/openai/-/openai-4.95.1.tgz#7157697c2b150a546b13eb860180c4a6058051da" - integrity 
sha512-IqJy+ymeW+k/Wq+2YVN3693OQMMcODRtHEYOlz263MdUwnN/Dwdl9c2EXSxLLtGEHkSHAfvzpDMHI5MaWJKXjQ== + version "4.96.2" + resolved "https://registry.yarnpkg.com/openai/-/openai-4.96.2.tgz#a7d360597f273a5f6ed8dd22914e598013022fa4" + integrity sha512-R2XnxvMsizkROr7BV3uNp1q/3skwPZ7fmPjO1bXLnfB4Tu5xKxrT1EVwzjhxn0MZKBKAvOaGWS63jTMN6KrIXA== dependencies: "@types/node" "^18.11.18" "@types/node-fetch" "^2.6.4" @@ -3907,7 +3907,7 @@ path-parse@^1.0.7: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== -picocolors@^1.0.0, picocolors@^1.1.1: +picocolors@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== @@ -4087,7 +4087,7 @@ saxes@^6.0.0: dependencies: xmlchars "^2.2.0" -schema-utils@^4.3.0: +schema-utils@^4.3.0, schema-utils@^4.3.2: version "4.3.2" resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-4.3.2.tgz#0c10878bf4a73fd2b1dfd14b9462b26788c806ae" integrity sha512-Gn/JaSk/Mt9gYubxTtSn/QCV4em9mpAPiR1rqy/Ocu19u/G9J5WWdNoUT4SiV6mFC3y6cxyFcFwdzPM3FgxGAQ== @@ -4426,9 +4426,9 @@ type-fest@^0.21.3: integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== type-fest@^4.39.1: - version "4.40.0" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-4.40.0.tgz#62bc09caccb99a75e1ad6b9b4653e8805e5e1eee" - integrity sha512-ABHZ2/tS2JkvH1PEjxFDTUWC8dB5OsIGZP4IFLhR293GqT5Y5qB1WwL2kMPYhQW9DVgVD8Hd7I8gjwPIf5GFkw== + version "4.40.1" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-4.40.1.tgz#d78a09f08dd1081a434dd377967650cfd565401d" + integrity sha512-9YvLNnORDpI+vghLU/Nf+zSv0kL47KbVJ1o3sKgoTefl6i+zebxbiDQWoe/oWWqPhIgQdRZRT1KA9sCPL810SA== typescript@~5.7.2: version "5.7.3" @@ -4528,12 +4528,13 @@ webpack-sources@^3.2.3: integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w== webpack@^5.97.1: - version "5.99.6" - resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.99.6.tgz#0d6ba7ce1d3609c977f193d2634d54e5cf36379d" - integrity sha512-TJOLrJ6oeccsGWPl7ujCYuc0pIq2cNsuD6GZDma8i5o5Npvcco/z+NKvZSFsP0/x6SShVb0+X2JK/JHUjKY9dQ== + version "5.99.7" + resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.99.7.tgz#60201c1ca66da046b07d006c2f6e0cc5e8a7bdba" + integrity sha512-CNqKBRMQjwcmKR0idID5va1qlhrqVUKpovi+Ec79ksW8ux7iS1+A6VqzfZXgVYCFRKl7XL5ap3ZoMpwBJxcg0w== dependencies: "@types/eslint-scope" "^3.7.7" "@types/estree" "^1.0.6" + "@types/json-schema" "^7.0.15" "@webassemblyjs/ast" "^1.14.1" "@webassemblyjs/wasm-edit" "^1.14.1" "@webassemblyjs/wasm-parser" "^1.14.1" @@ -4550,7 +4551,7 @@ webpack@^5.97.1: loader-runner "^4.2.0" mime-types "^2.1.27" neo-async "^2.6.2" - schema-utils "^4.3.0" + schema-utils "^4.3.2" tapable "^2.1.1" terser-webpack-plugin "^5.3.11" watchpack "^2.4.1"