diff --git a/src/app/v1/_lib/codex/chat-completions-handler.ts b/src/app/v1/_lib/codex/chat-completions-handler.ts
index 1aa32aa07..0ee68ed91 100644
--- a/src/app/v1/_lib/codex/chat-completions-handler.ts
+++ b/src/app/v1/_lib/codex/chat-completions-handler.ts
@@ -20,6 +20,32 @@ import { ProxyStatusTracker } from "@/lib/proxy-status-tracker";
 import { SessionTracker } from "@/lib/session-tracker";
 import type { ChatCompletionRequest } from "./types/compatible";
 
+function normalizeResponseInput(request: Record<string, unknown>): void {
+  if (!("input" in request)) return;
+
+  const input = request.input;
+
+  // The OpenAI Responses API supports a string shortcut:
+  // { model, input: "hello" } -> [{ role: "user", content: [{ type: "input_text", text: "hello" }] }]
+  if (typeof input === "string") {
+    const text = input.trim();
+    request.input = text.length
+      ? [
+          {
+            role: "user",
+            content: [{ type: "input_text", text }],
+          },
+        ]
+      : [];
+    return;
+  }
+
+  // Some clients may send a single object instead of an array. Wrap it for compatibility.
+  if (input && typeof input === "object" && !Array.isArray(input)) {
+    request.input = [input];
+  }
+}
+
 /**
  * Handle OpenAI Compatible API requests (/v1/chat/completions)
  *
@@ -43,7 +69,12 @@ export async function handleChatCompletions(c: Context): Promise<Response> {
 
   // Format detection
   const isOpenAIFormat = "messages" in request && Array.isArray(request.messages);
-  const isResponseAPIFormat = "input" in request && Array.isArray(request.input);
+  const inputValue = (request as Record<string, unknown>).input;
+  const isResponseAPIFormat =
+    "input" in request &&
+    (Array.isArray(inputValue) ||
+      typeof inputValue === "string" ||
+      (typeof inputValue === "object" && inputValue !== null));
 
   if (!isOpenAIFormat && !isResponseAPIFormat) {
     const response = new Response(
@@ -158,6 +189,9 @@ export async function handleChatCompletions(c: Context): Promise<Response> {
       );
       return await attachSessionIdToErrorResponse(session.sessionId, response);
     }
+
+    // Normalize for downstream guards/filters.
+    normalizeResponseInput(request as Record<string, unknown>);
   }
 
   const type = session.isCountTokensRequest() ? RequestType.COUNT_TOKENS : RequestType.CHAT;
diff --git a/tests/unit/proxy/chat-completions-handler-guard-pipeline.test.ts b/tests/unit/proxy/chat-completions-handler-guard-pipeline.test.ts
index fb69e9c46..53b64944f 100644
--- a/tests/unit/proxy/chat-completions-handler-guard-pipeline.test.ts
+++ b/tests/unit/proxy/chat-completions-handler-guard-pipeline.test.ts
@@ -415,6 +415,45 @@ describe("handleChatCompletions: must go through GuardPipeline", () => {
     ]);
   });
 
+  test("Response(input) supports string input (OpenAI shortcut)", async () => {
+    h.session = createSession({
+      model: "gpt-4.1-mini",
+      input: "hi",
+      stream: false,
+    });
+
+    const { handleChatCompletions } = await import("@/app/v1/_lib/codex/chat-completions-handler");
+    const res = await handleChatCompletions({} as any);
+
+    expect(res.status).toBe(200);
+    expect(h.session.originalFormat).toBe("response");
+    expect((h.session.request.message as any).input).toEqual([
+      {
+        role: "user",
+        content: [{ type: "input_text", text: "hi" }],
+      },
+    ]);
+    expect(h.callOrder).toEqual([
+      "auth",
+      "sensitive",
+      "client",
+      "model",
+      "version",
+      "probe",
+      "session",
+      "warmup",
+      "requestFilter",
+      "rateLimit",
+      "provider",
+      "providerRequestFilter",
+      "messageContext",
+      "concurrencyInc",
+      "forward",
+      "dispatch",
+      "concurrencyDec",
+    ]);
+  });
+
   test("when sessionId is unassigned, concurrency must not be counted (branch coverage)", async () => {
     h.assignSessionId = false;
     h.session = createSession({
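
For reviewers, a self-contained sketch (not part of the patch) of the three `input` shapes the widened detection now accepts and what `normalizeResponseInput` produces for each. The inline `normalize` helper and the `RequestBody` alias below are illustrative copies that mirror the diff above, not the module's exports:

```ts
// Reviewer sketch: mirrors normalizeResponseInput from the diff above.
type RequestBody = Record<string, unknown>;

function normalize(request: RequestBody): void {
  if (!("input" in request)) return;
  const input = request.input;
  // String shortcut -> single user message (empty/whitespace -> empty array).
  if (typeof input === "string") {
    const text = input.trim();
    request.input = text.length
      ? [{ role: "user", content: [{ type: "input_text", text }] }]
      : [];
    return;
  }
  // Bare object -> wrapped in a one-element array; arrays pass through untouched.
  if (input && typeof input === "object" && !Array.isArray(input)) {
    request.input = [input];
  }
}

const shortcut: RequestBody = { model: "gpt-4.1-mini", input: "hi" };
normalize(shortcut);
// shortcut.input -> [{ role: "user", content: [{ type: "input_text", text: "hi" }] }]

const bareObject: RequestBody = {
  model: "gpt-4.1-mini",
  input: { role: "user", content: [{ type: "input_text", text: "hi" }] },
};
normalize(bareObject);
// bareObject.input -> [{ role: "user", content: [{ type: "input_text", text: "hi" }] }]

const alreadyArray: RequestBody = { model: "gpt-4.1-mini", input: [] };
normalize(alreadyArray);
// alreadyArray.input -> [] (unchanged)
```

Normalizing in place after the guard pipeline (rather than during format detection) keeps the guards and filters operating on the request the client actually sent, while downstream consumers can rely on `input` always being an array.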