diff --git a/examples/bun-cli/cli.ts b/examples/bun-cli/cli.ts
index c9855463..bfa99c64 100755
--- a/examples/bun-cli/cli.ts
+++ b/examples/bun-cli/cli.ts
@@ -13,7 +13,7 @@ const inputImage = Bun.file(inputPath);
console.log("Editing image...");
const image = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
prompt,
data: inputImage,
});
diff --git a/examples/express-api/src/server.ts b/examples/express-api/src/server.ts
index 5994ee41..f0b53bc2 100644
--- a/examples/express-api/src/server.ts
+++ b/examples/express-api/src/server.ts
@@ -37,7 +37,7 @@ app.post("/api/image/edit", async (req, res) => {
}
const blob = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
prompt,
data: parseBase64DataUrl(imageDataUrl, "image"),
});
@@ -60,7 +60,7 @@ app.post("/api/video/generate", async (req, res) => {
}
const job = await client.queue.submit({
- model: models.video("lucy-pro-v2v"),
+ model: models.video("lucy-clip"),
prompt,
data: parseBase64DataUrl(videoDataUrl, "video"),
});
@@ -103,7 +103,7 @@ app.post("/api/video/generate-sync", async (req, res) => {
}
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-pro-v2v"),
+ model: models.video("lucy-clip"),
prompt,
data: parseBase64DataUrl(videoDataUrl, "video"),
});
diff --git a/examples/express-proxy/README.md b/examples/express-proxy/README.md
index 94fe4dbe..db23d10f 100644
--- a/examples/express-proxy/README.md
+++ b/examples/express-proxy/README.md
@@ -70,7 +70,7 @@ const client = createDecartClient({
});
const blob = await client.process({
- model: models.image('lucy-pro-i2i'),
+ model: models.image('lucy-image-2'),
prompt: 'Turn this into a watercolor painting',
data: sourceFile,
});
diff --git a/examples/express-proxy/public/index.html b/examples/express-proxy/public/index.html
index 7a54fd77..b8b4a009 100644
--- a/examples/express-proxy/public/index.html
+++ b/examples/express-proxy/public/index.html
@@ -276,7 +276,7 @@
🎨 Decart SDK Proxy Example
showStatus('Editing image...', 'info');
const blob = await decartClient.process({
- model: models.image('lucy-pro-i2i'),
+ model: models.image('lucy-image-2'),
prompt,
data: sourceImage,
});
diff --git a/examples/hono-edge/src/index.ts b/examples/hono-edge/src/index.ts
index af56fb87..935238aa 100644
--- a/examples/hono-edge/src/index.ts
+++ b/examples/hono-edge/src/index.ts
@@ -52,7 +52,7 @@ app.post("/api/image/generate", async (c) => {
}
const blob = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
prompt,
data: parseBase64DataUrl(imageDataUrl, "image"),
});
@@ -76,7 +76,7 @@ app.post("/api/video/generate", async (c) => {
}
const job = await client.queue.submit({
- model: models.video("lucy-pro-v2v"),
+ model: models.video("lucy-clip"),
prompt,
data: parseBase64DataUrl(videoDataUrl, "video"),
});
diff --git a/examples/nextjs-proxy/README.md b/examples/nextjs-proxy/README.md
index 01c51434..cd6f41d5 100644
--- a/examples/nextjs-proxy/README.md
+++ b/examples/nextjs-proxy/README.md
@@ -56,7 +56,7 @@ const client = createDecartClient({
});
const blob = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
prompt: "Turn this into a watercolor painting",
data: sourceFile,
});
diff --git a/examples/nextjs-proxy/app/page.tsx b/examples/nextjs-proxy/app/page.tsx
index eef72b44..f132f3b2 100644
--- a/examples/nextjs-proxy/app/page.tsx
+++ b/examples/nextjs-proxy/app/page.tsx
@@ -23,7 +23,7 @@ export default function Home() {
try {
const client = createDecartClient({ proxy: PROXY_ROUTE });
const blob = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
prompt,
data: sourceImage,
});
diff --git a/examples/nextjs-realtime/README.md b/examples/nextjs-realtime/README.md
index 0a8bb542..34c28119 100644
--- a/examples/nextjs-realtime/README.md
+++ b/examples/nextjs-realtime/README.md
@@ -43,8 +43,8 @@ pnpm dev
## Models
-This example uses `mirage_v2` for style transformation. You can also use:
+This example uses `lucy-restyle-2` for style transformation. You can also use:
-- `mirage` - MirageLSD video restyling model (older)
-- `lucy_v2v_720p_rt` - Lucy for video editing (add objects, change elements)
-- `lucy_2_rt` - Lucy 2 for video editing with reference image support (better quality)
+- `lucy-restyle` - MirageLSD video restyling model (older)
+- `lucy` - Lucy for video editing (add objects, change elements)
+- `lucy-2` - Lucy 2 for video editing with reference image support (better quality)
diff --git a/examples/nextjs-realtime/components/video-stream.tsx b/examples/nextjs-realtime/components/video-stream.tsx
index 65559414..449d8d96 100644
--- a/examples/nextjs-realtime/components/video-stream.tsx
+++ b/examples/nextjs-realtime/components/video-stream.tsx
@@ -23,7 +23,7 @@ export function VideoStream({ prompt }: VideoStreamProps) {
async function start() {
try {
- const model = models.realtime("mirage_v2");
+ const model = models.realtime("lucy-restyle-2");
setStatus("requesting camera...");
const stream = await navigator.mediaDevices.getUserMedia({
diff --git a/examples/react-vite-weather-outfit/README.md b/examples/react-vite-weather-outfit/README.md
index 209c02e2..204f9f31 100644
--- a/examples/react-vite-weather-outfit/README.md
+++ b/examples/react-vite-weather-outfit/README.md
@@ -1,6 +1,6 @@
# React + Vite Weather Outfit
-A simple React + Vite demo that generates weather-appropriate outfits from an input photo using the Decart SDK (image-to-image with the `lucy-pro-i2i` model).
+A simple React + Vite demo that generates weather-appropriate outfits from an input photo using the Decart SDK (image-to-image with the `lucy-image-2` model).
## Setup
@@ -35,9 +35,9 @@ pnpm dev
## How it works
1. The chosen image (sample or uploaded) is sent to Decart.
-2. The `lucy-pro-i2i` model applies a prompt.
+2. The `lucy-image-2` model applies a prompt.
3. The processed image is returned.
## Model
-`lucy-pro-i2i` — image-to-image style/editing model used to restyle outfits for the selected weather.
+`lucy-image-2` — image-to-image style/editing model used to restyle outfits for the selected weather.
diff --git a/examples/react-vite-weather-outfit/src/App.tsx b/examples/react-vite-weather-outfit/src/App.tsx
index 0a4a4ba4..da457e7b 100644
--- a/examples/react-vite-weather-outfit/src/App.tsx
+++ b/examples/react-vite-weather-outfit/src/App.tsx
@@ -36,7 +36,7 @@ function App() {
setResultFile(undefined);
try {
const resultBlob = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
data: imageFile,
prompt: `A person wearing an outfit for ${condition.toLowerCase()} conditions`,
});
diff --git a/examples/react-vite/README.md b/examples/react-vite/README.md
index e995b58d..a5c438fb 100644
--- a/examples/react-vite/README.md
+++ b/examples/react-vite/README.md
@@ -41,8 +41,8 @@ pnpm dev
## Models
-This example uses `mirage_v2` for style transformation. You can also use:
+This example uses `lucy-restyle-2` for style transformation. You can also use:
-- `mirage` - MirageLSD video restyling model (older)
-- `lucy_v2v_720p_rt` - Lucy for video editing (add objects, change elements)
-- `lucy_2_rt` - Lucy 2 for video editing with reference image support (better quality)
+- `lucy-restyle` - MirageLSD video restyling model (older)
+- `lucy` - Lucy for video editing (add objects, change elements)
+- `lucy-2` - Lucy 2 for video editing with reference image support (better quality)
diff --git a/examples/react-vite/src/components/VideoStream.tsx b/examples/react-vite/src/components/VideoStream.tsx
index b9dbc755..0a963511 100644
--- a/examples/react-vite/src/components/VideoStream.tsx
+++ b/examples/react-vite/src/components/VideoStream.tsx
@@ -16,7 +16,7 @@ export function VideoStream({ prompt }: VideoStreamProps) {
async function start() {
try {
- const model = models.realtime("mirage_v2");
+ const model = models.realtime("lucy-restyle-2");
setStatus("requesting camera...");
const stream = await navigator.mediaDevices.getUserMedia({
diff --git a/examples/sdk-core/README.md b/examples/sdk-core/README.md
index f1caa036..6d749b8b 100644
--- a/examples/sdk-core/README.md
+++ b/examples/sdk-core/README.md
@@ -31,15 +31,15 @@ pnpm tsx image/image-to-image.ts
Image models use the synchronous Process API - they return immediately with a Blob.
-- `image/image-to-image.ts` - Transform existing image with a prompt (`lucy-pro-i2i`)
+- `image/image-to-image.ts` - Transform existing image with a prompt (`lucy-image-2`)
### Video Generation
Video models use the asynchronous Queue API - jobs are submitted and polled for completion.
-- `video/video-to-video.ts` - Transform existing video with a prompt (`lucy-pro-v2v`)
-- `video/video-editing.ts` - Edit video with prompt, reference image, or both (`lucy-2-v2v`)
-- `video/long-form-video-restyle.ts` - Transform existing video with `lucy-restyle-v2v`
+- `video/video-to-video.ts` - Transform existing video with a prompt (`lucy-clip`)
+- `video/video-editing.ts` - Edit video with prompt, reference image, or both (`lucy-2`)
+- `video/long-form-video-restyle.ts` - Transform existing video with `lucy-restyle-2`
- `video/manual-polling.ts` - Manual job status polling
### Realtime (Browser-only)
@@ -64,7 +64,7 @@ See `examples/nextjs-realtime` or `examples/react-vite` for runnable demos.
```typescript
// Image-to-image (edit image with prompt)
const blob = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
prompt: "Transform to watercolor style",
data: imageBlob,
});
@@ -75,7 +75,7 @@ const blob = await client.process({
```typescript
// Automatic polling (video-to-video)
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-pro-v2v"),
+ model: models.video("lucy-clip"),
prompt: "Make it look like a watercolor painting",
data: videoBlob,
onStatusChange: (job) => console.log(job.status),
@@ -91,7 +91,7 @@ const blob = await client.queue.result(job.job_id);
```typescript
const realtimeClient = await client.realtime.connect(stream, {
- model: models.realtime("mirage_v2"),
+ model: models.realtime("lucy-restyle-2"),
onRemoteStream: (transformedStream) => { ... },
initialState: { prompt: { text: "anime style", enhance: true } },
});
@@ -106,7 +106,7 @@ realtimeClient.disconnect();
```typescript
// Option 1: Use playAudio() to inject audio
const realtimeClient = await client.realtime.connect(null, {
- model: models.realtime("live_avatar"),
+ model: models.realtime("live-avatar"),
onRemoteStream: (videoStream) => { ... },
initialState: {
image: "https://example.com/avatar.png",
@@ -118,7 +118,7 @@ await realtimeClient.playAudio(audioBlob);
// Option 2: Use mic input directly
const micStream = await navigator.mediaDevices.getUserMedia({ audio: true, video: false });
const realtimeClient = await client.realtime.connect(micStream, {
- model: models.realtime("live_avatar"),
+ model: models.realtime("live-avatar"),
onRemoteStream: (videoStream) => { ... },
initialState: {
image: avatarFile,
diff --git a/examples/sdk-core/image/image-to-image.ts b/examples/sdk-core/image/image-to-image.ts
index d0750a81..7d360dde 100644
--- a/examples/sdk-core/image/image-to-image.ts
+++ b/examples/sdk-core/image/image-to-image.ts
@@ -16,7 +16,7 @@ run(async () => {
// const referenceImage = fs.readFileSync("reference.png");
const blob = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
prompt: "Transform to watercolor painting style",
data: new Blob([inputImage]),
// reference_image: new Blob([referenceImage]),
diff --git a/examples/sdk-core/realtime/connection-events.ts b/examples/sdk-core/realtime/connection-events.ts
index fd49469b..259d63a8 100644
--- a/examples/sdk-core/realtime/connection-events.ts
+++ b/examples/sdk-core/realtime/connection-events.ts
@@ -7,7 +7,7 @@
import { createDecartClient, type DecartSDKError, models } from "@decartai/sdk";
async function main() {
- const model = models.realtime("mirage_v2");
+ const model = models.realtime("lucy-restyle-2");
const stream = await navigator.mediaDevices.getUserMedia({
video: {
diff --git a/examples/sdk-core/realtime/custom-base-url.ts b/examples/sdk-core/realtime/custom-base-url.ts
index 2d76ff58..dbdcb081 100644
--- a/examples/sdk-core/realtime/custom-base-url.ts
+++ b/examples/sdk-core/realtime/custom-base-url.ts
@@ -7,7 +7,7 @@
import { createDecartClient, models } from "@decartai/sdk";
async function main() {
- const model = models.realtime("mirage_v2");
+ const model = models.realtime("lucy-restyle-2");
// Get webcam stream with model-specific settings
const stream = await navigator.mediaDevices.getUserMedia({
diff --git a/examples/sdk-core/realtime/live-avatar.ts b/examples/sdk-core/realtime/live-avatar.ts
index 10cb72e9..d9617f1f 100644
--- a/examples/sdk-core/realtime/live-avatar.ts
+++ b/examples/sdk-core/realtime/live-avatar.ts
@@ -10,7 +10,7 @@ import { createDecartClient, models } from "@decartai/sdk";
* Pass null for stream - the SDK creates an internal audio stream
*/
async function withPlayAudio() {
- const model = models.realtime("live_avatar");
+ const model = models.realtime("live-avatar");
const client = createDecartClient({
apiKey: process.env.DECART_API_KEY!,
@@ -43,7 +43,7 @@ async function withPlayAudio() {
* Pass user's audio stream - avatar speaks what user says
*/
async function withMicInput() {
- const model = models.realtime("live_avatar");
+ const model = models.realtime("live-avatar");
// Get user's microphone stream
const micStream = await navigator.mediaDevices.getUserMedia({
diff --git a/examples/sdk-core/realtime/lucy-2.1-vton.ts b/examples/sdk-core/realtime/lucy-2.1-vton.ts
new file mode 100644
index 00000000..e079dd75
--- /dev/null
+++ b/examples/sdk-core/realtime/lucy-2.1-vton.ts
@@ -0,0 +1,48 @@
+/**
+ * Browser-only example - requires WebRTC APIs
+ * Lucy 2.1 VTON (Virtual Try-On) for realtime garment/outfit transfer
+ * See examples/nextjs-realtime or examples/react-vite for runnable demos
+ */
+
+import { createDecartClient, models } from "@decartai/sdk";
+
+async function main() {
+ const model = models.realtime("lucy-2.1-vton");
+
+ const stream = await navigator.mediaDevices.getUserMedia({
+ audio: true,
+ video: {
+ frameRate: model.fps,
+ width: model.width,
+ height: model.height,
+ },
+ });
+
+ const client = createDecartClient({
+ apiKey: process.env.DECART_API_KEY!,
+ });
+
+ const realtimeClient = await client.realtime.connect(stream, {
+ model,
+ onRemoteStream: (editedStream) => {
+ const video = document.getElementById("output") as HTMLVideoElement;
+ video.srcObject = editedStream;
+ },
+ initialState: {
+ prompt: {
+ text: "Wearing a red leather jacket",
+ enhance: true,
+ },
+ },
+ });
+
+ // Use a reference image of a garment to try on
+ await realtimeClient.set({
+ prompt: "Wearing the outfit from the reference image",
+ image: "https://example.com/outfit-reference.png",
+ });
+
+ console.log("Session ID:", realtimeClient.sessionId);
+}
+
+main();
diff --git a/examples/sdk-core/realtime/lucy-2.1.ts b/examples/sdk-core/realtime/lucy-2.1.ts
new file mode 100644
index 00000000..4daea3cf
--- /dev/null
+++ b/examples/sdk-core/realtime/lucy-2.1.ts
@@ -0,0 +1,55 @@
+/**
+ * Browser-only example - requires WebRTC APIs
+ * Lucy 2.1 for realtime video editing with reference image + prompt support
+ * See examples/nextjs-realtime or examples/react-vite for runnable demos
+ */
+
+import { createDecartClient, models } from "@decartai/sdk";
+
+async function main() {
+ const model = models.realtime("lucy-2.1");
+
+ const stream = await navigator.mediaDevices.getUserMedia({
+ audio: true,
+ video: {
+ frameRate: model.fps,
+ width: model.width,
+ height: model.height,
+ },
+ });
+
+ const client = createDecartClient({
+ apiKey: process.env.DECART_API_KEY!,
+ });
+
+ const realtimeClient = await client.realtime.connect(stream, {
+ model,
+ onRemoteStream: (editedStream) => {
+ const video = document.getElementById("output") as HTMLVideoElement;
+ video.srcObject = editedStream;
+ },
+ initialState: {
+ prompt: {
+ text: "Add a small dog in the background",
+ enhance: true,
+ },
+ },
+ });
+
+ // set() replaces the full state — prompt + image atomically in a single message
+ await realtimeClient.set({
+ prompt: "A person wearing a superhero costume",
+ enhance: true,
+ image: "https://example.com/superhero-reference.png",
+ });
+
+ // Prompt-only set() clears the reference image.
+ await realtimeClient.set({ prompt: "Add sunglasses to the person" });
+
+ // setPrompt() as syntactic sugar for set() with prompt only
+ realtimeClient.setPrompt("Change the person's shirt to red");
+
+ console.log("Session ID:", realtimeClient.sessionId);
+}
+
+main();
diff --git a/examples/sdk-core/realtime/lucy-2.ts b/examples/sdk-core/realtime/lucy-2.ts
index 0db27dc3..f6d9defc 100644
--- a/examples/sdk-core/realtime/lucy-2.ts
+++ b/examples/sdk-core/realtime/lucy-2.ts
@@ -7,7 +7,7 @@
import { createDecartClient, models } from "@decartai/sdk";
async function main() {
- const model = models.realtime("lucy_2_rt");
+ const model = models.realtime("lucy-2");
const stream = await navigator.mediaDevices.getUserMedia({
audio: true,
diff --git a/examples/sdk-core/realtime/lucy-v2v-720p.ts b/examples/sdk-core/realtime/lucy-v2v-720p.ts
index 0731ab29..7ddb86ae 100644
--- a/examples/sdk-core/realtime/lucy-v2v-720p.ts
+++ b/examples/sdk-core/realtime/lucy-v2v-720p.ts
@@ -7,7 +7,7 @@
import { createDecartClient, models } from "@decartai/sdk";
async function main() {
- const model = models.realtime("lucy_v2v_720p_rt");
+ const model = models.realtime("lucy");
const stream = await navigator.mediaDevices.getUserMedia({
audio: true,
diff --git a/examples/sdk-core/realtime/mirage-basic.ts b/examples/sdk-core/realtime/mirage-basic.ts
index 0c8ae909..90c2753d 100644
--- a/examples/sdk-core/realtime/mirage-basic.ts
+++ b/examples/sdk-core/realtime/mirage-basic.ts
@@ -6,7 +6,7 @@
import { createDecartClient, models } from "@decartai/sdk";
async function main() {
- const model = models.realtime("mirage");
+ const model = models.realtime("lucy-restyle");
// Get webcam stream with model-specific settings
const stream = await navigator.mediaDevices.getUserMedia({
diff --git a/examples/sdk-core/realtime/mirage-v2-basic.ts b/examples/sdk-core/realtime/mirage-v2-basic.ts
index 75601117..f8eb6318 100644
--- a/examples/sdk-core/realtime/mirage-v2-basic.ts
+++ b/examples/sdk-core/realtime/mirage-v2-basic.ts
@@ -7,7 +7,7 @@
import { createDecartClient, models } from "@decartai/sdk";
async function main() {
- const model = models.realtime("mirage_v2");
+ const model = models.realtime("lucy-restyle-2");
// Get webcam stream with model-specific settings
const stream = await navigator.mediaDevices.getUserMedia({
diff --git a/examples/sdk-core/realtime/prompt-update.ts b/examples/sdk-core/realtime/prompt-update.ts
index 0ea51a67..be7fc1ff 100644
--- a/examples/sdk-core/realtime/prompt-update.ts
+++ b/examples/sdk-core/realtime/prompt-update.ts
@@ -7,7 +7,7 @@
import { createDecartClient, models } from "@decartai/sdk";
async function main() {
- const model = models.realtime("mirage_v2");
+ const model = models.realtime("lucy-restyle-2");
const stream = await navigator.mediaDevices.getUserMedia({
video: {
diff --git a/examples/sdk-core/video/long-form-video-restyle.ts b/examples/sdk-core/video/long-form-video-restyle.ts
index a6e91ed9..52fbe2ed 100644
--- a/examples/sdk-core/video/long-form-video-restyle.ts
+++ b/examples/sdk-core/video/long-form-video-restyle.ts
@@ -12,13 +12,13 @@ run(async () => {
apiKey,
});
- console.log("Editing video with lucy-restyle-v2v...");
+ console.log("Editing video with lucy-restyle-2...");
const inputVideo = fs.readFileSync("input.mp4");
// Option 1: Use a text prompt
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-restyle-v2v"),
+ model: models.video("lucy-restyle-2"),
prompt: "Transform to anime style",
enhance_prompt: true,
data: new Blob([inputVideo]),
@@ -33,7 +33,7 @@ run(async () => {
//
// const referenceImage = fs.readFileSync("reference.png");
// const result = await client.queue.submitAndPoll({
- // model: models.video("lucy-restyle-v2v"),
+ // model: models.video("lucy-restyle-2"),
// reference_image: new Blob([referenceImage]),
// data: new Blob([inputVideo]),
// onStatusChange: (job) => {
diff --git a/examples/sdk-core/video/manual-polling.ts b/examples/sdk-core/video/manual-polling.ts
index 04241ece..b5718c38 100644
--- a/examples/sdk-core/video/manual-polling.ts
+++ b/examples/sdk-core/video/manual-polling.ts
@@ -13,7 +13,7 @@ run(async () => {
// Submit job
const job = await client.queue.submit({
- model: models.video("lucy-pro-v2v"),
+ model: models.video("lucy-clip"),
prompt: "A timelapse of a flower blooming",
data: new Blob([inputVideo]),
});
diff --git a/examples/sdk-core/video/video-editing-2.1.ts b/examples/sdk-core/video/video-editing-2.1.ts
new file mode 100644
index 00000000..47d63913
--- /dev/null
+++ b/examples/sdk-core/video/video-editing-2.1.ts
@@ -0,0 +1,35 @@
+import fs from "node:fs";
+import { createDecartClient, models } from "@decartai/sdk";
+import { run } from "../lib/run";
+
+run(async () => {
+ const apiKey = process.env.DECART_API_KEY;
+ if (!apiKey) {
+ throw new Error("DECART_API_KEY environment variable is required");
+ }
+
+ const client = createDecartClient({
+ apiKey,
+ });
+
+ console.log("Editing video with lucy-2.1...");
+
+ const inputVideo = fs.readFileSync("input.mp4");
+
+ const result = await client.queue.submitAndPoll({
+ model: models.video("lucy-2.1"),
+ prompt: "Transform to watercolor painting style with soft brushstrokes",
+ data: new Blob([inputVideo]),
+ onStatusChange: (job) => {
+ console.log(`Job ${job.job_id}: ${job.status}`);
+ },
+ });
+
+ if (result.status === "completed") {
+ const output = Buffer.from(await result.data.arrayBuffer());
+ fs.writeFileSync("output.mp4", output);
+ console.log("Video saved to output.mp4");
+ } else {
+ console.log("Job failed:", result.error);
+ }
+});
diff --git a/examples/sdk-core/video/video-editing.ts b/examples/sdk-core/video/video-editing.ts
index c3ed692f..c07242b5 100644
--- a/examples/sdk-core/video/video-editing.ts
+++ b/examples/sdk-core/video/video-editing.ts
@@ -12,13 +12,13 @@ run(async () => {
apiKey,
});
- console.log("Editing video with lucy-2-v2v...");
+ console.log("Editing video with lucy-2...");
const inputVideo = fs.readFileSync("input.mp4");
// Option 1: Use a text prompt
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-2-v2v"),
+ model: models.video("lucy-2"),
prompt: "Transform to watercolor painting style with soft brushstrokes",
data: new Blob([inputVideo]),
onStatusChange: (job) => {
@@ -29,7 +29,7 @@ run(async () => {
// Option 2: Use a reference image to guide the edit (with empty prompt)
// const referenceImage = fs.readFileSync("reference.png");
// const result = await client.queue.submitAndPoll({
- // model: models.video("lucy-2-v2v"),
+ // model: models.video("lucy-2"),
// prompt: "",
// reference_image: new Blob([referenceImage]),
// data: new Blob([inputVideo]),
@@ -41,7 +41,7 @@ run(async () => {
// Option 3: Use both a prompt and a reference image together
// const referenceImage = fs.readFileSync("reference.png");
// const result = await client.queue.submitAndPoll({
- // model: models.video("lucy-2-v2v"),
+ // model: models.video("lucy-2"),
// prompt: "Apply the style from the reference image",
// reference_image: new Blob([referenceImage]),
// data: new Blob([inputVideo]),
diff --git a/examples/sdk-core/video/video-to-video.ts b/examples/sdk-core/video/video-to-video.ts
index 49fd6053..e4473c17 100644
--- a/examples/sdk-core/video/video-to-video.ts
+++ b/examples/sdk-core/video/video-to-video.ts
@@ -13,7 +13,7 @@ run(async () => {
// Basic usage with prompt only
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-pro-v2v"),
+ model: models.video("lucy-clip"),
prompt: "Transform to anime style",
data: new Blob([inputVideo]),
onStatusChange: (job) => {
@@ -24,7 +24,7 @@ run(async () => {
// With reference image - use an image to guide what to add to the video
// const referenceImage = fs.readFileSync("hat.png");
// const result = await client.queue.submitAndPoll({
- // model: models.video("lucy-pro-v2v"),
+ // model: models.video("lucy-clip"),
// prompt: "Add the hat from the reference image to the person",
// data: new Blob([inputVideo]),
// reference_image: new Blob([referenceImage]),
diff --git a/examples/tanstack-streamer/README.md b/examples/tanstack-streamer/README.md
index 459966ca..08b5b58b 100644
--- a/examples/tanstack-streamer/README.md
+++ b/examples/tanstack-streamer/README.md
@@ -27,7 +27,7 @@ pnpm dev
## Features
-- Real-time webcam video transformation using `lucy_2_rt`
+- Real-time webcam video transformation using `lucy-2`
- Producer + subscriber streaming pattern
- Shareable viewer link via subscribe token
- Dynamic style prompt updates
@@ -38,7 +38,7 @@ pnpm dev
| Route | Description |
|-------|-------------|
-| `/` | **Producer** — streams your camera through `lucy_2_rt`, shows styled output, and generates a shareable viewer link |
+| `/` | **Producer** — streams your camera through `lucy-2`, shows styled output, and generates a shareable viewer link |
| `/watch?token=...` | **Subscriber** — watches the producer's styled stream (receive-only, no camera needed) |
## How it works
@@ -51,8 +51,8 @@ pnpm dev
## Models
-This example uses `lucy_2_rt` for video editing with reference image support. You can also use:
+This example uses `lucy-2` for video editing with reference image support. You can also use:
-- `mirage` - MirageLSD video restyling model (older)
-- `mirage_v2` - MirageLSD v2 for style transformation
-- `lucy_v2v_720p_rt` - Lucy for video editing (add objects, change elements)
+- `lucy-restyle` - MirageLSD video restyling model (older)
+- `lucy-restyle-2` - MirageLSD v2 for style transformation
+- `lucy` - Lucy for video editing (add objects, change elements)
diff --git a/examples/tanstack-streamer/src/routes/index.tsx b/examples/tanstack-streamer/src/routes/index.tsx
index e7048d66..7b5882e0 100644
--- a/examples/tanstack-streamer/src/routes/index.tsx
+++ b/examples/tanstack-streamer/src/routes/index.tsx
@@ -19,7 +19,7 @@ function ProducerPage() {
const start = useCallback(async () => {
try {
- const model = models.realtime("lucy_2_rt");
+ const model = models.realtime("lucy-2");
setStatus("requesting-camera");
const stream = await navigator.mediaDevices.getUserMedia({
@@ -94,7 +94,7 @@ function ProducerPage() {
Producer
- Streams your camera through lucy_2_rt and generates a subscribe link for viewers.
+ Streams your camera through lucy-2 and generates a subscribe link for viewers.
{status === "idle" && (
diff --git a/packages/proxy/README.md b/packages/proxy/README.md
index 2c999673..6c30f687 100644
--- a/packages/proxy/README.md
+++ b/packages/proxy/README.md
@@ -27,7 +27,7 @@ The proxy supports all model endpoints, apart from the realtime models.
## How It Works
-1. Client SDK makes a request to your proxy endpoint (e.g., `/api/decart/v1/generate/lucy-pro-i2i`)
+1. Client SDK makes a request to your proxy endpoint (e.g., `/api/decart/v1/generate/lucy-image-2`)
2. Proxy middleware intercepts the request
3. Proxy attaches your server's API key to the request
4. Proxy forwards the request to `https://api.decart.ai`
diff --git a/packages/proxy/src/express/README.md b/packages/proxy/src/express/README.md
index c720f98b..016ac415 100644
--- a/packages/proxy/src/express/README.md
+++ b/packages/proxy/src/express/README.md
@@ -25,7 +25,7 @@ const client = createDecartClient({ proxy: "/api/decart" });
// Use the client as normal
const result = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
prompt: "Make it look like a watercolor painting",
data: imageBlob,
});
diff --git a/packages/proxy/src/nextjs/README.md b/packages/proxy/src/nextjs/README.md
index f6f2275f..03cd1a84 100644
--- a/packages/proxy/src/nextjs/README.md
+++ b/packages/proxy/src/nextjs/README.md
@@ -35,7 +35,7 @@ const client = createDecartClient({ proxy: PROXY_ROUTE });
// Use the client as normal
const result = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
prompt: "Make it look like a watercolor painting",
data: imageBlob,
});
diff --git a/packages/sdk/src/process/types.ts b/packages/sdk/src/process/types.ts
index 3e83b35c..fef4f637 100644
--- a/packages/sdk/src/process/types.ts
+++ b/packages/sdk/src/process/types.ts
@@ -159,13 +159,13 @@ export interface PromptInput {
* This allows different models to have field-specific documentation while maintaining type safety.
* Specific models are checked first, then falls back to category-based selection.
*/
-export type ModelSpecificInputs<T extends { name: string }> = T["name"] extends "lucy-pro-i2i"
+export type ModelSpecificInputs<T extends { name: string }> = T["name"] extends "lucy-pro-i2i" | "lucy-image-2"
? ImageEditingInputs
- : T["name"] extends "lucy-restyle-v2v"
+ : T["name"] extends "lucy-restyle-v2v" | "lucy-restyle-2"
? VideoRestyleInputs
- : T["name"] extends "lucy-2-v2v"
+ : T["name"] extends "lucy-2-v2v" | "lucy-2" | "lucy-2.1"
? VideoEdit2Inputs
- : T["name"] extends "lucy-pro-v2v"
+ : T["name"] extends "lucy-pro-v2v" | "lucy-clip"
? VideoEditInputs
: T["name"] extends ImageModels
? ImageGenerationInputs
diff --git a/packages/sdk/src/realtime/client.ts b/packages/sdk/src/realtime/client.ts
index f1869b0a..17d7f38b 100644
--- a/packages/sdk/src/realtime/client.ts
+++ b/packages/sdk/src/realtime/client.ts
@@ -135,7 +135,7 @@ export const createRealTimeClient = (opts: RealTimeClientOptions) => {
throw parsedOptions.error;
}
- const isAvatarLive = options.model.name === "live_avatar";
+ const isAvatarLive = options.model.name === "live_avatar" || options.model.name === "live-avatar";
const { onRemoteStream, initialState } = parsedOptions.data;
diff --git a/packages/sdk/src/realtime/webrtc-connection.ts b/packages/sdk/src/realtime/webrtc-connection.ts
index dc5802b9..17479adb 100644
--- a/packages/sdk/src/realtime/webrtc-connection.ts
+++ b/packages/sdk/src/realtime/webrtc-connection.ts
@@ -416,7 +416,7 @@ export class WebRTCConnection {
if (this.localStream) {
// For live_avatar: add receive-only video transceiver (sends audio only, receives audio+video)
- if (this.callbacks.modelName === "live_avatar") {
+ if (this.callbacks.modelName === "live_avatar" || this.callbacks.modelName === "live-avatar") {
this.pc.addTransceiver("video", { direction: "recvonly" });
}
diff --git a/packages/sdk/src/shared/model.ts b/packages/sdk/src/shared/model.ts
index d6cbc3b1..2f496da0 100644
--- a/packages/sdk/src/shared/model.ts
+++ b/packages/sdk/src/shared/model.ts
@@ -1,7 +1,49 @@
import { z } from "zod";
import { createModelNotFoundError } from "../utils/errors";
+/**
+ * Map of deprecated model names to their canonical replacements.
+ * Old names still work but will log a deprecation warning.
+ */
+const MODEL_ALIASES: Record<string, string> = {
+ mirage: "lucy-restyle",
+ mirage_v2: "lucy-restyle-2",
+ lucy_v2v_720p_rt: "lucy",
+ lucy_2_rt: "lucy-2",
+ live_avatar: "live-avatar",
+ "lucy-pro-v2v": "lucy-clip",
+ "lucy-restyle-v2v": "lucy-restyle-2",
+ "lucy-2-v2v": "lucy-2",
+ "lucy-pro-i2i": "lucy-image-2",
+};
+
+const _warnedAliases = new Set<string>();
+
+/** @internal Test-only helper to reset deprecation warning tracking */
+export function _resetDeprecationWarnings(): void {
+ _warnedAliases.clear();
+}
+
+function warnDeprecated(model: string): void {
+ const canonical = MODEL_ALIASES[model];
+ if (canonical && !_warnedAliases.has(model)) {
+ _warnedAliases.add(model);
+ console.warn(
+ `[Decart SDK] Model "${model}" is deprecated. Use "${canonical}" instead. See https://docs.platform.decart.ai/models for details.`,
+ );
+ }
+}
+
export const realtimeModels = z.union([
+ // Canonical names
+ z.literal("lucy"),
+ z.literal("lucy-2"),
+ z.literal("lucy-2.1"),
+ z.literal("lucy-2.1-vton"),
+ z.literal("lucy-restyle"),
+ z.literal("lucy-restyle-2"),
+ z.literal("live-avatar"),
+ // Deprecated names (use canonical names above instead)
z.literal("mirage"),
z.literal("mirage_v2"),
z.literal("lucy_v2v_720p_rt"),
@@ -9,12 +51,23 @@ export const realtimeModels = z.union([
z.literal("live_avatar"),
]);
export const videoModels = z.union([
- z.literal("lucy-pro-v2v"),
+ // Canonical names
+ z.literal("lucy-clip"),
+ z.literal("lucy-2"),
+ z.literal("lucy-2.1"),
+ z.literal("lucy-restyle-2"),
z.literal("lucy-motion"),
+ // Deprecated names (use canonical names above instead)
+ z.literal("lucy-pro-v2v"),
z.literal("lucy-restyle-v2v"),
z.literal("lucy-2-v2v"),
]);
-export const imageModels = z.literal("lucy-pro-i2i");
+export const imageModels = z.union([
+ // Canonical name
+ z.literal("lucy-image-2"),
+ // Deprecated name (use canonical name above instead)
+ z.literal("lucy-pro-i2i"),
+]);
export const modelSchema = z.union([realtimeModels, videoModels, imageModels]);
export type Model = z.infer<typeof modelSchema>;
@@ -67,41 +120,84 @@ const motionResolutionSchema = z
.describe("The resolution to use for the generation");
/**
- * Resolution schema for lucy-pro-v2v (supports 720p).
+ * Resolution schema for video-to-video models (supports 720p).
*/
-const proV2vResolutionSchema = z
+const v2vResolutionSchema = z
.literal("720p")
.optional()
.describe("The resolution to use for the generation")
.default("720p");
-export const modelInputSchemas = {
- "lucy-pro-v2v": z.object({
- prompt: z.string().min(1).max(1000).describe("The prompt to use for the generation"),
- data: fileInputSchema.describe(
- "The video data to use for generation (File, Blob, ReadableStream, URL, or string URL). Output video is limited to 5 seconds.",
+const videoEditSchema = z.object({
+ prompt: z.string().min(1).max(1000).describe("The prompt to use for the generation"),
+ data: fileInputSchema.describe(
+ "The video data to use for generation (File, Blob, ReadableStream, URL, or string URL). Output video is limited to 5 seconds.",
+ ),
+ reference_image: fileInputSchema
+ .optional()
+ .describe(
+ "Optional reference image to guide what to add to the video (File, Blob, ReadableStream, URL, or string URL)",
),
+ seed: z.number().optional().describe("The seed to use for the generation"),
+ resolution: v2vResolutionSchema,
+ enhance_prompt: z.boolean().optional().describe("Whether to enhance the prompt"),
+});
+
+const imageEditSchema = z.object({
+ prompt: z.string().min(1).max(1000).describe("The prompt to use for the generation"),
+ data: fileInputSchema.describe(
+ "The image data to use for generation (File, Blob, ReadableStream, URL, or string URL)",
+ ),
+ reference_image: fileInputSchema
+ .optional()
+ .describe("Optional reference image to guide the edit (File, Blob, ReadableStream, URL, or string URL)"),
+ seed: z.number().optional().describe("The seed to use for the generation"),
+ resolution: proResolutionSchema(),
+ enhance_prompt: z.boolean().optional().describe("Whether to enhance the prompt"),
+});
+
+const restyleSchema = z
+ .object({
+ prompt: z.string().min(1).max(1000).optional().describe("Text prompt for the video editing"),
reference_image: fileInputSchema
.optional()
- .describe(
- "Optional reference image to guide what to add to the video (File, Blob, ReadableStream, URL, or string URL)",
- ),
- seed: z.number().optional().describe("The seed to use for the generation"),
- resolution: proV2vResolutionSchema,
- enhance_prompt: z.boolean().optional().describe("Whether to enhance the prompt"),
- }),
- "lucy-pro-i2i": z.object({
- prompt: z.string().min(1).max(1000).describe("The prompt to use for the generation"),
- data: fileInputSchema.describe(
- "The image data to use for generation (File, Blob, ReadableStream, URL, or string URL)",
- ),
- reference_image: fileInputSchema
+ .describe("Reference image to transform into a prompt (File, Blob, ReadableStream, URL, or string URL)"),
+ data: fileInputSchema.describe("Video file to process (File, Blob, ReadableStream, URL, or string URL)"),
+ seed: z.number().optional().describe("Seed for the video generation"),
+ resolution: v2vResolutionSchema,
+ enhance_prompt: z
+ .boolean()
.optional()
- .describe("Optional reference image to guide the edit (File, Blob, ReadableStream, URL, or string URL)"),
- seed: z.number().optional().describe("The seed to use for the generation"),
- resolution: proResolutionSchema(),
- enhance_prompt: z.boolean().optional().describe("Whether to enhance the prompt"),
- }),
+ .describe("Whether to enhance the prompt (only valid with text prompt, defaults to true on backend)"),
+ })
+ .refine((data) => (data.prompt !== undefined) !== (data.reference_image !== undefined), {
+ message: "Must provide either 'prompt' or 'reference_image', but not both",
+ })
+ .refine((data) => !(data.reference_image !== undefined && data.enhance_prompt !== undefined), {
+ message: "'enhance_prompt' is only valid when using 'prompt', not 'reference_image'",
+ });
+
+const videoEdit2Schema = z.object({
+ prompt: z
+ .string()
+ .max(1000)
+ .describe("Text prompt for the video editing. Send an empty string if you want no text prompt."),
+ reference_image: fileInputSchema
+ .optional()
+ .describe("Optional reference image to guide the edit (File, Blob, ReadableStream, URL, or string URL)"),
+ data: fileInputSchema.describe("Video file to process (File, Blob, ReadableStream, URL, or string URL)"),
+ seed: z.number().optional().describe("The seed to use for the generation"),
+ resolution: v2vResolutionSchema,
+ enhance_prompt: z.boolean().optional().describe("Whether to enhance the prompt"),
+});
+
+export const modelInputSchemas = {
+ // Canonical names
+ "lucy-clip": videoEditSchema,
+ "lucy-image-2": imageEditSchema,
+ "lucy-restyle-2": restyleSchema,
+ "lucy-2": videoEdit2Schema,
+ "lucy-2.1": videoEdit2Schema,
"lucy-motion": z.object({
data: fileInputSchema.describe(
"The image data to use for generation (File, Blob, ReadableStream, URL, or string URL). Output video is limited to 5 seconds.",
@@ -120,39 +216,11 @@ export const modelInputSchemas = {
seed: z.number().optional().describe("The seed to use for the generation"),
resolution: motionResolutionSchema,
}),
- "lucy-restyle-v2v": z
- .object({
- prompt: z.string().min(1).max(1000).optional().describe("Text prompt for the video editing"),
- reference_image: fileInputSchema
- .optional()
- .describe("Reference image to transform into a prompt (File, Blob, ReadableStream, URL, or string URL)"),
- data: fileInputSchema.describe("Video file to process (File, Blob, ReadableStream, URL, or string URL)"),
- seed: z.number().optional().describe("Seed for the video generation"),
- resolution: proV2vResolutionSchema,
- enhance_prompt: z
- .boolean()
- .optional()
- .describe("Whether to enhance the prompt (only valid with text prompt, defaults to true on backend)"),
- })
- .refine((data) => (data.prompt !== undefined) !== (data.reference_image !== undefined), {
- message: "Must provide either 'prompt' or 'reference_image', but not both",
- })
- .refine((data) => !(data.reference_image !== undefined && data.enhance_prompt !== undefined), {
- message: "'enhance_prompt' is only valid when using 'prompt', not 'reference_image'",
- }),
- "lucy-2-v2v": z.object({
- prompt: z
- .string()
- .max(1000)
- .describe("Text prompt for the video editing. Send an empty string if you want no text prompt."),
- reference_image: fileInputSchema
- .optional()
- .describe("Optional reference image to guide the edit (File, Blob, ReadableStream, URL, or string URL)"),
- data: fileInputSchema.describe("Video file to process (File, Blob, ReadableStream, URL, or string URL)"),
- seed: z.number().optional().describe("The seed to use for the generation"),
- resolution: proV2vResolutionSchema,
- enhance_prompt: z.boolean().optional().describe("Whether to enhance the prompt"),
- }),
+ // Deprecated names (kept for backward compatibility)
+ "lucy-pro-v2v": videoEditSchema,
+ "lucy-pro-i2i": imageEditSchema,
+ "lucy-restyle-v2v": restyleSchema,
+ "lucy-2-v2v": videoEdit2Schema,
} as const;
export type ModelInputSchemas = typeof modelInputSchemas;
@@ -179,14 +247,16 @@ export type CustomModelDefinition = Omit;
+export type ImageModelDefinition = ModelDefinition & { queueUrlPath: string };
/**
* Type alias for model definitions that support queue processing.
* Only video models support the queue API.
+ * Requires `queueUrlPath` to distinguish from realtime definitions of the same model name.
*/
-export type VideoModelDefinition = ModelDefinition;
+export type VideoModelDefinition = ModelDefinition & { queueUrlPath: string };
export const modelDefinitionSchema = z.object({
name: z.string(),
@@ -200,6 +270,64 @@ export const modelDefinitionSchema = z.object({
const _models = {
realtime: {
+ // Canonical names
+ lucy: {
+ urlPath: "/v1/stream",
+ name: "lucy" as const,
+ fps: 25,
+ width: 1280,
+ height: 704,
+ inputSchema: z.object({}),
+ },
+ "lucy-2": {
+ urlPath: "/v1/stream",
+ name: "lucy-2" as const,
+ fps: 20,
+ width: 1280,
+ height: 720,
+ inputSchema: z.object({}),
+ },
+ "lucy-2.1": {
+ urlPath: "/v1/stream",
+ name: "lucy-2.1" as const,
+ fps: 20,
+ width: 1088,
+ height: 624,
+ inputSchema: z.object({}),
+ },
+ "lucy-2.1-vton": {
+ urlPath: "/v1/stream",
+ name: "lucy-2.1-vton" as const,
+ fps: 20,
+ width: 1088,
+ height: 624,
+ inputSchema: z.object({}),
+ },
+ "lucy-restyle": {
+ urlPath: "/v1/stream",
+ name: "lucy-restyle" as const,
+ fps: 25,
+ width: 1280,
+ height: 704,
+ inputSchema: z.object({}),
+ },
+ "lucy-restyle-2": {
+ urlPath: "/v1/stream",
+ name: "lucy-restyle-2" as const,
+ fps: 22,
+ width: 1280,
+ height: 704,
+ inputSchema: z.object({}),
+ },
+ "live-avatar": {
+ urlPath: "/v1/stream",
+ name: "live-avatar" as const,
+ fps: 25,
+ width: 1280,
+ height: 720,
+ inputSchema: z.object({}),
+ },
+ // Deprecated names (use canonical names above instead)
mirage: {
urlPath: "/v1/stream",
name: "mirage" as const,
@@ -242,6 +370,17 @@ const _models = {
},
},
image: {
+ // Canonical name
+ "lucy-image-2": {
+ urlPath: "/v1/generate/lucy-image-2",
+ queueUrlPath: "/v1/jobs/lucy-image-2",
+ name: "lucy-image-2" as const,
+ fps: 25,
+ width: 1280,
+ height: 704,
+ inputSchema: modelInputSchemas["lucy-image-2"],
+ },
+ // Deprecated name
"lucy-pro-i2i": {
urlPath: "/v1/generate/lucy-pro-i2i",
queueUrlPath: "/v1/jobs/lucy-pro-i2i",
@@ -253,14 +392,42 @@ const _models = {
},
},
video: {
- "lucy-pro-v2v": {
- urlPath: "/v1/generate/lucy-pro-v2v",
- queueUrlPath: "/v1/jobs/lucy-pro-v2v",
- name: "lucy-pro-v2v" as const,
+ // Canonical names
+ "lucy-clip": {
+ urlPath: "/v1/generate/lucy-clip",
+ queueUrlPath: "/v1/jobs/lucy-clip",
+ name: "lucy-clip" as const,
fps: 25,
width: 1280,
height: 704,
- inputSchema: modelInputSchemas["lucy-pro-v2v"],
+ inputSchema: modelInputSchemas["lucy-clip"],
+ },
+ "lucy-2": {
+ urlPath: "/v1/generate/lucy-2",
+ queueUrlPath: "/v1/jobs/lucy-2",
+ name: "lucy-2" as const,
+ fps: 20,
+ width: 1280,
+ height: 720,
+ inputSchema: modelInputSchemas["lucy-2"],
+ },
+ "lucy-2.1": {
+ urlPath: "/v1/generate/lucy-2.1",
+ queueUrlPath: "/v1/jobs/lucy-2.1",
+ name: "lucy-2.1" as const,
+ fps: 20,
+ width: 1088,
+ height: 624,
+ inputSchema: modelInputSchemas["lucy-2.1"],
+ },
+ "lucy-restyle-2": {
+ urlPath: "/v1/generate/lucy-restyle-2",
+ queueUrlPath: "/v1/jobs/lucy-restyle-2",
+ name: "lucy-restyle-2" as const,
+ fps: 22,
+ width: 1280,
+ height: 704,
+ inputSchema: modelInputSchemas["lucy-restyle-2"],
},
"lucy-motion": {
urlPath: "/v1/generate/lucy-motion",
@@ -271,6 +438,16 @@ const _models = {
height: 704,
inputSchema: modelInputSchemas["lucy-motion"],
},
+ // Deprecated names (use canonical names above instead)
+ "lucy-pro-v2v": {
+ urlPath: "/v1/generate/lucy-pro-v2v",
+ queueUrlPath: "/v1/jobs/lucy-pro-v2v",
+ name: "lucy-pro-v2v" as const,
+ fps: 25,
+ width: 1280,
+ height: 704,
+ inputSchema: modelInputSchemas["lucy-pro-v2v"],
+ },
"lucy-restyle-v2v": {
urlPath: "/v1/generate/lucy-restyle-v2v",
queueUrlPath: "/v1/jobs/lucy-restyle-v2v",
@@ -293,7 +470,20 @@ const _models = {
} as const;
export const models = {
+ /**
+ * Get a realtime streaming model identifier.
+ *
+ * Available options:
+ * - `"lucy-2"` - Lucy 2 realtime video editing (720p)
+ * - `"lucy-2.1"` - Lucy 2.1 realtime video editing
+ * - `"lucy-2.1-vton"` - Lucy 2.1 virtual try-on
+ * - `"lucy-restyle-2"` - Realtime video restyling
+ * - `"lucy-restyle"` - Legacy realtime restyling
+ * - `"lucy"` - Legacy Lucy realtime
+ * - `"live-avatar"` - Live avatar
+ */
realtime: <T extends RealTimeModels>(model: T): ModelDefinition => {
+ warnDeprecated(model);
const modelDefinition = _models.realtime[model];
if (!modelDefinition) {
throw createModelNotFoundError(model);
@@ -304,29 +494,32 @@ export const models = {
* Get a video model identifier.
*
* Available options:
- * - `"lucy-pro-v2v"` - Video-to-video
- * - `"lucy-restyle-v2v"` - Video-to-video (Restyling)
- * - `"lucy-2-v2v"` - Video-to-video (Long-form editing, 720p)
+ * - `"lucy-clip"` - Video-to-video editing
+ * - `"lucy-2"` - Long-form video editing (720p)
+ * - `"lucy-2.1"` - Long-form video editing (Lucy 2.1)
+ * - `"lucy-restyle-2"` - Video restyling
* - `"lucy-motion"` - Motion generation
*/
- video: <T extends VideoModels>(model: T): ModelDefinition => {
+ video: <T extends VideoModels>(model: T): ModelDefinition & { queueUrlPath: string } => {
+ warnDeprecated(model);
const modelDefinition = _models.video[model];
if (!modelDefinition) {
throw createModelNotFoundError(model);
}
- return modelDefinition as ModelDefinition;
+ return modelDefinition as ModelDefinition & { queueUrlPath: string };
},
/**
* Get an image model identifier.
*
* Available options:
- * - `"lucy-pro-i2i"` - Image-to-image
+ * - `"lucy-image-2"` - Image-to-image editing
*/
- image: <T extends ImageModels>(model: T): ModelDefinition => {
+ image: <T extends ImageModels>(model: T): ModelDefinition & { queueUrlPath: string } => {
+ warnDeprecated(model);
const modelDefinition = _models.image[model];
if (!modelDefinition) {
throw createModelNotFoundError(model);
}
- return modelDefinition as ModelDefinition;
+ return modelDefinition as ModelDefinition & { queueUrlPath: string };
},
};
diff --git a/packages/sdk/tests/e2e-realtime.test.ts b/packages/sdk/tests/e2e-realtime.test.ts
index 5b2651bd..d8464cd3 100644
--- a/packages/sdk/tests/e2e-realtime.test.ts
+++ b/packages/sdk/tests/e2e-realtime.test.ts
@@ -10,7 +10,20 @@ function createSyntheticStream(fps: number, width: number, height: number): Medi
return canvas.captureStream(fps);
}
-const REALTIME_MODELS: RealTimeModels[] = ["mirage", "mirage_v2", "lucy_v2v_720p_rt", "lucy_2_rt"];
+const REALTIME_MODELS: RealTimeModels[] = [
+ // Canonical names
+ "lucy-restyle",
+ "lucy-restyle-2",
+ "lucy",
+ "lucy-2",
+ "lucy-2.1",
+ "lucy-2.1-vton",
+ // Deprecated names
+ "mirage",
+ "mirage_v2",
+ "lucy_v2v_720p_rt",
+ "lucy_2_rt",
+];
const TIMEOUT = 1 * 60 * 1000; // 1 minute
describe.concurrent("Realtime E2E Tests", { timeout: TIMEOUT, retry: 2 }, () => {
diff --git a/packages/sdk/tests/e2e.test.ts b/packages/sdk/tests/e2e.test.ts
index e873d051..561d3b31 100644
--- a/packages/sdk/tests/e2e.test.ts
+++ b/packages/sdk/tests/e2e.test.ts
@@ -65,21 +65,21 @@ describe.concurrent("E2E Tests", { timeout: TIMEOUT, retry: 2 }, () => {
}
describe("Process API - Image Models", () => {
- it("lucy-pro-i2i: image-to-image", async () => {
+ it("lucy-image-2: image-to-image", async () => {
const result = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
prompt: "Oil painting in the style of Van Gogh",
data: imageBlob,
seed: 333,
enhance_prompt: false,
});
- await expectResult(result, "lucy-pro-i2i", ".png");
+ await expectResult(result, "lucy-image-2", ".png");
});
- it("lucy-pro-i2i: image-to-image with reference_image", async () => {
+ it("lucy-image-2: image-to-image with reference_image", async () => {
const result = await client.process({
- model: models.image("lucy-pro-i2i"),
+ model: models.image("lucy-image-2"),
prompt: "Add the object from the reference image",
data: imageBlob,
reference_image: imageBlob,
@@ -87,78 +87,150 @@ describe.concurrent("E2E Tests", { timeout: TIMEOUT, retry: 2 }, () => {
enhance_prompt: false,
});
- await expectResult(result, "lucy-pro-i2i-reference_image", ".png");
+ await expectResult(result, "lucy-image-2-reference_image", ".png");
+ });
+ });
+
+ describe("Process API - Image Models (deprecated names)", () => {
+ it("lucy-pro-i2i (deprecated): image-to-image", async () => {
+ const result = await client.process({
+ model: models.image("lucy-pro-i2i"),
+ prompt: "Oil painting in the style of Van Gogh",
+ data: imageBlob,
+ seed: 333,
+ enhance_prompt: false,
+ });
+
+ await expectResult(result, "lucy-pro-i2i", ".png");
});
});
describe("Queue API - Video Models", () => {
- it("lucy-pro-v2v: video-to-video", async () => {
+ it("lucy-clip: video-to-video", async () => {
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-pro-v2v"),
+ model: models.video("lucy-clip"),
prompt: "Lego World animated style",
data: videoBlob,
seed: 999,
enhance_prompt: true,
});
- await expectResult(result, "lucy-pro-v2v", ".mp4");
+ await expectResult(result, "lucy-clip", ".mp4");
});
- it("lucy-restyle-v2v: video restyling (prompt)", async () => {
+ it("lucy-restyle-2: video restyling (prompt)", async () => {
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-restyle-v2v"),
+ model: models.video("lucy-restyle-2"),
prompt: "Cyberpunk neon city style",
data: videoBlob,
seed: 777,
});
- await expectResult(result, "lucy-restyle-v2v-prompt", ".mp4");
+ await expectResult(result, "lucy-restyle-2-prompt", ".mp4");
});
- it("lucy-restyle-v2v: video restyling (reference_image)", async () => {
+ it("lucy-restyle-2: video restyling (reference_image)", async () => {
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-restyle-v2v"),
+ model: models.video("lucy-restyle-2"),
reference_image: imageBlob,
data: videoBlob,
seed: 777,
});
- await expectResult(result, "lucy-restyle-v2v-reference_image", ".mp4");
+ await expectResult(result, "lucy-restyle-2-reference_image", ".mp4");
});
- it("lucy-2-v2v: video editing (prompt)", async () => {
+ it("lucy-2: video editing (prompt)", async () => {
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-2-v2v"),
+ model: models.video("lucy-2"),
prompt: "Watercolor painting style with soft brushstrokes",
data: videoBlob,
seed: 42,
});
- await expectResult(result, "lucy-2-v2v-prompt", ".mp4");
+ await expectResult(result, "lucy-2-prompt", ".mp4");
});
- it("lucy-2-v2v: video editing (reference_image)", async () => {
+ it("lucy-2: video editing (reference_image)", async () => {
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-2-v2v"),
+ model: models.video("lucy-2"),
prompt: "",
reference_image: imageBlob,
data: videoBlob,
seed: 42,
});
- await expectResult(result, "lucy-2-v2v-reference_image", ".mp4");
+ await expectResult(result, "lucy-2-reference_image", ".mp4");
});
- it("lucy-2-v2v: video editing (prompt + reference_image)", async () => {
+ it("lucy-2: video editing (prompt + reference_image)", async () => {
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-2-v2v"),
+ model: models.video("lucy-2"),
prompt: "Watercolor painting style",
reference_image: imageBlob,
data: videoBlob,
seed: 42,
});
- await expectResult(result, "lucy-2-v2v-both", ".mp4");
+ await expectResult(result, "lucy-2-both", ".mp4");
+ });
+
+ it("lucy-2.1: video editing (prompt)", async () => {
+ const result = await client.queue.submitAndPoll({
+ model: models.video("lucy-2.1"),
+ prompt: "Watercolor painting style with soft brushstrokes",
+ data: videoBlob,
+ seed: 42,
+ });
+
+ await expectResult(result, "lucy-2.1-prompt", ".mp4");
+ });
+
+ it("lucy-2.1: video editing (reference_image)", async () => {
+ const result = await client.queue.submitAndPoll({
+ model: models.video("lucy-2.1"),
+ prompt: "",
+ reference_image: imageBlob,
+ data: videoBlob,
+ seed: 42,
+ });
+
+ await expectResult(result, "lucy-2.1-reference_image", ".mp4");
+ });
+
+ // Deprecated video model names (aliases)
+ it("lucy-pro-v2v (deprecated): video-to-video", async () => {
+ const result = await client.queue.submitAndPoll({
+ model: models.video("lucy-pro-v2v"),
+ prompt: "Lego World animated style",
+ data: videoBlob,
+ seed: 999,
+ enhance_prompt: true,
+ });
+
+ await expectResult(result, "lucy-pro-v2v", ".mp4");
+ });
+
+ it("lucy-restyle-v2v (deprecated): video restyling", async () => {
+ const result = await client.queue.submitAndPoll({
+ model: models.video("lucy-restyle-v2v"),
+ prompt: "Cyberpunk neon city style",
+ data: videoBlob,
+ seed: 777,
+ });
+
+ await expectResult(result, "lucy-restyle-v2v", ".mp4");
+ });
+
+ it("lucy-2-v2v (deprecated): video editing", async () => {
+ const result = await client.queue.submitAndPoll({
+ model: models.video("lucy-2-v2v"),
+ prompt: "Watercolor painting style with soft brushstrokes",
+ data: videoBlob,
+ seed: 42,
+ });
+
+ await expectResult(result, "lucy-2-v2v", ".mp4");
});
it("lucy-motion: motion-guided image-to-video", async () => {
diff --git a/packages/sdk/tests/unit.test.ts b/packages/sdk/tests/unit.test.ts
index fab9e622..339d9f09 100644
--- a/packages/sdk/tests/unit.test.ts
+++ b/packages/sdk/tests/unit.test.ts
@@ -1,7 +1,8 @@
import { HttpResponse, http } from "msw";
import { setupServer } from "msw/node";
import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
-import { createDecartClient, models } from "../src/index.js";
+import { createDecartClient, isRealtimeModel, isVideoModel, models } from "../src/index.js";
+import { _resetDeprecationWarnings } from "../src/shared/model.js";
const MOCK_RESPONSE_DATA = new Uint8Array([0x00, 0x01, 0x02]).buffer;
const TEST_API_KEY = "test-api-key";
@@ -3402,3 +3403,173 @@ describe("CustomModelDefinition", () => {
expect(result.success).toBe(false);
});
});
+
+describe("Canonical Model Names", () => {
+ describe("Realtime canonical models", () => {
+ it("lucy canonical name works", () => {
+ const model = models.realtime("lucy");
+ expect(model.name).toBe("lucy");
+ expect(model.urlPath).toBe("/v1/stream");
+ expect(model.fps).toBe(25);
+ expect(model.width).toBe(1280);
+ expect(model.height).toBe(704);
+ });
+
+ it("lucy-2 canonical name works", () => {
+ const model = models.realtime("lucy-2");
+ expect(model.name).toBe("lucy-2");
+ expect(model.urlPath).toBe("/v1/stream");
+ expect(model.fps).toBe(20);
+ expect(model.width).toBe(1280);
+ expect(model.height).toBe(720);
+ });
+
+ it("lucy-2.1 canonical name works", () => {
+ const model = models.realtime("lucy-2.1");
+ expect(model.name).toBe("lucy-2.1");
+ expect(model.urlPath).toBe("/v1/stream");
+ expect(model.fps).toBe(20);
+ expect(model.width).toBe(1088);
+ expect(model.height).toBe(624);
+ });
+
+ it("lucy-2.1-vton canonical name works", () => {
+ const model = models.realtime("lucy-2.1-vton");
+ expect(model.name).toBe("lucy-2.1-vton");
+ expect(model.urlPath).toBe("/v1/stream");
+ expect(model.fps).toBe(20);
+ expect(model.width).toBe(1088);
+ expect(model.height).toBe(624);
+ });
+
+ it("lucy-restyle canonical name works", () => {
+ const model = models.realtime("lucy-restyle");
+ expect(model.name).toBe("lucy-restyle");
+ expect(model.fps).toBe(25);
+ });
+
+ it("lucy-restyle-2 canonical name works", () => {
+ const model = models.realtime("lucy-restyle-2");
+ expect(model.name).toBe("lucy-restyle-2");
+ expect(model.fps).toBe(22);
+ });
+
+ it("live-avatar canonical name works", () => {
+ const model = models.realtime("live-avatar");
+ expect(model.name).toBe("live-avatar");
+ expect(model.fps).toBe(25);
+ expect(model.width).toBe(1280);
+ expect(model.height).toBe(720);
+ });
+ });
+
+ describe("Video canonical models", () => {
+ it("lucy-clip canonical name works", () => {
+ const model = models.video("lucy-clip");
+ expect(model.name).toBe("lucy-clip");
+ expect(model.urlPath).toBe("/v1/generate/lucy-clip");
+ expect(model.queueUrlPath).toBe("/v1/jobs/lucy-clip");
+ expect(model.fps).toBe(25);
+ });
+
+ it("lucy-2 as video model works", () => {
+ const model = models.video("lucy-2");
+ expect(model.name).toBe("lucy-2");
+ expect(model.urlPath).toBe("/v1/generate/lucy-2");
+ expect(model.queueUrlPath).toBe("/v1/jobs/lucy-2");
+ expect(model.fps).toBe(20);
+ });
+
+ it("lucy-2.1 as video model works", () => {
+ const model = models.video("lucy-2.1");
+ expect(model.name).toBe("lucy-2.1");
+ expect(model.urlPath).toBe("/v1/generate/lucy-2.1");
+ expect(model.queueUrlPath).toBe("/v1/jobs/lucy-2.1");
+ });
+
+ it("lucy-restyle-2 as video model works", () => {
+ const model = models.video("lucy-restyle-2");
+ expect(model.name).toBe("lucy-restyle-2");
+ expect(model.urlPath).toBe("/v1/generate/lucy-restyle-2");
+ expect(model.queueUrlPath).toBe("/v1/jobs/lucy-restyle-2");
+ });
+ });
+
+ describe("Image canonical models", () => {
+ it("lucy-image-2 canonical name works", () => {
+ const model = models.image("lucy-image-2");
+ expect(model.name).toBe("lucy-image-2");
+ expect(model.urlPath).toBe("/v1/generate/lucy-image-2");
+ expect(model.queueUrlPath).toBe("/v1/jobs/lucy-image-2");
+ });
+ });
+
+ describe("Dual-surface models", () => {
+ it("lucy-2 is both a realtime and video model", () => {
+ expect(isRealtimeModel("lucy-2")).toBe(true);
+ expect(isVideoModel("lucy-2")).toBe(true);
+ });
+
+ it("lucy-2.1 is both a realtime and video model", () => {
+ expect(isRealtimeModel("lucy-2.1")).toBe(true);
+ expect(isVideoModel("lucy-2.1")).toBe(true);
+ });
+
+ it("lucy-restyle-2 is both a realtime and video model", () => {
+ expect(isRealtimeModel("lucy-restyle-2")).toBe(true);
+ expect(isVideoModel("lucy-restyle-2")).toBe(true);
+ });
+ });
+
+ describe("Deprecated names still work", () => {
+ it("lucy_2_rt still works as realtime model", () => {
+ const model = models.realtime("lucy_2_rt");
+ expect(model.name).toBe("lucy_2_rt");
+ });
+
+ it("mirage_v2 still works as realtime model", () => {
+ const model = models.realtime("mirage_v2");
+ expect(model.name).toBe("mirage_v2");
+ });
+
+ it("live_avatar still works as realtime model", () => {
+ const model = models.realtime("live_avatar");
+ expect(model.name).toBe("live_avatar");
+ });
+
+ it("lucy-pro-v2v still works as video model", () => {
+ const model = models.video("lucy-pro-v2v");
+ expect(model.name).toBe("lucy-pro-v2v");
+ });
+
+ it("lucy-pro-i2i still works as image model", () => {
+ const model = models.image("lucy-pro-i2i");
+ expect(model.name).toBe("lucy-pro-i2i");
+ });
+ });
+
+ describe("Deprecation warnings", () => {
+ it("warns when using deprecated model name", () => {
+ _resetDeprecationWarnings();
+ const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
+
+ models.video("lucy-pro-v2v");
+ expect(warnSpy).toHaveBeenCalledWith(
+ expect.stringContaining('Model "lucy-pro-v2v" is deprecated. Use "lucy-clip" instead.'),
+ );
+
+ warnSpy.mockRestore();
+ });
+
+ it("warns only once per deprecated alias", () => {
+ _resetDeprecationWarnings();
+ const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
+
+ models.video("lucy-pro-v2v");
+ models.video("lucy-pro-v2v");
+ expect(warnSpy).toHaveBeenCalledTimes(1);
+
+ warnSpy.mockRestore();
+ });
+ });
+});