Loading...
Loading...
Help users integrate Runway video generation APIs (text-to-video, image-to-video, video-to-video)
npx skill4agent add runwayml/skills integrate-video

PREREQUISITE: Run +check-compatibility first. Run +fetch-api-reference to load the latest API reference before integrating. Requires +setup-api-key for API credentials. Requires +integrate-uploads when the user has local files to use as input.
| Model | Best For | Input | Cost | Speed |
|---|---|---|---|---|
| gen4.5 | Highest quality, general purpose | Text and/or Image | 12 credits/sec | Standard |
| gen4_turbo | Fast, image-driven | Image required | 5 credits/sec | Fast |
| gen4_aleph | Video editing/transformation | Video + Text/Image | 15 credits/sec | Standard |
| veo3 | Premium Google model | Text/Image | 40 credits/sec | Standard |
| veo3.1 | High quality Google model | Text/Image | 20-40 credits/sec | Standard |
| veo3.1_fast | Fast Google model | Text/Image | 10-15 credits/sec | Fast |
Models: gen4.5, gen4_turbo, veo3.1_fast, veo3, gen4_aleph

## Text to Video — POST /v1/text_to_video
Supported models: gen4.5, veo3, veo3.1, veo3.1_fast

// Node.js SDK
// Text-to-video with the Node.js SDK: create a task, then wait for output.
import RunwayML from '@runwayml/sdk';
// No key is passed here — presumably the client reads credentials from the
// environment; see +setup-api-key. TODO confirm against the SDK docs.
const client = new RunwayML();
// Start generation and block until the task finishes (or fails).
const task = await client.textToVideo.create({
model: 'gen4.5',
promptText: 'A golden retriever running through a field of wildflowers at sunset',
ratio: '1280:720', // aspect ratio as width:height
duration: 5 // seconds of video (2-10 per the parameter table below)
}).waitForTaskOutput();
// task.output is an array of signed URLs
const videoUrl = task.output[0];

# Python SDK
# Text-to-video with the Python SDK (mirrors the Node.js snippet above).
from runwayml import RunwayML
# Credentials presumably come from the environment — see +setup-api-key.
client = RunwayML()
# Create the task and block until the output is ready.
task = client.text_to_video.create(
model='gen4.5',
prompt_text='A golden retriever running through a field of wildflowers at sunset',
ratio='1280:720',  # aspect ratio as width:height
duration=5  # seconds of video (2-10 per the parameter table below)
).wait_for_task_output()
video_url = task.output[0]

## Image to Video — POST /v1/image_to_video
Supported models: gen4.5, gen4_turbo, veo3, veo3.1, veo3.1_fast

// Node.js SDK
// Image-to-video: animate a still image, optionally guided by a text prompt.
// `client` is the RunwayML instance created in the earlier snippet.
const task = await client.imageToVideo.create({
model: 'gen4.5',
promptImage: 'https://example.com/landscape.jpg', // URL, data URI, or runway:// URI
promptText: 'Camera slowly pans right revealing a mountain range',
ratio: '1280:720',
duration: 5
}).waitForTaskOutput();# Python SDK
# Image-to-video (Python): same request as the Node.js version above.
task = client.image_to_video.create(
model='gen4.5',
prompt_image='https://example.com/landscape.jpg',  # URL, data URI, or runway:// URI
prompt_text='Camera slowly pans right revealing a mountain range',
ratio='1280:720',
duration=5
).wait_for_task_output()+integrate-uploads// Upload local file first
// Local file input: upload it first, then pass the returned runway:// URI
// as promptImage. See +integrate-uploads for details.
import fs from 'fs';
// NOTE(review): "ephemeral" presumably means short-lived/single-use storage —
// confirm retention semantics in the uploads API reference.
const upload = await client.uploads.createEphemeral(
fs.createReadStream('/path/to/image.jpg')
);
const task = await client.imageToVideo.create({
model: 'gen4.5',
promptImage: upload.runwayUri, // Use the runway:// URI
promptText: 'The scene comes to life with gentle wind',
ratio: '1280:720',
duration: 5
}).waitForTaskOutput();POST /v1/video_to_videogen4_aleph// Node.js SDK
// Video-to-video (gen4_aleph): transform/restyle an existing source video.
const task = await client.videoToVideo.create({
model: 'gen4_aleph',
promptVideo: 'https://example.com/source.mp4', // the video to transform
promptText: 'Transform into an animated cartoon style',
ratio: '1280:720',
duration: 5
}).waitForTaskOutput();POST /v1/character_performanceact_twoconst task = await client.characterPerformance.create({
// Character performance (act_two): animate the character image using the
// motion captured in the performance video.
model: 'act_two',
promptImage: 'https://example.com/character.jpg', // the character to animate
promptPerformance: 'https://example.com/performance.mp4', // the driving performance
ratio: '1280:720',
duration: 5
}).waitForTaskOutput();| Parameter | Type | Description |
|---|---|---|
| model | string | Model ID (required) |
| promptText | string | Text prompt describing the video |
| promptImage | string | URL, data URI, or runway:// URI |
| ratio | string | Aspect ratio, e.g. 1280:720 |
| duration | number | Video length in seconds (2-10) |
// Express server example: POST /api/generate-video creates a video from a
// text prompt, or from an image + prompt when imageUrl is supplied.
+integrate-uploadsTaskFailedErrorimport RunwayML from '@runwayml/sdk';
import express from 'express';
const client = new RunwayML();
const app = express();
app.use(express.json());
app.post('/api/generate-video', async (req, res) => {
try {
// Request body: { prompt, imageUrl?, model?, duration? } with defaults below.
const { prompt, imageUrl, model = 'gen4.5', duration = 5 } = req.body;
// Parameters shared by both the text- and image-driven branches.
const params = {
model,
promptText: prompt,
ratio: '1280:720',
duration
};
let task;
if (imageUrl) {
// An image was provided: use image-to-video with the shared params.
task = await client.imageToVideo.create({
...params,
promptImage: imageUrl
}).waitForTaskOutput();
} else {
// No image: plain text-to-video.
task = await client.textToVideo.create(params).waitForTaskOutput();
}
// First output is a signed URL to the generated video.
res.json({ videoUrl: task.output[0] });
} catch (error) {
// NOTE(review): error.message is returned to the client verbatim — this
// may leak internal details; consider a generic message in production.
console.error('Video generation failed:', error);
res.status(500).json({ error: error.message });
}
});// app/api/generate-video/route.ts
// Next.js App Router route handler for POST /api/generate-video.
import RunwayML from '@runwayml/sdk';
import { NextRequest, NextResponse } from 'next/server';
const client = new RunwayML();
export async function POST(request: NextRequest) {
// NOTE(review): request.json() is outside the try block — a malformed JSON
// body rejects here unhandled; consider moving it inside the try.
const { prompt, imageUrl } = await request.json();
try {
// Image supplied → image-to-video; otherwise text-to-video.
const task = imageUrl
? await client.imageToVideo.create({
model: 'gen4.5',
promptImage: imageUrl,
promptText: prompt,
ratio: '1280:720',
duration: 5
}).waitForTaskOutput()
: await client.textToVideo.create({
model: 'gen4.5',
promptText: prompt,
ratio: '1280:720',
duration: 5
}).waitForTaskOutput();
// First output is a signed URL to the generated video.
return NextResponse.json({ videoUrl: task.output[0] });
} catch (error) {
// Narrow the unknown catch value before reading .message.
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Generation failed' },
{ status: 500 }
);
}
}from fastapi import FastAPI, HTTPException
# FastAPI example: POST /api/generate-video (Python equivalent of the
# Node.js servers above).
from pydantic import BaseModel
from runwayml import RunwayML
app = FastAPI()
client = RunwayML()
# Request schema: image_url is optional — when present, image-to-video is used.
class VideoRequest(BaseModel):
prompt: str
image_url: str | None = None
model: str = "gen4.5"
duration: int = 5
@app.post("/api/generate-video")
async def generate_video(req: VideoRequest):
# NOTE(review): the synchronous client is used here, so
# wait_for_task_output() presumably blocks the event loop inside this
# async handler — confirm, or use the SDK's async client if available.
try:
if req.image_url:
task = client.image_to_video.create(
model=req.model,
prompt_image=req.image_url,
prompt_text=req.prompt,
ratio="1280:720",
duration=req.duration
).wait_for_task_output()
else:
task = client.text_to_video.create(
model=req.model,
prompt_text=req.prompt,
ratio="1280:720",
duration=req.duration
).wait_for_task_output()
# First output is a signed URL to the generated video.
return {"video_url": task.output[0]}
except Exception as e:
# Surface the failure as a 500 with the error text as detail.
raise HTTPException(status_code=500, detail=str(e))gen4_turbogen4_alephwaitForTaskOutput()+integrate-uploadsrunway://