Loading...
Loading...
Design cinematic image and video prompts for genmedia. Use this for shot language, camera movement, lighting, lens choices, color grade, film texture, scene blocking, and production-ready visual direction.
npx skill4agent add fal-ai-community/skills cinematography

references/shot-language.md
references/lighting-lens-color.md
references/examples.md

model-routing

genmedia models --endpoint_id openai/gpt-image-2 --json
genmedia models --endpoint_id fal-ai/nano-banana-pro --json
genmedia models --endpoint_id bytedance/seedance-2.0/text-to-video --json
genmedia models --endpoint_id bytedance/seedance-2.0/image-to-video --json
genmedia models --endpoint_id xai/grok-imagine-video/text-to-video --json

genmedia models "cinematic video generation camera movement" --json
genmedia docs "video generation camera movement prompt" --json

genmedia schema <endpoint_id> --json
genmedia pricing <endpoint_id> --json

genmedia upload ./frame.png --json

genmedia run <endpoint_id> \
--prompt "<cinematography prompt>" \
--download "./outputs/cinema/{request_id}_{index}.{ext}" \
  --json

genmedia run <endpoint_id> \
--prompt "<shot prompt>" \
--image_url "<uploaded frame if supported>" \
--async \
--json
genmedia status <endpoint_id> <request_id> \
--download "./outputs/cinema/{request_id}_{index}.{ext}" \
  --json

[subject] in [context], framed as [shot size and angle], [lens feel],
[lighting setup], [camera movement if video], [color grade], [texture],
[duration or aspect ratio], [continuity constraints]

openai/gpt-image-2
openai/gpt-image-2
fal-ai/nano-banana-pro
fal-ai/nano-banana-2
fal-ai/flux-2/klein/9b
bytedance/seedance-2.0/text-to-video
bytedance/seedance-2.0/image-to-video
bytedance/seedance-2.0/image-to-video
xai/grok-imagine-video/text-to-video
xai/grok-imagine-video/image-to-video

downloaded_files[]