feat: add comfy server examples

This commit is contained in:
badayvedat 2024-04-23 05:19:46 +03:00
parent 18eb6ec28d
commit 92e554a527
9 changed files with 1074 additions and 0 deletions

View File

@ -0,0 +1,205 @@
'use client';
import * as fal from '@fal-ai/serverless-client';
import { useMemo, useState } from 'react';
import getWorkflow from './workflow';
// @snippet:start(client.config)
// Configure the fal client once at module load. Requests are routed through a
// proxy route so the fal credentials never reach the browser.
fal.config({
  proxyUrl: '/api/fal/proxy', // the built-in nextjs proxy
  // proxyUrl: 'http://localhost:3333/api/fal/proxy', // or your own external proxy
});
// @snippet:end
// @snippet:start(client.result.type)
// A single image produced by a ComfyUI output node.
type Image = {
  filename: string;
  subfolder: string;
  type: string;
  url: string;
};
// Response payload expected from the comfy-server endpoint.
// NOTE(review): `outputs` is typed as an array, but the page reads it by
// ComfyUI node id (e.g. `outputs[9]`) — it is presumably a record keyed by
// node id; confirm against the actual server response.
type Result = {
  url: string;
  outputs: Record<string, any>[];
  images: Image[];
};
// @snippet:end
// Props for the inline error alert component below.
type ErrorProps = {
  error: any;
};
// Inline alert shown when a request fails; renders nothing while there is no
// error. (Note: this intentionally keeps the original name, which shadows the
// global `Error`.)
function Error({ error }: ErrorProps) {
  return error ? (
    <div
      className="p-4 mb-4 text-sm text-red-800 rounded bg-red-50 dark:bg-gray-800 dark:text-red-400"
      role="alert"
    >
      <span className="font-medium">Error</span> {error.message}
    </div>
  ) : null;
}
const DEFAULT_PROMPT =
  'photograph of victorian woman with wings, sky clouds, meadow grass';

// Image-to-image demo page: uploads an image, runs the Comfy SD1.5 workflow
// through fal, and renders the resulting image plus raw JSON and queue logs.
export function Index() {
  // @snippet:start("client.ui.state")
  // Input state
  const [prompt, setPrompt] = useState<string>(DEFAULT_PROMPT);
  const [imageFile, setImageFile] = useState<File | null>(null);
  // Result state
  const [loading, setLoading] = useState(false);
  // NOTE(review): `Error` here resolves to the component defined above, not
  // the built-in Error type — the shadowing looks unintended; confirm.
  const [error, setError] = useState<Error | null>(null);
  const [result, setResult] = useState<Result | null>(null);
  const [logs, setLogs] = useState<string[]>([]);
  const [elapsedTime, setElapsedTime] = useState<number>(0);
  // @snippet:end
  // NOTE(review): despite the name, this page produces an image; `video` is
  // just the current result (or null) — presumably carried over from the
  // image-to-video example.
  const video = useMemo(() => {
    if (!result) {
      return null;
    }
    return result;
  }, [result]);
  // Clear all request state before starting a new generation.
  const reset = () => {
    setLoading(false);
    setError(null);
    setResult(null);
    setLogs([]);
    setElapsedTime(0);
  };
  // Picks the first image saved by ComfyUI node '9' (the SaveImage node).
  // NOTE(review): this returns an Image but is stored via setResult (typed
  // Result), and `outputs` is declared as an array while being read by node
  // id — confirm the real response shape.
  const getImageURL = (result: Result) => {
    return result.outputs[9].images[0];
  };
  // Submits the workflow to fal and streams queue updates into local state.
  const generateVideo = async () => {
    reset();
    // @snippet:start("client.queue.subscribe")
    setLoading(true);
    const start = Date.now();
    try {
      const result: Result = await fal.subscribe('fal-ai/comfy-server', {
        input: getWorkflow({
          prompt: prompt,
          loadimage_1: imageFile,
        }),
        pollInterval: 3000, // Default is 1000 (every 1s)
        logs: true,
        onQueueUpdate(update) {
          setElapsedTime(Date.now() - start);
          if (
            update.status === 'IN_PROGRESS' ||
            update.status === 'COMPLETED'
          ) {
            setLogs((update.logs || []).map((log) => log.message));
          }
        },
      });
      setResult(getImageURL(result));
    } catch (error: any) {
      setError(error);
    } finally {
      setLoading(false);
      setElapsedTime(Date.now() - start);
    }
    // @snippet:end
  };
  return (
    <div className="min-h-screen dark:bg-gray-900 bg-gray-100">
      <main className="container dark:text-gray-50 text-gray-900 flex flex-col items-center justify-center w-full flex-1 py-10 space-y-8">
        <h1 className="text-4xl font-bold mb-8">
          Comfy SD1.5 - Image to Image
        </h1>
        <div className="text-lg w-full">
          <label htmlFor="image" className="block mb-2 text-current">
            Image
          </label>
          <div className="w-full flex flex-col space-y-4">
            <div className="mx-auto flex">
              {imageFile && (
                <img
                  src={URL.createObjectURL(imageFile)}
                  alt=""
                  className="mx-auto w-1/2"
                />
              )}
            </div>
            <input
              className="w-full text-sm p-2 rounded bg-black/10 dark:bg-white/5 border border-black/20 dark:border-white/10"
              id="image_url"
              name="image_url"
              type="file"
              placeholder="Choose a file"
              accept="image/*"
              onChange={(e) => setImageFile(e.target.files?.[0] ?? null)}
            />
          </div>
        </div>
        <div className="text-lg w-full">
          <label htmlFor="prompt" className="block mb-2 text-current">
            Prompt
          </label>
          <input
            className="w-full text-lg p-2 rounded bg-black/10 dark:bg-white/5 border border-black/20 dark:border-white/10"
            id="prompt"
            name="prompt"
            placeholder="Imagine..."
            value={prompt}
            autoComplete="off"
            onChange={(e) => setPrompt(e.target.value)}
            onBlur={(e) => setPrompt(e.target.value.trim())}
          />
        </div>
        <button
          onClick={(e) => {
            e.preventDefault();
            generateVideo();
          }}
          className="bg-indigo-600 hover:bg-indigo-700 text-white font-bold text-lg py-3 px-6 mx-auto rounded focus:outline-none focus:shadow-outline"
          disabled={loading}
        >
          {loading ? 'Generating...' : 'Generate Image'}
        </button>
        <Error error={error} />
        <div className="w-full flex flex-col space-y-4">
          <div className="mx-auto">
            {video && (
              // eslint-disable-next-line @next/next/no-img-element
              <img src={video.url} alt="" />
            )}
          </div>
          <div className="space-y-2">
            <h3 className="text-xl font-light">JSON Result</h3>
            <p className="text-sm text-current/80">
              {`Elapsed Time (seconds): ${(elapsedTime / 1000).toFixed(2)}`}
            </p>
            <pre className="text-sm bg-black/70 text-white/80 font-mono h-60 rounded whitespace-pre overflow-auto w-full">
              {result
                ? JSON.stringify(result, null, 2)
                : '// result pending...'}
            </pre>
          </div>
          <div className="space-y-2">
            <h3 className="text-xl font-light">Logs</h3>
            <pre className="text-sm bg-black/70 text-white/80 font-mono h-60 rounded whitespace-pre overflow-auto w-full">
              {logs.filter(Boolean).join('\n')}
            </pre>
          </div>
        </div>
      </main>
    </div>
  );
}
export default Index;

View File

@ -0,0 +1,102 @@
// This workflow is generated with ComfyUI-fal
// SD1.5 image-to-image graph: LoadImage -> VAEEncode -> KSampler -> VAEDecode
// -> SaveImage. `fal_inputs` holds the user-overridable values, and
// `fal_inputs_dev_info` maps each input name to the node/key it patches.
const WORKFLOW = {
  prompt: {
    '3': {
      inputs: {
        seed: 280823642470253,
        steps: 20,
        cfg: 8,
        sampler_name: 'dpmpp_2m',
        scheduler: 'normal',
        denoise: 0.8700000000000001,
        model: ['14', 0],
        positive: ['6', 0],
        negative: ['7', 0],
        latent_image: ['12', 0],
      },
      class_type: 'KSampler',
    },
    '6': {
      inputs: {
        text: ['15', 0],
        clip: ['14', 1],
      },
      class_type: 'CLIPTextEncode',
    },
    '7': {
      inputs: {
        text: 'watermark, text\n',
        clip: ['14', 1],
      },
      class_type: 'CLIPTextEncode',
    },
    '8': {
      inputs: {
        samples: ['3', 0],
        vae: ['14', 2],
      },
      class_type: 'VAEDecode',
    },
    '9': {
      inputs: {
        filename_prefix: 'ComfyUI',
        images: ['8', 0],
      },
      class_type: 'SaveImage',
    },
    '10': {
      inputs: {
        image: 'example.png',
        upload: 'image',
      },
      class_type: 'LoadImage',
    },
    '12': {
      inputs: {
        pixels: ['10', 0],
        vae: ['14', 2],
      },
      class_type: 'VAEEncode',
    },
    '14': {
      inputs: {
        ckpt_name: 'v1-5-pruned-emaonly.ckpt',
      },
      class_type: 'CheckpointLoaderSimple',
    },
    '15': {
      inputs: {
        name: 'prompt',
        value:
          'photograph of victorian woman with wings, sky clouds, meadow grass\n',
      },
      class_type: 'StringInput_fal',
    },
  },
  extra_data: {},
  fal_inputs_dev_info: {
    loadimage_1: {
      key: ['10', 'inputs', 'image'],
      class_type: 'LoadImage',
    },
    prompt: {
      key: ['15', 'inputs', 'value'],
      class_type: 'StringInput_fal',
    },
  },
  fal_inputs: {
    loadimage_1: 'example_url',
    prompt:
      'photograph of victorian woman with wings, sky clouds, meadow grass\n',
  },
};

/**
 * Returns a deep copy of WORKFLOW with `fal_inputs` overridden by the given
 * values (e.g. `{ prompt, loadimage_1 }`).
 *
 * Deep-copying keeps the shared template immutable: callers may freely mutate
 * the returned object without corrupting later calls.
 */
export default function getWorkflow(overrides: Record<string, unknown> = {}) {
  const workflow = JSON.parse(JSON.stringify(WORKFLOW));
  workflow.fal_inputs = {
    ...workflow.fal_inputs,
    ...overrides,
  };
  return workflow;
}

View File

@ -0,0 +1,183 @@
'use client';
import * as fal from '@fal-ai/serverless-client';
import { useMemo, useState } from 'react';
import getWorkflow from './workflow';
// @snippet:start(client.config)
fal.config({
proxyUrl: '/api/fal/proxy', // the built-int nextjs proxy
// proxyUrl: 'http://localhost:3333/api/fal/proxy', // or your own external proxy
});
// @snippet:end
// @snippet:start(client.result.type)
// A single image produced by a ComfyUI output node.
type Image = {
  filename: string;
  subfolder: string;
  type: string;
  url: string;
};
// Response payload expected from the comfy-server endpoint.
// NOTE(review): `outputs` is typed as an array, but the page reads it by
// ComfyUI node id (e.g. `outputs[10]`) — it is presumably a record keyed by
// node id; confirm against the actual server response.
type Result = {
  url: string;
  outputs: Record<string, any>[];
  images: Image[];
};
// @snippet:end
// Props for the inline error alert component below.
type ErrorProps = {
  error: any;
};
// Inline alert shown when a request fails; renders nothing while there is no
// error. (Note: this intentionally keeps the original name, which shadows the
// global `Error`.)
function Error({ error }: ErrorProps) {
  return error ? (
    <div
      className="p-4 mb-4 text-sm text-red-800 rounded bg-red-50 dark:bg-gray-800 dark:text-red-400"
      role="alert"
    >
      <span className="font-medium">Error</span> {error.message}
    </div>
  ) : null;
}
// Image-to-video demo page: uploads an image, runs the Comfy SVD workflow
// through fal, and renders the resulting animation plus raw JSON and logs.
export function Index() {
  // @snippet:start("client.ui.state")
  // Input state
  const [imageFile, setImageFile] = useState<File | null>(null);
  // Result state
  const [loading, setLoading] = useState(false);
  // NOTE(review): `Error` here resolves to the component defined above, not
  // the built-in Error type — the shadowing looks unintended; confirm.
  const [error, setError] = useState<Error | null>(null);
  const [result, setResult] = useState<Result | null>(null);
  const [logs, setLogs] = useState<string[]>([]);
  const [elapsedTime, setElapsedTime] = useState<number>(0);
  // @snippet:end
  // Current result (or null); aliased as `video` for the render below.
  const video = useMemo(() => {
    if (!result) {
      return null;
    }
    return result;
  }, [result]);
  // Clear all request state before starting a new generation.
  const reset = () => {
    setLoading(false);
    setError(null);
    setResult(null);
    setLogs([]);
    setElapsedTime(0);
  };
  // Picks the first output of ComfyUI node '10' (the SaveAnimatedWEBP node).
  // NOTE(review): this returns an Image but is stored via setResult (typed
  // Result), and `outputs` is declared as an array while being read by node
  // id — confirm the real response shape.
  const getImageURL = (result: Result) => {
    return result.outputs[10].images[0];
  };
  // Submits the workflow to fal and streams queue updates into local state.
  const generateVideo = async () => {
    reset();
    // @snippet:start("client.queue.subscribe")
    setLoading(true);
    const start = Date.now();
    try {
      const result: Result = await fal.subscribe('fal-ai/comfy-server', {
        input: getWorkflow({
          loadimage_1: imageFile,
        }),
        pollInterval: 3000, // Default is 1000 (every 1s)
        logs: true,
        onQueueUpdate(update) {
          setElapsedTime(Date.now() - start);
          if (
            update.status === 'IN_PROGRESS' ||
            update.status === 'COMPLETED'
          ) {
            setLogs((update.logs || []).map((log) => log.message));
          }
        },
      });
      setResult(getImageURL(result));
    } catch (error: any) {
      setError(error);
    } finally {
      setLoading(false);
      setElapsedTime(Date.now() - start);
    }
    // @snippet:end
  };
  return (
    <div className="min-h-screen dark:bg-gray-900 bg-gray-100">
      <main className="container dark:text-gray-50 text-gray-900 flex flex-col items-center justify-center w-full flex-1 py-10 space-y-8">
        <h1 className="text-4xl font-bold mb-8">Comfy SVD - Image to Video</h1>
        <div className="text-lg w-full">
          <label htmlFor="image" className="block mb-2 text-current">
            Image
          </label>
          <div className="w-full flex flex-col space-y-4">
            <div className="mx-auto flex">
              {imageFile && (
                <img
                  src={URL.createObjectURL(imageFile)}
                  alt=""
                  className="mx-auto w-1/2"
                />
              )}
            </div>
            <input
              className="w-full text-sm p-2 rounded bg-black/10 dark:bg-white/5 border border-black/20 dark:border-white/10"
              id="image_url"
              name="image_url"
              type="file"
              placeholder="Choose a file"
              accept="image/*"
              onChange={(e) => setImageFile(e.target.files?.[0] ?? null)}
            />
          </div>
        </div>
        <button
          onClick={(e) => {
            e.preventDefault();
            generateVideo();
          }}
          className="bg-indigo-600 hover:bg-indigo-700 text-white font-bold text-lg py-3 px-6 mx-auto rounded focus:outline-none focus:shadow-outline"
          disabled={loading}
        >
          {loading ? 'Generating...' : 'Generate Video'}
        </button>
        <Error error={error} />
        <div className="w-full flex flex-col space-y-4">
          <div className="mx-auto">
            {video && (
              // eslint-disable-next-line @next/next/no-img-element
              <img src={video.url} alt="" />
            )}
          </div>
          <div className="space-y-2">
            <h3 className="text-xl font-light">JSON Result</h3>
            <p className="text-sm text-current/80">
              {`Elapsed Time (seconds): ${(elapsedTime / 1000).toFixed(2)}`}
            </p>
            <pre className="text-sm bg-black/70 text-white/80 font-mono h-60 rounded whitespace-pre overflow-auto w-full">
              {result
                ? JSON.stringify(result, null, 2)
                : '// result pending...'}
            </pre>
          </div>
          <div className="space-y-2">
            <h3 className="text-xl font-light">Logs</h3>
            <pre className="text-sm bg-black/70 text-white/80 font-mono h-60 rounded whitespace-pre overflow-auto w-full">
              {logs.filter(Boolean).join('\n')}
            </pre>
          </div>
        </div>
      </main>
    </div>
  );
}
export default Index;

View File

@ -0,0 +1,91 @@
// SVD image-to-video graph: LoadImage -> SVD_img2vid_Conditioning -> KSampler
// -> VAEDecode -> SaveAnimatedWEBP. `fal_inputs` holds the user-overridable
// values; `fal_inputs_dev_info` maps each input name to the node/key it
// patches.
const WORKFLOW = {
  prompt: {
    '3': {
      inputs: {
        seed: 351912937281939,
        steps: 20,
        cfg: 2.5,
        sampler_name: 'euler',
        scheduler: 'karras',
        denoise: 1,
        model: ['14', 0],
        positive: ['12', 0],
        negative: ['12', 1],
        latent_image: ['12', 2],
      },
      class_type: 'KSampler',
    },
    '8': {
      inputs: {
        samples: ['3', 0],
        vae: ['15', 2],
      },
      class_type: 'VAEDecode',
    },
    '10': {
      inputs: {
        filename_prefix: 'ComfyUI',
        fps: 10,
        lossless: false,
        quality: 85,
        method: 'default',
        images: ['8', 0],
      },
      class_type: 'SaveAnimatedWEBP',
    },
    '12': {
      inputs: {
        width: 1024,
        height: 576,
        video_frames: 14,
        motion_bucket_id: 127,
        fps: 6,
        augmentation_level: 0,
        clip_vision: ['15', 1],
        init_image: ['23', 0],
        vae: ['15', 2],
      },
      class_type: 'SVD_img2vid_Conditioning',
    },
    '14': {
      inputs: {
        min_cfg: 1,
        model: ['15', 0],
      },
      class_type: 'VideoLinearCFGGuidance',
    },
    '15': {
      inputs: {
        ckpt_name: 'svd.safetensors',
      },
      class_type: 'ImageOnlyCheckpointLoader',
    },
    '23': {
      inputs: {
        image: '18.png',
        upload: 'image',
      },
      class_type: 'LoadImage',
    },
  },
  extra_data: {},
  fal_inputs_dev_info: {
    loadimage_1: {
      key: ['23', 'inputs', 'image'],
      class_type: 'LoadImage',
    },
  },
  fal_inputs: {
    loadimage_1: 'example_url',
  },
};

/**
 * Returns a deep copy of WORKFLOW with `fal_inputs` overridden by the given
 * values (e.g. `{ loadimage_1 }`).
 *
 * Deep-copying keeps the shared template immutable: callers may freely mutate
 * the returned object without corrupting later calls.
 */
export default function getWorkflow(overrides: Record<string, unknown> = {}) {
  const workflow = JSON.parse(JSON.stringify(WORKFLOW));
  workflow.fal_inputs = {
    ...workflow.fal_inputs,
    ...overrides,
  };
  return workflow;
}

View File

@ -0,0 +1,40 @@
'use client';
import { useRouter } from 'next/navigation';
// Landing page linking to each Comfy workflow example.
export default function Index() {
  const router = useRouter(); // App Router navigation (next/navigation)
  // Label/route pairs rendered as navigation buttons, in display order.
  const examples: Array<[string, string]> = [
    ['Text to Image', '/comfy/text_to_image'],
    ['Image to Image', '/comfy/image_to_image'],
    ['Image to Video', '/comfy/image_to_video'],
  ];
  return (
    <div className="min-h-screen dark:bg-gray-900 bg-gray-100">
      <main className="container mx-auto flex flex-col items-center justify-center w-full flex-1 py-12 px-4 sm:px-6 lg:px-8 text-center">
        <h1 className="text-4xl font-bold mb-8">
          Serverless Comfy Workflow Examples powered by{' '}
          <code className="font-light text-pink-600">fal</code>
        </h1>
        <p className="mt-2 text-lg text-gray-400 max-w-2xl">
          Learn how to use our fal-js to execute Comfy workflows.
        </p>
        <div className="mt-12 grid grid-cols-1 gap-3 md:grid-cols-3 lg:grid-cols-3">
          {examples.map(([label, path]) => (
            <button
              key={path}
              onClick={() => router.push(path)}
              className="px-6 py-3 bg-blue-600 hover:bg-blue-500 text-white rounded-lg shadow-md transition-transform transform hover:-translate-y-1"
            >
              {label}
            </button>
          ))}
        </div>
      </main>
    </div>
  );
}

View File

@ -0,0 +1,173 @@
'use client';
import * as fal from '@fal-ai/serverless-client';
import { useMemo, useState } from 'react';
import getWorkflow from './workflow';
// @snippet:start(client.config)
// Configure the fal client once at module load. Requests are routed through a
// proxy route so the fal credentials never reach the browser.
fal.config({
  proxyUrl: '/api/fal/proxy', // the built-in nextjs proxy
  // proxyUrl: 'http://localhost:3333/api/fal/proxy', // or your own external proxy
});
// @snippet:end
// @snippet:start(client.result.type)
// A single image produced by a ComfyUI output node.
type Image = {
  filename: string;
  subfolder: string;
  type: string;
  url: string;
};
// Response payload expected from the comfy-server endpoint.
// NOTE(review): `outputs` is typed as an array, but the page reads it by
// ComfyUI node id (e.g. `outputs[9]`) — it is presumably a record keyed by
// node id; confirm against the actual server response.
type Result = {
  url: string;
  outputs: Record<string, any>[];
  images: Image[];
};
// @snippet:end
// Props for the inline error alert component below.
type ErrorProps = {
  error: any;
};
// Inline alert shown when a request fails; renders nothing while there is no
// error. (Note: this intentionally keeps the original name, which shadows the
// global `Error`.)
function Error({ error }: ErrorProps) {
  return error ? (
    <div
      className="p-4 mb-4 text-sm text-red-800 rounded bg-red-50 dark:bg-gray-800 dark:text-red-400"
      role="alert"
    >
      <span className="font-medium">Error</span> {error.message}
    </div>
  ) : null;
}
const DEFAULT_PROMPT =
  'a city landscape of a cyberpunk metropolis, raining, purple, pink and teal neon lights, highly detailed, uhd';

// Text-to-image demo page: runs the Comfy SDXL workflow through fal with the
// user's prompt and renders the resulting image plus raw JSON and queue logs.
export function Index() {
  // @snippet:start("client.ui.state")
  // Input state
  const [prompt, setPrompt] = useState<string>(DEFAULT_PROMPT);
  // Result state
  const [loading, setLoading] = useState(false);
  // NOTE(review): `Error` here resolves to the component defined above, not
  // the built-in Error type — the shadowing looks unintended; confirm.
  const [error, setError] = useState<Error | null>(null);
  const [result, setResult] = useState<Result | null>(null);
  const [logs, setLogs] = useState<string[]>([]);
  const [elapsedTime, setElapsedTime] = useState<number>(0);
  // @snippet:end
  // Current result (or null); aliased as `image` for the render below.
  const image = useMemo(() => {
    if (!result) {
      return null;
    }
    return result;
  }, [result]);
  // Clear all request state before starting a new generation.
  const reset = () => {
    setLoading(false);
    setError(null);
    setResult(null);
    setLogs([]);
    setElapsedTime(0);
  };
  // Picks the first image saved by ComfyUI node '9' (the SaveImage node).
  // NOTE(review): this returns an Image but is stored via setResult (typed
  // Result), and `outputs` is declared as an array while being read by node
  // id — confirm the real response shape.
  const getImageURL = (result: Result) => {
    return result.outputs[9].images[0];
  };
  // Submits the workflow to fal and streams queue updates into local state.
  const generateImage = async () => {
    reset();
    // @snippet:start("client.queue.subscribe")
    setLoading(true);
    const start = Date.now();
    try {
      const result: Result = await fal.subscribe('fal-ai/comfy-server', {
        // Fix: forward the prompt typed in the UI. Previously this called
        // getWorkflow({}), so the text field had no effect and the workflow's
        // baked-in default prompt was always used.
        input: getWorkflow({ prompt }),
        pollInterval: 3000, // Default is 1000 (every 1s)
        logs: true,
        onQueueUpdate(update) {
          setElapsedTime(Date.now() - start);
          if (
            update.status === 'IN_PROGRESS' ||
            update.status === 'COMPLETED'
          ) {
            setLogs((update.logs || []).map((log) => log.message));
          }
        },
      });
      setResult(getImageURL(result));
    } catch (error: any) {
      setError(error);
    } finally {
      setLoading(false);
      setElapsedTime(Date.now() - start);
    }
    // @snippet:end
  };
  return (
    <div className="min-h-screen dark:bg-gray-900 bg-gray-100">
      <main className="container dark:text-gray-50 text-gray-900 flex flex-col items-center justify-center w-full flex-1 py-10 space-y-8">
        <h1 className="text-4xl font-bold mb-8">Comfy SDXL - Text to Image</h1>
        <div className="text-lg w-full">
          <label htmlFor="prompt" className="block mb-2 text-current">
            Prompt
          </label>
          <input
            className="w-full text-lg p-2 rounded bg-black/10 dark:bg-white/5 border border-black/20 dark:border-white/10"
            id="prompt"
            name="prompt"
            placeholder="Imagine..."
            value={prompt}
            autoComplete="off"
            onChange={(e) => setPrompt(e.target.value)}
            onBlur={(e) => setPrompt(e.target.value.trim())}
          />
        </div>
        <button
          onClick={(e) => {
            e.preventDefault();
            generateImage();
          }}
          className="bg-indigo-600 hover:bg-indigo-700 text-white font-bold text-lg py-3 px-6 mx-auto rounded focus:outline-none focus:shadow-outline"
          disabled={loading}
        >
          {loading ? 'Generating...' : 'Generate Image'}
        </button>
        <Error error={error} />
        <div className="w-full flex flex-col space-y-4">
          <div className="mx-auto">
            {image && (
              // eslint-disable-next-line @next/next/no-img-element
              <img src={image.url} alt="" />
            )}
          </div>
          <div className="space-y-2">
            <h3 className="text-xl font-light">JSON Result</h3>
            <p className="text-sm text-current/80">
              {`Elapsed Time (seconds): ${(elapsedTime / 1000).toFixed(2)}`}
            </p>
            <pre className="text-sm bg-black/70 text-white/80 font-mono h-60 rounded whitespace-pre overflow-auto w-full">
              {result
                ? JSON.stringify(result, null, 2)
                : '// result pending...'}
            </pre>
          </div>
          <div className="space-y-2">
            <h3 className="text-xl font-light">Logs</h3>
            <pre className="text-sm bg-black/70 text-white/80 font-mono h-60 rounded whitespace-pre overflow-auto w-full">
              {logs.filter(Boolean).join('\n')}
            </pre>
          </div>
        </div>
      </main>
    </div>
  );
}
export default Index;

View File

@ -0,0 +1,103 @@
// This workflow is generated with ComfyUI-fal
// SDXL text-to-image graph: EmptyLatentImage -> KSampler -> VAEDecode ->
// SaveImage, with positive/negative prompts supplied via StringInput_fal
// nodes. `fal_inputs` holds the user-overridable values;
// `fal_inputs_dev_info` maps each input name to the node/key it patches.
const WORKFLOW = {
  prompt: {
    '3': {
      inputs: {
        seed: 704126934460886,
        steps: 20,
        cfg: 8,
        sampler_name: 'euler',
        scheduler: 'normal',
        denoise: 1,
        model: ['4', 0],
        positive: ['6', 0],
        negative: ['7', 0],
        latent_image: ['5', 0],
      },
      class_type: 'KSampler',
    },
    '4': {
      inputs: {
        ckpt_name: 'sd_xl_1.0.safetensors',
      },
      class_type: 'CheckpointLoaderSimple',
    },
    '5': {
      inputs: {
        width: 1024,
        height: 1024,
        batch_size: 1,
      },
      class_type: 'EmptyLatentImage',
    },
    '6': {
      inputs: {
        text: ['10', 0],
        clip: ['4', 1],
      },
      class_type: 'CLIPTextEncode',
    },
    '7': {
      inputs: {
        text: ['11', 0],
        clip: ['4', 1],
      },
      class_type: 'CLIPTextEncode',
    },
    '8': {
      inputs: {
        samples: ['3', 0],
        vae: ['4', 2],
      },
      class_type: 'VAEDecode',
    },
    '9': {
      inputs: {
        filename_prefix: 'ComfyUI',
        images: ['8', 0],
      },
      class_type: 'SaveImage',
    },
    '10': {
      inputs: {
        name: 'prompt',
        value:
          'beautiful scenery nature glass bottle landscape, , purple galaxy bottle,',
      },
      class_type: 'StringInput_fal',
    },
    '11': {
      inputs: {
        name: 'negative_prompt',
        value: 'text, watermark',
      },
      class_type: 'StringInput_fal',
    },
  },
  extra_data: {},
  fal_inputs_dev_info: {
    prompt: {
      key: ['10', 'inputs', 'value'],
      class_type: 'StringInput_fal',
    },
    negative_prompt: {
      key: ['11', 'inputs', 'value'],
      class_type: 'StringInput_fal',
    },
  },
  fal_inputs: {
    prompt:
      'beautiful scenery nature glass bottle landscape, , purple galaxy bottle,',
    negative_prompt: 'text, watermark',
  },
};

/**
 * Returns a deep copy of WORKFLOW with `fal_inputs` overridden by the given
 * values (e.g. `{ prompt, negative_prompt }`).
 *
 * Deep-copying keeps the shared template immutable: callers may freely mutate
 * the returned object without corrupting later calls.
 */
export default function getWorkflow(overrides: Record<string, unknown> = {}) {
  const workflow = JSON.parse(JSON.stringify(WORKFLOW));
  workflow.fal_inputs = {
    ...workflow.fal_inputs,
    ...overrides,
  };
  return workflow;
}

View File

@ -0,0 +1,86 @@
import * as fal from '@fal-ai/serverless-client';
// This workflow is generated with ComfyUI-fal
// SD1.5 text-to-image graph. Declared BEFORE the subscribe call below:
// `getWorkflow` reads this `const`, and the original ordering called it while
// WORKFLOW was still in its temporal dead zone, throwing a ReferenceError at
// module load.
const WORKFLOW = {
  prompt: {
    '3': {
      inputs: {
        seed: 156680208700286,
        steps: 20,
        cfg: 8,
        sampler_name: 'euler',
        scheduler: 'normal',
        denoise: 1,
        model: ['4', 0],
        positive: ['6', 0],
        negative: ['7', 0],
        latent_image: ['5', 0],
      },
      class_type: 'KSampler',
    },
    '4': {
      inputs: {
        ckpt_name: 'v1-5-pruned-emaonly.ckpt',
      },
      class_type: 'CheckpointLoaderSimple',
    },
    '5': {
      inputs: {
        width: 512,
        height: 512,
        batch_size: 1,
      },
      class_type: 'EmptyLatentImage',
    },
    '6': {
      inputs: {
        clip: ['4', 1],
      },
      class_type: 'CLIPTextEncode',
    },
    '7': {
      inputs: {
        clip: ['4', 1],
      },
      class_type: 'CLIPTextEncode',
    },
    '8': {
      inputs: {
        samples: ['3', 0],
        vae: ['4', 2],
      },
      class_type: 'VAEDecode',
    },
    '9': {
      inputs: {
        filename_prefix: 'ComfyUI',
        images: ['8', 0],
      },
      class_type: 'SaveImage',
    },
  },
  extra_data: {},
  fal_inputs_dev_info: {},
  fal_inputs: {},
};

/**
 * Returns a deep copy of WORKFLOW with `fal_inputs` overridden by the given
 * values. Deep-copying keeps the shared template immutable across calls.
 */
function getWorkflow(overrides: Record<string, unknown> = {}) {
  const workflow = JSON.parse(JSON.stringify(WORKFLOW));
  workflow.fal_inputs = {
    ...workflow.fal_inputs,
    ...overrides,
  };
  return workflow;
}

// This is a simple example of how to use the fal-js SDK to execute a workflow.
// NOTE(review): the endpoint 'fal-ai/fast-sdxl' differs from the
// 'fal-ai/comfy-server' endpoint used by the other Comfy examples — confirm
// which endpoint this snippet is meant to target.
// `fal.subscribe` returns a Promise; this example deliberately fires and
// forgets it, logging progress as the queue reports it.
const result = fal.subscribe('fal-ai/fast-sdxl', {
  input: getWorkflow({}),
  logs: true,
  onQueueUpdate: (update) => {
    if (update.status === 'IN_PROGRESS') {
      // Guard against a missing `logs` array, consistent with the UI examples.
      (update.logs ?? []).map((log) => log.message).forEach(console.log);
    }
  },
});

View File

@ -0,0 +1,91 @@
// This workflow is generated with ComfyUI-fal
// SD1.5 text-to-image graph: EmptyLatentImage -> KSampler -> VAEDecode ->
// SaveImage, with the positive prompt supplied via a StringInput_fal node.
// `fal_inputs` holds the user-overridable values; `fal_inputs_dev_info` maps
// each input name to the node/key it patches.
const WORKFLOW = {
  prompt: {
    '3': {
      inputs: {
        seed: 156680208700286,
        steps: 20,
        cfg: 8,
        sampler_name: 'euler',
        scheduler: 'normal',
        denoise: 1,
        model: ['4', 0],
        positive: ['6', 0],
        negative: ['7', 0],
        latent_image: ['5', 0],
      },
      class_type: 'KSampler',
    },
    '4': {
      inputs: {
        ckpt_name: 'v1-5-pruned-emaonly.ckpt',
      },
      class_type: 'CheckpointLoaderSimple',
    },
    '5': {
      inputs: {
        width: 512,
        height: 512,
        batch_size: 1,
      },
      class_type: 'EmptyLatentImage',
    },
    '6': {
      inputs: {
        text: ['10', 0],
        clip: ['4', 1],
      },
      class_type: 'CLIPTextEncode',
    },
    '7': {
      inputs: {
        text: 'text, watermark',
        clip: ['4', 1],
      },
      class_type: 'CLIPTextEncode',
    },
    '8': {
      inputs: {
        samples: ['3', 0],
        vae: ['4', 2],
      },
      class_type: 'VAEDecode',
    },
    '9': {
      inputs: {
        filename_prefix: 'ComfyUI',
        images: ['8', 0],
      },
      class_type: 'SaveImage',
    },
    '10': {
      inputs: {
        name: 'cliptextencode_text',
        value:
          'beautiful scenery nature glass bottle landscape, , purple galaxy bottle,',
      },
      class_type: 'StringInput_fal',
    },
  },
  extra_data: {},
  fal_inputs_dev_info: {
    cliptextencode_text: {
      key: ['10', 'inputs', 'value'],
      class_type: 'StringInput_fal',
    },
  },
  fal_inputs: {
    cliptextencode_text:
      'beautiful scenery nature glass bottle landscape, , purple galaxy bottle,',
  },
};

/**
 * Returns a deep copy of WORKFLOW with `fal_inputs` overridden by the given
 * values (e.g. `{ cliptextencode_text }`).
 *
 * Deep-copying keeps the shared template immutable: callers may freely mutate
 * the returned object without corrupting later calls.
 */
export function getWorkflow(overrides: Record<string, unknown> = {}) {
  const workflow = JSON.parse(JSON.stringify(WORKFLOW));
  workflow.fal_inputs = {
    ...workflow.fal_inputs,
    ...overrides,
  };
  return workflow;
}