Update app.py
app.py CHANGED
@@ -1,7 +1,8 @@
 #!/usr/bin/env python
 """
 Gradio demo for Wan2.1 FLF2V – First & Last Frame → Video
-
+Loads once, uses balanced device placement, streams high-level progress,
+and auto-offers the .mp4 for download.
 """
 import os
 import numpy as np
@@ -21,31 +22,29 @@ DTYPE = torch.float16
 MAX_AREA = 1280 * 720
 DEFAULT_FRAMES = 81
 
-#
+# keep Hugging Face cache on disk so we don't re-download
 os.environ["HF_HOME"] = "/mnt/data/huggingface"
 
 # -----------------------------------------------------------------------------
-#
+# PIPELINE LOADED ONCE
 # -----------------------------------------------------------------------------
 def load_pipeline():
-    # 1)
+    # 1) image encoder in full precision
     image_encoder = CLIPVisionModel.from_pretrained(
         MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32
     )
-    # 2) VAE
+    # 2) VAE in half precision (no slicing API here)
    vae = AutoencoderKLWan.from_pretrained(
         MODEL_ID, subfolder="vae", torch_dtype=DTYPE
     )
-
-
-    # 3) Pipeline, balanced across GPU & CPU, fast processor by default
+    # 3) load full pipeline balanced across GPU/CPU, with the fast processor
     pipe = WanImageToVideoPipeline.from_pretrained(
         MODEL_ID,
         image_encoder=image_encoder,
         vae=vae,
         torch_dtype=DTYPE,
-        device_map="balanced",
-        use_fast=True,
+        device_map="balanced",  # spreads the model to fit your 24 GB
+        use_fast=True,          # get the fast CLIPImageProcessor internally
     )
     return pipe
 
@@ -72,7 +71,7 @@ def center_crop_resize(img: Image.Image, h, w):
 
 
 # -----------------------------------------------------------------------------
-#
+# GENERATE WITH STREAMING PROGRESS
 # -----------------------------------------------------------------------------
 def generate(
     first_frame: Image.Image,
@@ -84,7 +83,7 @@ def generate(
     num_frames: int,
     seed: int,
     fps: int,
-    progress= gr.Progress(),
+    progress= gr.Progress(),
 ):
     # seed
     if seed == -1:
@@ -98,10 +97,10 @@ def generate(
         progress(0.15, desc="Resizing last frame…")
         last_resized = center_crop_resize(last_frame, h, w)
     else:
-        last_resized = first_resized
+        last_resized = first_resized
 
-    # 15–25%:
-    progress(0.25, desc="
+    # 15–25%: warm up
+    progress(0.25, desc="Initializing pipeline…")
     out = PIPE(
         image=first_resized,
         last_image=last_resized,
@@ -115,8 +114,8 @@ def generate(
         generator=gen,
     )
 
-    # 25–90%:
-    progress(0.90, desc="
+    # 25–90%: inference happens inside the pipeline (console shows bars)
+    progress(0.90, desc="Exporting video…")
     video_path = export_to_video(out.frames[0], fps=fps)
 
     # done
@@ -135,7 +134,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         last_img = gr.Image(label="Last frame", type="pil")
 
     prompt = gr.Textbox(label="Prompt", placeholder="A blue bird takes off…")
-    negative = gr.Textbox(label="Negative prompt (optional)", placeholder="blurry,
+    negative = gr.Textbox(label="Negative prompt (optional)", placeholder="blurry, low-res")
 
     with gr.Accordion("Advanced parameters", open=False):
         steps = gr.Slider(10, 50, value=30, step=1, label="Steps")
@@ -155,5 +154,5 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         outputs=[ download, seed_used ],
     )
 
-#
+# serialize tasks with a mini progress badge
 demo.queue().launch(server_name="0.0.0.0", server_port=7860)
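The new comment on device_map="balanced" says the weights are spread across GPU and CPU so the model fits within 24 GB of VRAM. A quick way to see where each component actually landed is to walk the pipeline's components after loading. This is only a hedged sketch using standard diffusers/PyTorch attributes; load_pipeline is the function from this commit, everything else is generic:

import torch

pipe = load_pipeline()

# every model component of a DiffusionPipeline is a torch.nn.Module;
# the device of its first parameter tells you where it was placed
for name, component in pipe.components.items():
    if isinstance(component, torch.nn.Module):
        device = next(component.parameters()).device
        print(f"{name:15s} -> {device}")

Exact placement depends on how much free memory the mapper finds at load time, so the split can differ between runs and machines.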
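The progress calls added in generate() follow Gradio's built-in tracker: declaring progress=gr.Progress() as a keyword default makes Gradio inject a tracker when the event runs through the queue, and each progress(fraction, desc=...) call updates the badge shown over the output component. Below is a self-contained toy example of the same mechanism; nothing here is Wan-specific and the names (slow_task, n_steps) are purely illustrative:

import time
import gradio as gr

def slow_task(n_steps: int, progress=gr.Progress()):
    progress(0.0, desc="Starting…")
    for i in range(n_steps):
        time.sleep(0.1)  # stand-in for real work
        progress((i + 1) / n_steps, desc=f"Step {i + 1}/{n_steps}")
    return "done"

with gr.Blocks() as demo:
    steps = gr.Slider(1, 50, value=10, step=1, label="Steps")
    out = gr.Textbox(label="Result")
    gr.Button("Run").click(slow_task, inputs=steps, outputs=out)

demo.queue().launch()

The coarse 0.15 / 0.25 / 0.90 milestones in this commit only bracket the stages around the pipeline call; the denoising loop itself still prints its per-step bar to the console, as the new comment notes.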