GeradeHouse committed (verified)
Commit 5db3e50 · 1 Parent(s): 18358fb

Update app.py

Files changed (1)
  1. app.py +18 -19
app.py CHANGED
@@ -1,7 +1,8 @@
 #!/usr/bin/env python
 """
 Gradio demo for Wan2.1 FLF2V – First & Last Frame → Video
-Auto-loads the fast processor and avoids missing preprocessor_config.json.
+Loads once, uses balanced device placement, streams high-level progress,
+and auto-offers the .mp4 for download.
 """
 import os
 import numpy as np
@@ -21,31 +22,29 @@ DTYPE = torch.float16
 MAX_AREA = 1280 * 720
 DEFAULT_FRAMES = 81
 
-# Persist cache so safetensors only download once
+# keep Hugging Face cache on disk so we don't re-download
 os.environ["HF_HOME"] = "/mnt/data/huggingface"
 
 # -----------------------------------------------------------------------------
-# LOAD PIPELINE ONCE
+# PIPELINE LOADED ONCE
 # -----------------------------------------------------------------------------
 def load_pipeline():
-    # 1) Image encoder (fp32)
+    # 1) image encoder in full precision
     image_encoder = CLIPVisionModel.from_pretrained(
         MODEL_ID, subfolder="image_encoder", torch_dtype=torch.float32
     )
-    # 2) VAE (half-precision) + slicing
+    # 2) VAE in half precision (no slicing API here)
     vae = AutoencoderKLWan.from_pretrained(
         MODEL_ID, subfolder="vae", torch_dtype=DTYPE
     )
-    vae.enable_slicing()
-
-    # 3) Pipeline, balanced across GPU & CPU, fast processor by default
+    # 3) load full pipeline balanced across GPU/CPU, with the fast processor
     pipe = WanImageToVideoPipeline.from_pretrained(
         MODEL_ID,
         image_encoder=image_encoder,
         vae=vae,
         torch_dtype=DTYPE,
-        device_map="balanced",
-        use_fast=True,          # get the fast CLIPImageProcessor internally
+        device_map="balanced",  # spreads the model to fit your 24 GB
+        use_fast=True,          # get the fast CLIPImageProcessor internally
     )
     return pipe
 
@@ -72,7 +71,7 @@ def center_crop_resize(img: Image.Image, h, w):
 
 
 # -----------------------------------------------------------------------------
-# GENERATION WITH PROGRESS STREAMING
+# GENERATE WITH STREAMING PROGRESS
 # -----------------------------------------------------------------------------
 def generate(
     first_frame: Image.Image,
@@ -84,7 +83,7 @@ def generate(
     num_frames: int,
     seed: int,
     fps: int,
-    progress= gr.Progress(),   # built-in streamer
+    progress= gr.Progress(),
 ):
     # seed
     if seed == -1:
@@ -98,10 +97,10 @@ def generate(
         progress(0.15, desc="Resizing last frame…")
         last_resized = center_crop_resize(last_frame, h, w)
     else:
-        last_resized = first_resized   # same size
+        last_resized = first_resized
 
-    # 15–25%: setup
-    progress(0.25, desc="Launching pipeline…")
+    # 15–25%: warm up
+    progress(0.25, desc="Initializing pipeline…")
     out = PIPE(
         image=first_resized,
         last_image=last_resized,
@@ -115,8 +114,8 @@ def generate(
         generator=gen,
     )
 
-    # 25–90%: we assume the pipeline prints its own bars in console
-    progress(0.90, desc="Building video…")
+    # 25–90%: inference happens inside the pipeline (console shows bars)
+    progress(0.90, desc="Exporting video…")
     video_path = export_to_video(out.frames[0], fps=fps)
 
     # done
@@ -135,7 +134,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     last_img = gr.Image(label="Last frame", type="pil")
 
     prompt = gr.Textbox(label="Prompt", placeholder="A blue bird takes off…")
-    negative = gr.Textbox(label="Negative prompt (optional)", placeholder="blurry, lowres")
+    negative = gr.Textbox(label="Negative prompt (optional)", placeholder="blurry, low-res")
 
     with gr.Accordion("Advanced parameters", open=False):
         steps = gr.Slider(10, 50, value=30, step=1, label="Steps")
@@ -155,5 +154,5 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         outputs=[ download, seed_used ],
     )
 
-# queue() so tasks are serialized with a top-right mini-progress indicator
+# serialize tasks with a mini progress badge
 demo.queue().launch(server_name="0.0.0.0", server_port=7860)
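
The main functional change in this diff is dropping the vae.enable_slicing() call; the other edits adjust comments and UI strings. If sliced VAE decoding is still wanted for VRAM headroom, a defensive variant of that step could probe for the method instead of assuming the class exposes it. A minimal sketch, not part of this commit, reusing the MODEL_ID and DTYPE constants from app.py:

# Hypothetical alternative to removing the call outright: enable slicing only
# when the installed diffusers build provides it on AutoencoderKLWan.
vae = AutoencoderKLWan.from_pretrained(
    MODEL_ID, subfolder="vae", torch_dtype=DTYPE
)
if hasattr(vae, "enable_slicing"):
    vae.enable_slicing()  # decode latents in slices to lower peak VRAM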