Commit 35e555c
Parent(s): 0f7781e
Update app.py
app.py CHANGED
@@ -6,9 +6,13 @@ import utils
 import datetime
 import time
 import psutil
+import random
+
 
 start_time = time.time()
 is_colab = utils.is_google_colab()
+state = None
+current_steps = 25
 
 class Model:
     def __init__(self, name, path="", prefix=""):
@@ -90,6 +94,14 @@ def error_str(error, title="Error"):
     return f"""#### {title}
             {error}""" if error else ""
 
+def update_state(new_state):
+    global state
+    state = new_state
+
+def update_state_info(old_state):
+    if state and state != old_state:
+        return gr.update(value=state)
+
 def custom_model_changed(path):
     models[0].path = path
     global current_model
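Note: `update_state`/`update_state_info` form a simple publish-and-poll progress channel: the worker writes a message into the module-level `state`, and the UI periodically pulls it. Below is a minimal, self-contained sketch of the pattern; only `update_state` and `update_state_info` mirror this commit, while the `slow_job` wiring is illustrative.

```python
import time

import gradio as gr

state = None  # shared, module-level progress message

def update_state(new_state):
    # writer side: a long-running job publishes progress here
    global state
    state = new_state

def update_state_info(old_state):
    # reader side: only push an update when the message changed,
    # so the textbox is not rewritten on every poll tick
    if state and state != old_state:
        return gr.update(value=state)

def slow_job():
    for i in range(5):
        update_state(f"{i + 1}/5 steps")
        time.sleep(1)
    update_state("Done.")
    return "finished"

with gr.Blocks() as demo:
    state_info = gr.Textbox(label="State")
    result = gr.Textbox(label="Result")
    gr.Button("Run").click(slow_job, inputs=None, outputs=result)
    # poll the shared state twice a second, as app.py does
    demo.load(update_state_info, inputs=state_info, outputs=state_info,
              every=0.5, show_progress=False)

demo.queue(concurrency_count=1)
demo.launch()
```

Because `state` is a single module-level variable, concurrent jobs would overwrite each other's messages; the single-worker queue added at the bottom of this commit keeps that from happening in practice.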
@@ -101,8 +113,17 @@ def on_model_change(model_name):
 
     return gr.update(visible = model_name == models[0].name), gr.update(placeholder=prefix)
 
+def on_steps_change(steps):
+    global current_steps
+    current_steps = steps
+
+def pipe_callback(step: int, timestep: int, latents: torch.FloatTensor):
+    update_state(f"{step}/{current_steps} steps")#\nTime left, sec: {timestep/100:.0f}")
+
 def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
 
+    update_state(" ")
+
     print(psutil.virtual_memory()) # print memory usage
 
     global current_model
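Note: `pipe_callback` matches the signature that diffusers pipelines of this period expect for their `callback` argument, which is invoked every `callback_steps` denoising steps (default 1; newer diffusers releases moved to `callback_on_step_end`). A sketch of that hookup, assuming a diffusers release from this era, with a stubbed progress printer and an example model id in place of the app's model registry:

```python
import torch
from diffusers import StableDiffusionPipeline

total_steps = 25  # app.py tracks this in `current_steps` via on_steps_change

def pipe_callback(step: int, timestep: int, latents: torch.FloatTensor):
    # invoked by the pipeline every `callback_steps` denoising steps;
    # app.py forwards this to update_state() for the State textbox
    print(f"{step}/{total_steps} steps")

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",   # example model id, not from the diff
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    "a watercolor painting of a fox",   # example prompt
    num_inference_steps=total_steps,
    callback=pipe_callback,             # the hook this commit adds
    callback_steps=1,                   # call on every step (the default)
).images[0]
```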
@@ -111,17 +132,21 @@ def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height
     current_model = model
     model_path = current_model.path
 
-    generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
+    # generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
+    if seed == 0:
+        seed = random.randint(0, 2147483647)
+
+    generator = torch.Generator('cuda').manual_seed(seed)
 
     try:
         if img is not None:
-            return img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator), None
+            return img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed), None
         else:
-            return txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator), None
+            return txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed), None
     except Exception as e:
         return None, error_str(e)
 
-def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator):
+def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed):
 
     print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
 
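Note: the seed change replaces "0 means non-deterministic" with "0 means draw a concrete random seed", so every run is reproducible and the UI can report the seed that was actually used. A small sketch of the same logic, wrapped in a hypothetical `make_generator` helper (the app inlines this and builds the generator on 'cuda'):

```python
import random

import torch

def make_generator(seed: int = 0, device: str = "cpu"):
    # hypothetical helper; app.py inlines this and uses device='cuda'
    if seed == 0:
        # 0 now means "pick a concrete random seed" rather than
        # "non-deterministic", so the value can be reported and reused
        seed = random.randint(0, 2147483647)  # largest positive int32
    return torch.Generator(device).manual_seed(seed), seed

generator, used_seed = make_generator()   # random but reproducible
print(f"Done. Seed: {used_seed}")         # same message the UI shows
rerun, _ = make_generator(used_seed)      # same seed -> same images
```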
@@ -131,6 +156,8 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
     if model_path != current_model_path or last_mode != "txt2img":
         current_model_path = model_path
 
+        update_state("Loading text-to-image model...")
+
         if is_colab or current_model == custom_model:
             pipe = StableDiffusionPipeline.from_pretrained(
                 current_model_path,
@@ -161,11 +188,14 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
         guidance_scale = guidance,
         width = width,
         height = height,
-        generator = generator)
+        generator = generator,
+        callback=pipe_callback)
+
+    update_state(f"Done. Seed: {seed}")
 
     return replace_nsfw_images(result)
 
-def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator):
+def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):
 
     print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")
 
@@ -175,6 +205,8 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
     if model_path != current_model_path or last_mode != "img2img":
         current_model_path = model_path
 
+        update_state("Loading image-to-image model...")
+
         if is_colab or current_model == custom_model:
             pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                 current_model_path,
@@ -209,8 +241,11 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
         guidance_scale = guidance,
         # width = width,
         # height = height,
-        generator = generator)
-
+        generator = generator,
+        callback=pipe_callback)
+
+    update_state(f"Done. Seed: {seed}")
+
     return replace_nsfw_images(result)
 
 def replace_nsfw_images(results):
@@ -223,9 +258,9 @@ def replace_nsfw_images(results):
             results.images[i] = Image.open("nsfw.png")
     return results.images
 
-css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
-"""
-with gr.Blocks(css=css) as demo:
+# css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
+# """
+with gr.Blocks(css="style.css") as demo:
     gr.HTML(
         f"""
         <div class="finetuned-diffusion-div">
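Note: the inline stylesheet is commented out in favor of `css="style.css"`. Gradio's `Blocks` accepts a path to a CSS file as well as literal CSS, so the styling can be edited without touching app.py; a minimal sketch, assuming a `style.css` sits next to the script:

```python
import gradio as gr

# Blocks takes a path to a stylesheet as well as literal CSS,
# so the rules can live in style.css next to app.py
with gr.Blocks(css="style.css") as demo:
    ...
```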
@@ -261,6 +296,7 @@ with gr.Blocks(css=css) as demo:
            # image_out = gr.Image(height=512)
            gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")
 
+            state_info = gr.Textbox(label="State", show_label=False, max_lines=2).style(container=False)
            error_output = gr.Markdown()
 
        with gr.Column(scale=45):
@@ -286,9 +322,10 @@ with gr.Blocks(css=css) as demo:
                    strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
 
    if is_colab:
-        model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False)
-        custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
+        model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False)
+        custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
    # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
+    steps.change(on_steps_change, inputs=[steps], outputs=[], queue=False)
 
    inputs = [model_name, prompt, guidance, steps, n_images, width, height, seed, image, strength, neg_prompt]
    outputs = [gallery, error_output]
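Note: the handlers wired here are cheap control-flow updates, so they pass `queue=False` and skip the request queue. A minimal sketch of the `steps` wiring (the slider bounds are illustrative, not from the diff):

```python
import gradio as gr

current_steps = 25

def on_steps_change(steps):
    # cache the slider value so pipe_callback can report "step/total"
    # without threading it through every event payload
    global current_steps
    current_steps = steps

with gr.Blocks() as demo:
    steps = gr.Slider(label="Steps", value=current_steps,
                      minimum=2, maximum=75, step=1)  # bounds illustrative
    # queue=False answers over plain HTTP instead of the shared queue,
    # so the update lands even while a generation holds the only worker
    steps.change(on_steps_change, inputs=[steps], outputs=[], queue=False)
```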
@@ -316,8 +353,10 @@ with gr.Blocks(css=css) as demo:
        </div>
        """)
 
+    demo.load(update_state_info, inputs=state_info, outputs=state_info, every=0.5, show_progress=False)
+
 print(f"Space built in {time.time() - start_time:.2f} seconds")
 
-if not is_colab:
-    demo.queue(concurrency_count=1)
+# if not is_colab:
+demo.queue(concurrency_count=1)
 demo.launch(debug=is_colab, share=is_colab)
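Note: `demo.load(..., every=0.5)` re-runs the handler on a timer for each open client connection, and timed events in Gradio ride on the queue; that is likely why `demo.queue(concurrency_count=1)` is now called unconditionally instead of only outside Colab. An annotated restatement of the closing lines:

```python
# every open client re-runs update_state_info twice per second; it
# returns an update only when the shared message actually changed
demo.load(update_state_info, inputs=state_info, outputs=state_info,
          every=0.5, show_progress=False)

# timed `every=` events require the queue to be running, and a single
# concurrency slot serializes GPU work from simultaneous users
demo.queue(concurrency_count=1)
demo.launch(debug=is_colab, share=is_colab)
```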
|