import gradio as gr
from utils import randomize_seed_fn
import requests
import json
from io import BytesIO
from PIL import Image, ImageOps
import base64
import numpy as np
import openai
import os
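
# Prompt enhancer: sends the user's prompt to OpenAI's chat completion API
# (gpt-3.5-turbo, via the pre-1.0 openai.ChatCompletion interface) and returns
# a keyword-enriched version. Requires the "openai_api_key" environment variable.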
def magic_prompt(prompt):
    openai.api_key = os.environ["openai_api_key"]
    mag_prompt = ""
    if prompt == "" or prompt == "Please write a prompt first ✍️":
        mag_prompt = "Please write a prompt first ✍️"
    else:
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a rephraser."},
                {"role": "user", "content": "As an image description enhancer, you will add details such as tone, style, context, and effects based on the given description. The output will be a short group of words separated by commas that enhance the original description. To enhance image descriptions, include specific and detailed keywords from categories like subject, medium, style, artist, website, resolution, additional details, color, and lighting. However, using too many keywords may not necessarily improve the quality of the generated image. Keeping this in mind, write me a description for “{prompt}”.".format(prompt=prompt)},
            ],
        )
        mag_prompt = str(completion.choices[0].message.content)
    return mag_prompt
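
# Generation step: encodes the sketch as a base64 JPEG, posts it to a
# Cerebrium-hosted diffusion endpoint (chosen by design type via the
# "cerebrium_scribble_url" / "cerebrium_softedge_url" environment variables),
# and returns the four generated images decoded from the JSON "result" list.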
def process(
    image: np.ndarray,
    prompt: str,
    additional_prompt: str,
    negative_prompt: str,
    num_images: int,
    image_resolution: int,
    preprocess_resolution: int,
    num_steps: int,
    guidance_scale: float,
    seed: int,
    preprocessor_name: str,
    sketch_type: str,
    render_type: str,
):
    # Encode the input canvas as a base64 JPEG string.
    image = Image.fromarray(image)
    buffered = BytesIO()
    image.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue())
    img_base64 = img_str.decode()

    # Pick the endpoint matching the input design type.
    if sketch_type == "Scribble":
        url = os.environ["cerebrium_scribble_url"]
    elif sketch_type == "Sketch":
        url = os.environ["cerebrium_softedge_url"]

    # Override the helper prompts according to the desired output style.
    if render_type == "3D Design":
        additional_prompt = "best quality, extremely detailed, 3d product render, finely detailed, purism, ue 5, a computer rendering, minimalism, octane render, 4k"
        negative_prompt = "EasyNegative, (worst quality:2), (low quality:2), (normal quality:2), low res, ((monochrome)), ((grayscale)), cropped, text, jpeg artifacts, signature, watermark, username, sketch, cartoon, drawing, anime, duplicate, blurry, semi-realistic, out of frame, ugly, deformed"
    elif render_type == "Realistic":
        additional_prompt = "best quality, extremely detailed, 4k, hyper realistic"
        negative_prompt = "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality"

    payload = json.dumps({
        "prompt": prompt + ", " + additional_prompt,
        "negative_prompt": negative_prompt,
        "image": img_base64,
        "num_images_per_prompt": 4,  # fixed at 4 to match the four output slots
        "seed": seed,
        "preprocessor_name": preprocessor_name,
    })
    headers = {
        'Authorization': os.environ["cerebrium_auth"],
        'Content-Type': 'application/json',
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    response = response.json()

    # Decode each returned base64 image into a PIL image.
    results = []
    for encoded in response["result"]:
        im = Image.open(BytesIO(base64.b64decode(encoded)))
        im = im.convert("RGB")
        results.append(im)
    return results[0], results[1], results[2], results[3]
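
# Erases faint guidelines from the uploaded sketch: pixels brighter than a
# slider-controlled threshold are set to pure white on a grayscale copy.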
def guideline_eraser(eraser_strength, image):
    image = Image.fromarray(image)
    img_gray = ImageOps.grayscale(image)
    data = np.array(img_gray)
    # Map the 0-100 slider value to a 0-255 brightness threshold and whiten
    # every pixel at or above it.
    areas_to_delete = data >= (255 - eraser_strength * 2.55)
    data[areas_to_delete] = 255
    res_img = Image.fromarray(data)
    return res_img
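
# Sends one result image to the Cerebrium ESRGAN upscaler webhook and returns
# the 4x upscaled image; authenticated with the same "cerebrium_auth" token.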
def upscale(image: np.ndarray):
    image = Image.fromarray(image)
    buffered = BytesIO()
    image.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue())
    img_base64 = img_str.decode()
    url = "https://run.cerebrium.ai/esrgan-upscaler-webhook/predict"
    # url = "https://run.cerebrium.ai/sd-upscaler-webhook/predict"
    payload = json.dumps({"image": img_base64, "upscale": 4})
    headers = {
        'Authorization': os.environ["cerebrium_auth"],
        'Content-Type': 'application/json',
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    response = response.json()
    upscaled_img = Image.open(BytesIO(base64.b64decode(response["result"]["result"])))
    upscaled_img = upscaled_img.convert("RGB")
    return upscaled_img
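
# Identity callback: copies the freshly uploaded canvas into the read-only
# "Image" preview so the guideline eraser always starts from the untouched upload.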
def save_img(image):
    return image
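
# Builds the Gradio UI: canvas, design/render options and prompt controls on the
# left; the four generated results, each with a 4x upscale button, on the right.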
def create_demo(process):
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown("# Concept Creator 🎨🖌️")
        with gr.Row():
            with gr.Column():
                image = gr.Image(label="Canvas")
                original_img = gr.Image(height=100, width=100, label="Image", interactive=False, show_share_button=False)
                image.upload(fn=save_img, inputs=[image], outputs=original_img)
                eraser_strength = gr.Slider(0, 100, step=10, label="Guideline Eraser Strength")
                eraser_strength.release(guideline_eraser, inputs=[eraser_strength, original_img], outputs=image)
                sketch_type = gr.Radio(
                    label="Design Type",
                    info="Type of your input design",
                    choices=["Scribble", "Sketch"],
                    type="value",
                    value="Scribble",
                )
                render_type = gr.Radio(
                    label="Render Type",
                    info="Type of the desired output design",
                    choices=["3D Design", "Realistic"],
                    type="value",
                    value="3D Design",
                )
                prompt = gr.Textbox(label='Prompt')
                mag_prompt_btn = gr.Button("✨Magic Prompt Enhancer")
                run_button = gr.Button('Run')
                with gr.Accordion('Advanced options', open=False, visible=False):
                    preprocessor_name = gr.Radio(
                        label='Preprocessor',
                        choices=['HED', 'PidiNet', 'None'],
                        type='value',
                        value='PidiNet')
                    num_samples = gr.Slider(label='Number of images',
                                            minimum=1,
                                            maximum=4,
                                            value=4,
                                            step=1)
                    image_resolution = gr.Slider(
                        label='Image resolution',
                        minimum=256,
                        maximum=512,
                        value=512,
                        step=256)
                    preprocess_resolution = gr.Slider(
                        label='Preprocess resolution',
                        minimum=128,
                        maximum=512,
                        value=512,
                        step=1)
                    num_steps = gr.Slider(label='Number of steps',
                                          minimum=1,
                                          maximum=100,
                                          value=20,
                                          step=1)
                    guidance_scale = gr.Slider(label='Guidance scale',
                                               minimum=0.1,
                                               maximum=30.0,
                                               value=9.0,
                                               step=0.1)
                    seed = gr.Slider(label='Seed',
                                     minimum=0,
                                     maximum=1000000,
                                     step=1,
                                     value=0)
                    randomize_seed = gr.Checkbox(label='Randomize seed', value=True)
                    a_prompt = gr.Textbox(
                        label='Additional prompt',
                        value='best quality, extremely detailed, 3d product render, finely detailed, purism, ue 5, a computer rendering, minimalism, octane render, 4k')
                    n_prompt = gr.Textbox(
                        label='Negative prompt',
                        value='EasyNegative, (worst quality:2), (low quality:2), (normal quality:2), low res, ((monochrome)), ((grayscale)), cropped, text, jpeg artifacts, signature, watermark, username, sketch, cartoon, drawing, anime, duplicate, blurry, semi-realistic, out of frame, ugly, deformed')
            with gr.Column():
                # result = gr.Gallery(label='Output',
                #                     show_label=True,
                #                     preview=True,
                #                     columns=2,
                #                     object_fit='scale-down')
                with gr.Column():
                    with gr.Row():
                        result1 = gr.Image(show_download_button=True, show_label=False, show_share_button=False)
                        result2 = gr.Image(show_download_button=True, show_label=False, show_share_button=False)
                    with gr.Row():
                        up_btn1 = gr.Button("4x Upscale")
                        up_btn2 = gr.Button("4x Upscale")
                    with gr.Row():
                        result3 = gr.Image(show_download_button=True, show_label=False, show_share_button=False)
                        result4 = gr.Image(show_download_button=True, show_label=False, show_share_button=False)
                    with gr.Row():
                        up_btn3 = gr.Button("4x Upscale")
                        up_btn4 = gr.Button("4x Upscale")
                with gr.Row():
                    last_output = gr.Image()
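
        # Arguments forwarded to process(); the order must match its signature.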
        inputs = [
            image,
            prompt,
            a_prompt,
            n_prompt,
            num_samples,
            image_resolution,
            preprocess_resolution,
            num_steps,
            guidance_scale,
            seed,
            preprocessor_name,
            sketch_type,
            render_type,
        ]
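
        # Submitting the prompt or clicking Run first refreshes the seed via
        # randomize_seed_fn, then calls process() with the collected inputs.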
        prompt.submit(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
            api_name=False,
        ).then(
            fn=process,
            inputs=inputs,
            outputs=[result1, result2, result3, result4],
            api_name=False,
        )
        mag_prompt_btn.click(
            fn=magic_prompt,
            inputs=prompt,
            outputs=prompt,
        )
        run_button.click(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
            queue=False,
            api_name=False,
        ).then(
            fn=process,
            inputs=inputs,
            outputs=[result1, result2, result3, result4],
            api_name='scribble',
        )
        up_btn1.click(
            fn=upscale,
            inputs=[result1],
            outputs=last_output,
        )
        up_btn2.click(
            fn=upscale,
            inputs=[result2],
            outputs=last_output,
        )
        up_btn3.click(
            fn=upscale,
            inputs=[result3],
            outputs=last_output,
        )
        up_btn4.click(
            fn=upscale,
            inputs=[result4],
            outputs=last_output,
        )
    return demo
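
# Build the UI and launch it; the queue allows up to 10 concurrent workers and
# the API docs are hidden.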
demo = create_demo(process)
demo.queue(concurrency_count=10, api_open=False).launch(show_api=False)