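# Sanity check: Qwen-Image-Edit uses Qwen2.5-VL as its text encoder, so the
# installed transformers build must provide Qwen2_5_VLForConditionalGeneration.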
try:
    from transformers import Qwen2_5_VLForConditionalGeneration
    print("✅ Import succeeded!")
except Exception as e:
    print("❌ Import failed:", e)
    import traceback
    traceback.print_exc()

# Note: Qwen/Qwen-Image-Edit is published as a diffusers pipeline, not a
# transformers causal LM, so AutoModelForCausalLM.from_pretrained and
# AutoTokenizer.from_pretrained cannot load it. Use QwenImageEditPipeline below.

import os

import torch
from PIL import Image
from diffusers import QwenImageEditPipeline
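
# Load the editing pipeline, cast its weights to bfloat16, and move it to the GPU.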
pipeline = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit")
print("pipeline loaded")
pipeline.to(torch.bfloat16)
pipeline.to("cuda")
pipeline.set_progress_bar_config(disable=None)
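
# Input image and edit instruction.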
image = Image.open("./input.png").convert("RGB")
prompt = "Change the rabbit's color to purple, with a flash light background."
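
# Fixed seed for reproducibility; true_cfg_scale sets the classifier-free
# guidance strength; 50 denoising steps.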
inputs = {
    "image": image,
    "prompt": prompt,
    "generator": torch.manual_seed(0),
    "true_cfg_scale": 4.0,
    "negative_prompt": " ",
    "num_inference_steps": 50,
}
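
# Run the edit with gradient tracking disabled.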
with torch.inference_mode():
    output = pipeline(**inputs)
    output_image = output.images[0]
    output_image.save("output_image_edit.png")
    print("image saved at", os.path.abspath("output_image_edit.png"))