#!/usr/bin/env python3
"""
Advanced Live Portrait Demo
Simplified version for Hugging Face Spaces
"""
import os

# Disable Gradio telemetry and silence TensorFlow's C++ log output
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# IMPORTANT: patch huggingface_hub early, before gradio imports it

# Mock HfFolder class so the demo works without a logged-in HF account
class MockHfFolder:
    @staticmethod
    def get_token():
        return os.environ.get("HF_TOKEN", "")

    @staticmethod
    def save_token(token):
        os.environ["HF_TOKEN"] = token

# Monkey patch at module level. Patching attributes on the real module is
# safer than replacing it in sys.modules: gradio imports many other names
# from huggingface_hub, and a bare stub module would break those imports.
import huggingface_hub
huggingface_hub.HfFolder = MockHfFolder
huggingface_hub.whoami = lambda *args, **kwargs: {"name": "demo_user"}
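# Quick sanity check (illustrative only; assumes HF_TOKEN may be unset, in
# which case the mock returns an empty string rather than None):
#
#     from huggingface_hub import HfFolder
#     assert HfFolder.get_token() == os.environ.get("HF_TOKEN", "")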
# Now import gradio - it will pick up the patched huggingface_hub attributes
import gradio as gr
import numpy as np
from PIL import Image
import tempfile
import cv2
def create_demo_video(image):
    """Create a simple demo video from an uploaded image."""
    if image is None:
        return None, "Please upload an image first"
    try:
        # Convert to numpy array (force RGB so RGBA/grayscale uploads
        # don't break the BGR conversion below)
        img_array = np.array(image.convert("RGB"))
        # Create output video path
        output_path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
        # Video parameters
        height, width = img_array.shape[:2]
        fps = 24
        duration = 2  # seconds
        # Initialize video writer
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
        # Create frames with a simple two-phase animation
        for i in range(fps * duration):
            frame = img_array.copy()
            if i < fps:
                # Phase 1: fade in over the first second
                alpha = i / fps
                frame = (frame * alpha).astype(np.uint8)
            elif i > fps:
                # Phase 2: slight zoom, growing ~0.1% per frame
                scale = 1 + (i - fps) * 0.001
                new_h, new_w = int(height * scale), int(width * scale)
                if new_h > 0 and new_w > 0:
                    frame = cv2.resize(frame, (new_w, new_h))
                    # Crop back to the original size, centered
                    y_start = (new_h - height) // 2
                    x_start = (new_w - width) // 2
                    frame = frame[y_start:y_start + height, x_start:x_start + width]
            # Convert back to BGR for OpenCV
            frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            video_writer.write(frame_bgr)
        video_writer.release()
        return output_path, "✅ Demo video created successfully!"
    except Exception as e:
        return None, f"❌ Error: {str(e)}"
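# Example usage outside Gradio (a minimal sketch; "face.jpg" is a
# hypothetical local file, not something shipped with this Space):
#
#     from PIL import Image
#     video_path, status = create_demo_video(Image.open("face.jpg"))
#     print(status, video_path)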
# Create the interface
with gr.Blocks(title="Advanced Live Portrait Demo", theme=gr.themes.Soft()) as demo:
gr.Markdown("""
# 🎬 Advanced Live Portrait - Demo
*A preview of the portrait animation tool*
""")
with gr.Row():
with gr.Column():
image_input = gr.Image(
label="Upload a face image",
type="pil",
height=300
)
generate_btn = gr.Button(
"Generate Demo Animation",
variant="primary",
size="lg"
)
with gr.Column():
output_video = gr.Video(
label="Generated Animation",
height=300
)
status_text = gr.Textbox(
label="Status",
value="Ready to generate...",
interactive=False
)
# Connect button
generate_btn.click(
fn=create_demo_video,
inputs=[image_input],
outputs=[output_video, status_text]
)
# Add examples
gr.Examples(
examples=[
["https://images.unsplash.com/photo-1494790108755-2616b786d4b9?w=512&h=512&fit=crop"],
["https://images.unsplash.com/photo-1534528741775-53994a69daeb?w=512&h=512&fit=crop"],
["https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?w=512&h=512&fit=crop"],
],
inputs=[image_input],
outputs=[output_video, status_text],
fn=create_demo_video,
cache_examples=True,
label="Try these examples:"
)
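    # Note: with cache_examples=True, Gradio runs create_demo_video on each
    # example ahead of time and reuses the cached results on later clicks.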
    # Add info section
    with gr.Accordion("ℹ️ About this demo", open=False):
        gr.Markdown("""
        This is a **lightweight demo** of the Advanced Live Portrait tool.

        ### For the full version:
        1. **Clone locally:**
           ```bash
           git clone https://github.com/Ayeeee45/AdvancedLivePortrait-WebUI.git
           cd AdvancedLivePortrait-WebUI
           ```
        2. **Install dependencies:**
           ```bash
           pip install -r requirements.txt
           ```
        3. **Download models** (from the repository links)
        4. **Run:**
           ```bash
           python webui.py
           ```

        ### Requirements for full version:
        - 8GB+ VRAM GPU
        - 20GB+ disk space
        - Python 3.10
        """)
# Launch the app
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        debug=False
    )