LPX55 committed on
Commit
cdd3ef4
·
verified ·
1 Parent(s): 720d067

Update app.py from anycoder

Browse files
Files changed (1) hide show
  1. app.py +0 -167
app.py CHANGED
@@ -1,167 +0,0 @@
1
- import gradio as gr
2
- import numpy as np
3
- import time
4
- from PIL import Image
5
- import torch
6
- from torchvision import transforms
7
- import torchvision.models as models
8
- import os
9
-
10
# Model loading with caching for efficient GPU usage
class ImageProcessor:
    """Wraps a pretrained ResNet-18 classifier: one-time model load plus
    preprocessing and top-5 ImageNet prediction for PIL images."""

    def __init__(self):
        self.model = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Standard ImageNet eval preprocessing: 256 resize, 224 center crop,
        # tensor conversion, ImageNet mean/std normalization.
        self.transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        self.class_names = None
        self.load_model()

    def load_model(self):
        """Load the model and class names once; subsequent calls are no-ops."""
        if self.model is not None:
            return

        # `pretrained=True` is deprecated (removed in torchvision >= 0.13);
        # the weights enum is the supported API and also carries the labels.
        weights = models.ResNet18_Weights.DEFAULT
        self.model = models.resnet18(weights=weights).to(self.device)
        self.model.eval()

        # Class names: prefer a local imagenet_classes.txt, then the category
        # names bundled with the weights, then generic placeholders.
        try:
            class_names_path = os.path.join(os.path.dirname(__file__), "imagenet_classes.txt")
            if os.path.exists(class_names_path):
                with open(class_names_path) as f:
                    self.class_names = [line.strip() for line in f]
            else:
                categories = list(weights.meta.get("categories") or [])
                self.class_names = categories or [f"class_{i}" for i in range(1000)]
        except Exception:
            # Best-effort fallback: never fail startup over label loading.
            self.class_names = [f"class_{i}" for i in range(1000)]

    def process_image(self, image):
        """Classify a PIL image.

        Returns:
            (results, processing_time): a list of five
            {"class": str, "probability": float} dicts sorted by confidence,
            and the elapsed wall-clock seconds.
        """
        start_time = time.time()

        # Batch dimension of 1; move to the same device as the model.
        img_tensor = self.transform(image).unsqueeze(0).to(self.device)

        # Inference only — no gradient tracking needed.
        with torch.no_grad():
            outputs = self.model(img_tensor)

        probabilities = torch.nn.functional.softmax(outputs[0], dim=0)
        top5_prob, top5_catid = torch.topk(probabilities, 5)

        results = [
            {
                "class": self.class_names[top5_catid[i]],
                "probability": float(top5_prob[i]),
            }
            for i in range(top5_prob.size(0))
        ]

        return results, time.time() - start_time
72
# Module-level singleton: the model is loaded exactly once at app startup
# and shared by every request (loading per-call would be far too slow).
processor = ImageProcessor()
74
-
75
def predict(image):
    """Generator handler for the "Process Image" button.

    Yields (status, predictions) pairs — the click listener registers exactly
    two outputs (processing_status, output_text), so each yield MUST produce
    two values. The original yielded three, which errors on every click.

    Raises:
        gr.Error: when no image is supplied or inference fails.
    """
    if image is None:
        raise gr.Error("Please upload an image first!")

    try:
        # Loading state while inference runs.
        yield "Processing image...", None

        results, processing_time = processor.process_image(image)

        output_text = f"Processing time: {processing_time:.2f} seconds\n\nTop 5 predictions:\n"
        for result in results:
            output_text += f"- {result['class']}: {result['probability']:.2%}\n"

        yield "Done", output_text

    except gr.Error:
        # Don't re-wrap our own user-facing errors.
        raise
    except Exception as e:
        raise gr.Error(f"Error processing image: {str(e)}") from e
96
-
97
- # Create custom theme for modern UI
98
- custom_theme = gr.themes.Soft(
99
- primary_hue="blue",
100
- secondary_hue="indigo",
101
- neutral_hue="slate",
102
- font=gr.themes.GoogleFont("Inter"),
103
- text_size="lg",
104
- spacing_size="lg",
105
- radius_size="md"
106
- ).set(
107
- button_primary_background_fill="*primary_600",
108
- button_primary_background_fill_hover="*primary_700",
109
- block_title_text_weight="600",
110
- input_background_fill="*surface_light",
111
- input_border_color="*border_subtle",
112
- input_border_width="1px",
113
- input_border_radius="8px",
114
- )
115
-
116
# UI layout. The theme is a Blocks constructor argument (launch() does not
# accept one), so custom_theme is attached here.
with gr.Blocks(theme=custom_theme) as demo:
    gr.Markdown("""
# 🚀 AI Image Classifier

Upload an image to get instant predictions using a pre-trained ResNet model.
Optimized for efficient GPU usage with smart container management.

Built with anycoder - [Deploy on Hugging Face Spaces](https://huggingface.co/spaces/akhaliq/anycoder)
""")

    with gr.Row():
        with gr.Column():
            image_input = gr.Image(
                type="pil",
                label="Upload Image",
                height=400,
                interactive=True
            )
            upload_btn = gr.Button("Process Image", variant="primary", scale=1)

        with gr.Column():
            output_text = gr.Textbox(
                label="Predictions",
                lines=10,
                interactive=False,
                placeholder="Results will appear here..."
            )
            processing_status = gr.Textbox(
                label="Status",
                interactive=False,
                visible=False
            )

    # Wire the button to the generator handler. `api_visibility="public"` was
    # removed: it is not a Gradio event-listener parameter and raises TypeError.
    upload_btn.click(
        fn=predict,
        inputs=[image_input],
        outputs=[processing_status, output_text],
    )
156
-
157
# Launch the server. `theme=` and `footer_links=` were removed: neither is a
# launch() parameter (theme is a gr.Blocks constructor argument), and passing
# unknown keyword arguments makes launch() raise TypeError at startup.
demo.launch(
    show_error=True,          # surface handler exceptions in the UI
    server_name="0.0.0.0",    # bind all interfaces for container deployment
    server_port=7860,
    max_threads=100,
    analytics_enabled=False,
    share=False  # Disable share for production deployment
)