# Source: Hugging Face Space (status at capture time: Running)
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image

# Checkpoint used for captioning. Processor and model are loaded once at
# import time so every request reuses the same weights instead of paying
# the load cost per call.
_MODEL_ID = "Salesforce/blip-image-captioning-base"

processor = BlipProcessor.from_pretrained(_MODEL_ID)
model = BlipForConditionalGeneration.from_pretrained(_MODEL_ID)
def generate_caption(pil_img):
    """Produce a lowercase caption for a PIL image using BLIP.

    Args:
        pil_img: a ``PIL.Image`` instance to caption.

    Returns:
        str: the BLIP-generated caption, lowercased.
    """
    # Preprocess into model-ready tensors, then decode the first (only)
    # generated sequence back to text.
    batch = processor(images=pil_img, return_tensors="pt")
    token_ids = model.generate(**batch)
    text = processor.decode(token_ids[0], skip_special_tokens=True)
    return text.lower()