```python
import requests
import torch
from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor

# Load the BLIP base captioning checkpoint in full float32 precision on CPU.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained(
    "Salesforce/blip-image-captioning-base", torch_dtype=torch.float32
).to("cpu")

def caption_image(raw_image):
    # Preprocess: force RGB and build pixel tensors on the model's device/dtype.
    inputs = processor(raw_image.convert("RGB"), return_tensors="pt").to("cpu", torch.float32)
    # Generate a caption of up to 100 new tokens and decode it to plain text.
    out = model.generate(**inputs, max_new_tokens=100)
    return processor.decode(out[0], skip_special_tokens=True)
```
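A minimal usage sketch, which also shows why `requests` and `PIL.Image` are imported above. The URL is a hypothetical placeholder, not from the original:

```python
# Hypothetical example image URL; substitute any accessible image.
url = "https://example.com/demo.jpg"
raw_image = Image.open(requests.get(url, stream=True).raw)
print(caption_image(raw_image))
```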