from io import BytesIO

import requests
import torch
from PIL import Image
from transformers import AutoModelForVision2Seq, AutoProcessor

# Load from the current directory; "." assumes the snippet is run from the
# downloaded model folder (replace with a local path or Hub repo id).
model_dir = "."

# trust_remote_code is needed because the model ships custom modeling code;
# use bfloat16 on GPU and fall back to float32 on CPU.
model = AutoModelForVision2Seq.from_pretrained(
    model_dir,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
)

# Load the matching processor; use_fast selects the fast (Rust-backed)
# tokenizer/image processor where one is available.
processor = AutoProcessor.from_pretrained(
    model_dir,
    trust_remote_code=True,
    use_fast=True,
)

# Some processor configs ship without a patch size; default to 14x14 patches
# (the common ViT patch size) so image token counting works.
if hasattr(processor, "patch_size") and processor.patch_size is None:
    processor.patch_size = 14

# Run on GPU when available and switch to inference mode.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device).eval()

# Download a sample image and decode it as RGB.
image_url = "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg"
resp = requests.get(image_url, timeout=30)
resp.raise_for_status()  # fail early on a bad download
image = Image.open(BytesIO(resp.content)).convert("RGB")

# Preprocess the image and move the resulting tensors to the model's device.
image_inputs = processor.process_images([image])
image_inputs = {k: v.to(device) for k, v in image_inputs.items()}

# Preprocess the text query the same way.
text = "A photo of a tiger"
text_inputs = processor.process_queries([text])
text_inputs = {k: v.to(device) for k, v in text_inputs.items()}

# Embed both modalities without tracking gradients.
with torch.no_grad():
    image_embedding = model(**image_inputs)
    text_embedding = model(**text_inputs)

# Dot product of the two embeddings; .item() works here because a single
# query against a single image yields a 1x1 score matrix.
score = torch.matmul(text_embedding, image_embedding.T).item()
print(f"Similarity score between text and image: {score:.4f}")
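
# --- Optional extension (a sketch, not part of the original snippet) ---
# Assumption: the model returns one pooled embedding vector per input, as the
# matmul above implies; if it instead returns multi-vector (late-interaction)
# embeddings, use the scoring utility from the model card rather than this.
# `candidate_texts` is a hypothetical example list. Normalizing to unit
# length turns the dot product into cosine similarity, so scores stay
# comparable across queries.
candidate_texts = ["A photo of a tiger", "A photo of a cat", "A city skyline"]
cand_inputs = processor.process_queries(candidate_texts)
cand_inputs = {k: v.to(device) for k, v in cand_inputs.items()}

with torch.no_grad():
    cand_embeddings = model(**cand_inputs)

# Unit-normalize both sides, score every candidate against the one image.
cand_norm = torch.nn.functional.normalize(cand_embeddings, dim=-1)
img_norm = torch.nn.functional.normalize(image_embedding, dim=-1)
cosine_scores = (cand_norm @ img_norm.T).squeeze(-1)

for t, s in zip(candidate_texts, cosine_scores.tolist()):
    print(f"cosine={s:.4f}  {t}")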