# backup/tools/qa_eval.py
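"""Multi-view VQA classifier built on top of the FG-CLIP backbone.

Each image view and the question are embedded with FG-CLIP, projected to a
shared width, fused by a small Transformer encoder, and classified over a
fixed answer vocabulary.
"""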
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from transformers import (
    AutoImageProcessor,
    AutoTokenizer,
    AutoModelForCausalLM,
)


class MultiViewVQAClassifier(nn.Module):
def __init__(self,
image_embed_dim: int,
num_answers: int,
fusion_width: int = 512,
n_fusion_layers: int = 4):
super().__init__()
        # FG-CLIP embeds text and images into the same space, so the widths match
        text_embed_dim = image_embed_dim
# ---- projections to a shared width ----
self.img_proj = nn.Linear(image_embed_dim, fusion_width)
self.txt_proj = nn.Linear(text_embed_dim, fusion_width)
        # ---- FG-CLIP backbone (loaded via its custom remote code) ----
        model_root = "qihoo360/fg-clip-base"
        self.model = AutoModelForCausalLM.from_pretrained(
            model_root, trust_remote_code=True
        ).cuda()
        # ---- fusion encoder over (V + 1) tokens: V image views + 1 pooled question token ----
encoder_layer = nn.TransformerEncoderLayer(
d_model=fusion_width, nhead=8, dim_feedforward=fusion_width * 4, batch_first=True
)
self.fusion = nn.TransformerEncoder(encoder_layer, num_layers=n_fusion_layers)
# ---- classification head ----
self.head = nn.Sequential(
nn.Linear(fusion_width, fusion_width),
nn.ReLU(),
nn.Linear(fusion_width, num_answers)
)
def forward(self, images, questions, answer_targets=None):
"""
image_cls : FloatTensor (B, 32, D_img) – CLS from every view
questions : list[str] – raw question strings
answer_targets : LongTensor (B,) or None – index in answer vocab
"""
image_cls = torch.stack([
self.model.get_image_features(images[:, i, ...]) # (B, D_img)
for i in range(images.shape[1])
], dim=1)
B = image_cls.size(0)
        # project image features to the shared fusion width
        img_tokens = self.img_proj(image_cls)                 # (B, V, fusion_width)
        # encode the question; FG-CLIP returns one pooled embedding per question
        txt_hidden = self.model.get_text_features(questions)  # (B, D_txt)
        txt_tokens = self.txt_proj(txt_hidden).unsqueeze(1)   # (B, 1, fusion_width)
        # concatenate image and text tokens and fuse
        fused = torch.cat([img_tokens, txt_tokens], dim=1)    # (B, V+1, fusion_width)
        fused = self.fusion(fused)                            # (B, V+1, fusion_width)
        # pool – use the first image-view token
        # (alternatively: pooled = fused.mean(dim=1) to mean-pool over all tokens)
        pooled = fused[:, 0]                                  # (B, fusion_width)
        logits = self.head(pooled)                            # (B, num_answers)
loss = None
        if answer_targets is not None:
            loss = F.cross_entropy(logits, answer_targets)
return {"logits": logits, "loss": loss}
if __name__ == "__main__":
    # Example usage: two views from a ScanNet scene
    image_paths = [
        'light_scannet/scene0000_00/color/00140.jpg',
        'light_scannet/scene0000_00/color/00400.jpg',
    ]
    images = [Image.open(p).convert('RGB') for p in image_paths]
model_root = "qihoo360/fg-clip-base"
image_processor = AutoImageProcessor.from_pretrained(model_root)
    images = image_processor(images, return_tensors='pt')['pixel_values']  # (V, 3, H, W)
    images = images.unsqueeze(0)  # add batch dimension -> (1, V, 3, H, W)
questions = ["What is in the image?"]
tokenizer = AutoTokenizer.from_pretrained(model_root)
q = tokenizer(questions, return_tensors="pt", padding=True, truncation=True)
    q = q.input_ids.cuda()  # input_ids is already a LongTensor; just move it to the GPU
print(images.shape, q.shape)
model = MultiViewVQAClassifier(image_embed_dim=512, num_answers=1000)
model = model.cuda()
images = images.cuda()
result = model(images, q)
print(result["logits"].shape, result["loss"])