import torch
import torch.nn as nn
import torch.nn.functional as F

from PIL import Image
from transformers import (
    AutoImageProcessor,
    AutoTokenizer,
    AutoModelForCausalLM,
)


class MultiViewVQAClassifier(nn.Module):
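    """Multi-view VQA classifier built on top of FG-CLIP.

    Each view is encoded by the FG-CLIP image tower and the question by its text
    tower; a small Transformer encoder fuses the resulting tokens and an MLP head
    classifies over a fixed answer vocabulary.
    """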
    def __init__(self,
                 image_embed_dim: int,
                 num_answers: int,
                 fusion_width: int = 512,
                 n_fusion_layers: int = 4):
        super().__init__()

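        # fg-clip-base is assumed to project image and text features to the same
        # dimension, so the text width is taken to equal image_embed_dim.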
        text_embed_dim = image_embed_dim

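        # Linear projections that map both modalities into the shared fusion width.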
        self.img_proj = nn.Linear(image_embed_dim, fusion_width)
        self.txt_proj = nn.Linear(text_embed_dim, fusion_width)

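        # FG-CLIP backbone used for both image and text features; trust_remote_code
        # is required to load the repo's custom modeling code.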
        model_root = "qihoo360/fg-clip-base"
        self.model = AutoModelForCausalLM.from_pretrained(
            model_root, trust_remote_code=True
        ).cuda()

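        # Transformer encoder that fuses the per-view image tokens with the question token.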
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=fusion_width, nhead=8, dim_feedforward=fusion_width * 4, batch_first=True
        )
        self.fusion = nn.TransformerEncoder(encoder_layer, num_layers=n_fusion_layers)

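        # Two-layer MLP classification head over the answer vocabulary.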
        self.head = nn.Sequential(
            nn.Linear(fusion_width, fusion_width),
            nn.ReLU(),
            nn.Linear(fusion_width, num_answers)
        )

    def forward(self, images, questions, answer_targets=None):
        """
        images         : FloatTensor (B, V, C, H, W) – pixel values for each view
        questions      : LongTensor  (B, L)          – tokenized question ids
        answer_targets : LongTensor  (B,) or None    – index in answer vocab
        """
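        # Encode each view independently with the image tower, then stack to (B, V, D_img).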
        image_cls = torch.stack([
            self.model.get_image_features(images[:, i, ...])
            for i in range(images.shape[1])
        ], dim=1)

        B = image_cls.size(0)

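        # Project the per-view embeddings to the fusion width: (B, V, fusion_width).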
        img_tokens = self.img_proj(image_cls)

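        # Encode the question and append it as a single extra token: (B, 1, fusion_width).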
        txt_hidden = self.model.get_text_features(questions)
        txt_tokens = self.txt_proj(txt_hidden).unsqueeze(1)

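        # Joint self-attention over the concatenated [view tokens ; question token] sequence.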
        fused = torch.cat([img_tokens, txt_tokens], dim=1)
        fused = self.fusion(fused)

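        # Pool on the first (view) token and classify.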
        pooled = fused[:, 0]
        logits = self.head(pooled)

        loss = None
        if answer_targets is not None:
            loss = F.cross_entropy(logits, answer_targets)

return {"logits": logits, "loss": loss} |
|
|
|
|
|
|
|
|

if __name__ == "__main__":

    # Two views of the same scene, treated as one multi-view sample.
    images = ['light_scannet/scene0000_00/color/00140.jpg',
              'light_scannet/scene0000_00/color/00400.jpg']
    images = [Image.open(img).convert('RGB') for img in images]

    model_root = "qihoo360/fg-clip-base"
    image_processor = AutoImageProcessor.from_pretrained(model_root)
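    # Preprocess both views and add a batch dimension: (1, V, C, H, W).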
    images = image_processor.preprocess(images, return_tensors='pt')['pixel_values'].unsqueeze(0)

questions = ["What is in the image?"] |
|
|
|
|
|
tokenizer = AutoTokenizer.from_pretrained(model_root) |
|
|
q = tokenizer(questions, return_tensors="pt", padding=True, truncation=True) |
|
|
q = torch.tensor(q.input_ids, dtype=torch.long).cuda() |
|
|
print(images.shape, q.shape) |
|
|
|
|
|
|
|
|
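    # image_embed_dim=512 assumes fg-clip-base returns 512-d image/text features.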
    model = MultiViewVQAClassifier(image_embed_dim=512, num_answers=1000)
    model = model.cuda()
    images = images.cuda()

    result = model(images, q)
    print(result["logits"].shape, result["loss"])