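"""Multi-view VQA classifier.

Pooled FG-CLIP image features from V views and a pooled FG-CLIP text feature
for the question are projected to a shared width, fused with a small
Transformer encoder, and classified over a fixed answer vocabulary.
"""
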
import torch
import torch.nn as nn
from PIL import Image
from transformers import (
    AutoImageProcessor,
    AutoTokenizer,
    AutoModelForCausalLM,
)

class MultiViewVQAClassifier(nn.Module):
    def __init__(self,
                 image_embed_dim: int,
                 num_answers: int,
                 fusion_width: int = 512,
                 n_fusion_layers: int = 4):

        super().__init__()
        # ---- FG-CLIP backbone (shared image/text encoder) ----
        # FG-CLIP embeds images and text in the same space, so the text
        # feature dimension equals the image feature dimension.
        model_root = "qihoo360/fg-clip-base"  # image_size=224
        self.model = AutoModelForCausalLM.from_pretrained(
            model_root, trust_remote_code=True
        )
        text_embed_dim = image_embed_dim

        # ---- projections to a shared fusion width ----
        self.img_proj = nn.Linear(image_embed_dim, fusion_width)
        self.txt_proj = nn.Linear(text_embed_dim, fusion_width)

        # ---- fusion encoder over the (V + 1) image/text tokens ----
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=fusion_width, nhead=8, dim_feedforward=fusion_width * 4, batch_first=True
        )
        
        self.fusion = nn.TransformerEncoder(encoder_layer, num_layers=n_fusion_layers)

        # ---- classification head ----
        self.head = nn.Sequential(
            nn.Linear(fusion_width, fusion_width),
            nn.ReLU(),
            nn.Linear(fusion_width, num_answers)
        )

    def forward(self, images, questions, answer_targets=None):
        """
        image_cls      : FloatTensor (B, 32, D_img)  – CLS from every view
        questions      : list[str]                    – raw question strings
        answer_targets : LongTensor (B,) or None      – index in answer vocab
        """
        
        image_cls = torch.stack([
            self.model.get_image_features(images[:, i, ...])  # (B, D_img)
            for i in range(images.shape[1])
        ], dim=1) 
        
        B = image_cls.size(0)
        
        # project images
        img_tokens = self.img_proj(image_cls)                        # (B,32,512)

        # encode question
        
        txt_hidden = self.model.get_text_features(questions)
        txt_tokens = self.txt_proj(txt_hidden).unsqueeze(1)          # (B,L,512)

        # concat & fuse
        fused = torch.cat([img_tokens, txt_tokens], dim=1)           # (B,32+L,512)
        fused = self.fusion(fused)                                   # (B,32+L,512)

        # pool – use first image view token (or mean-pool)
        pooled = fused[:, 0]                                         # (B,512)

        logits = self.head(pooled)                                   # (B,num_answers)
        loss = None
        if answer_targets is not None:
            loss = nn.functional.cross_entropy(logits, answer_targets)

        return {"logits": logits, "loss": loss}
    
    
if __name__ == "__main__":
    # Example usage: two ScanNet views of one scene and a single question
    model_root = "qihoo360/fg-clip-base"

    image_paths = ['light_scannet/scene0000_00/color/00140.jpg', 'light_scannet/scene0000_00/color/00400.jpg']
    images = [Image.open(p).convert('RGB') for p in image_paths]

    # preprocess the PIL images and add a batch dimension -> (1, V, 3, H, W)
    image_processor = AutoImageProcessor.from_pretrained(model_root)
    images = image_processor.preprocess(images, return_tensors='pt')['pixel_values']
    images = images.unsqueeze(0)

    # tokenize the question -> (1, L)
    questions = ["What is in the image?"]
    tokenizer = AutoTokenizer.from_pretrained(model_root)
    q = tokenizer(questions, return_tensors="pt", padding=True, truncation=True)
    q = q.input_ids.cuda()
    print(images.shape, q.shape)

    model = MultiViewVQAClassifier(image_embed_dim=512, num_answers=1000).cuda()
    images = images.cuda()

    result = model(images, q)
    print(result["logits"].shape, result["loss"])