# from transformers import BertTokenizer, T5Tokenizer, AutoTokenizer
# from transformers import BertConfig, BertModel, AutoProcessor

# tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
# tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
# BertModel.from_pretrained("bert-base-uncased")

# import numpy as np

# pc = np.load(
#     '/mnt/new_drive/SceneVerse/light_scannet/scene0006_00/pm.npy'
# )
# print(pc.shape)

# processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")

# import torch
# from PIL import Image
# from transformers import (
#     AutoImageProcessor,
#     AutoTokenizer,
#     AutoModelForCausalLM,
# )

# model_root = "qihoo360/fg-clip-base"
# # image_size=224
# model = AutoModelForCausalLM.from_pretrained(model_root,trust_remote_code=True).cuda()

# # device = model.device

# tokenizer = AutoTokenizer.from_pretrained(model_root)
# image_processor = AutoImageProcessor.from_pretrained(model_root)
# # captions=["a photo of a cat", "a photo of a dog"]
# # caption_input = torch.tensor(tokenizer(captions, max_length=77, padding="max_length", truncation=True).input_ids, dtype=torch.long, device=device)
# # walk_short_pos = True
# # text_feature = model.get_text_features(caption_input,walk_short_pos=walk_short_pos)
# # print(text_feature.shape)


# import numpy as np
# from PIL import Image
# import matplotlib.pyplot as plt

# # paths
# rgb_path = '/home/m50048399/SceneVerse/light_arkitscenes/40753686/color/40753686_6855.418.png'
# pm_path  = '/home/m50048399/SceneVerse/light_arkitscenes/40753686/pm.npy'

# # load rgb image
# rgb_img = np.array(Image.open(rgb_path).convert('RGB'))  # shape (H, W, 3)

# print(rgb_img.shape)
# rgb_img = image_processor.preprocess(rgb_img, return_tensors='pt')['pixel_values']

# rgb_img = rgb_img[0].permute(1,2,0).numpy()
# # load point map
# pm = np.load(pm_path)[1, ...]  # frame 1 of the point-map stack; expected shape (H, W, 3)

# print(f"RGB shape: {rgb_img.shape}")
# print(f"PM shape: {pm.shape}")

# # visualize
# fig, ax = plt.subplots(1, 2, figsize=(12, 6))

# ax[0].imshow(rgb_img)
# ax[0].set_title('RGB Image')
# ax[0].axis('off')

# # if pm has 3 channels (X, Y, Z), visualize the Z channel as depth
# if pm.ndim == 3 and pm.shape[2] == 3:
#     depth = pm[..., 2]  # Z value as depth
#     ax[1].imshow(depth, cmap='viridis')
#     ax[1].set_title('PM Depth (Z)')
# else:
#     ax[1].imshow(pm, cmap='viridis')
#     ax[1].set_title('PM')
    
# ax[1].axis('off')

# plt.tight_layout()

# plt.savefig('test.png', dpi=300)


# import os
# import numpy as np
# from PIL import Image
# from datasets import Dataset, Features, Value, Array4D
# from tqdm import tqdm

# # === CONFIG ===
# ROOT_DIR = "light_3rscan"       # your dataset root path
# MAX_IMAGES = 50                  # cap for shape consistency (optional)

# def load_images_from_folder(folder, exts, max_images=None, is_depth=False):
#     files = sorted([f for f in os.listdir(folder) if f.lower().endswith(exts)])
#     if not files:
#         print(f"Skipping {folder}: no {exts} files found.")
#         return None
#     if max_images:
#         files = files[:max_images]
#     images = []
#     for file in files:
#         img_path = os.path.join(folder, file)
#         img = Image.open(img_path)
#         if is_depth:
#             # If .pgm, preserve raw data
#             img_array = np.array(img)  # preserves uint8 or uint16
#             if img_array.ndim == 2:
#                 img_array = img_array[..., None]  # (H, W, 1)
#         else:
#             img = img.convert("RGB")
#             img_array = np.array(img)
#         images.append(img_array)
#     return np.stack(images, axis=0)  # (N, H, W, C)

# # === GENERATOR FUNCTION ===
# def generate_examples():
#     for scene_folder in tqdm(sorted(os.listdir(ROOT_DIR)), desc="Processing scenes"):
#         scene_path = os.path.join(ROOT_DIR, scene_folder)
#         if not os.path.isdir(scene_path):
#             continue

#         color_folder = os.path.join(scene_path, "color")
#         depth_folder = os.path.join(scene_path, "depth")
#         pm_path = os.path.join(scene_path, "pm.npy")

#         if not (os.path.exists(color_folder) and os.path.exists(depth_folder) and os.path.exists(pm_path)):
#             print(f"Skipping {scene_folder}, missing data.")
#             continue

#         try:
#             color_images = load_images_from_folder(color_folder, (".jpg",), MAX_IMAGES, is_depth=False)
#             depth_images = load_images_from_folder(depth_folder, (".pgm",), MAX_IMAGES, is_depth=True)
#             point_map = np.load(pm_path)  # expected (N, H, W, C); unpacked as 4-D below
#             print(color_images.shape, depth_images.shape, point_map.shape)
#         except Exception as e:
#             print(f"Error processing {scene_folder}: {e}")
#             continue

#         yield {
#             "scene_id": scene_folder,
#             "color_images": color_images,
#             "depth_images": depth_images,
#             "point_map": point_map,
#         }

# # === DETERMINE SHAPES ===
# # Load first sample to get shapes for Features
# sample = next(generate_examples())
# N, H, W, C = sample["color_images"].shape
# N_d, H_d, W_d, C_d = sample["depth_images"].shape
# N_p, H_p, W_p, C_p = sample["point_map"].shape

# features = Features({
#     "scene_id": Value("string"),
#     "color_images": Array4D(dtype="uint32", shape=(N, H, W, C)),
#     "depth_images": Array4D(dtype="uint32", shape=(N_d, H_d, W_d, C_d)),
#     "point_map": Array4D(dtype="float16", shape=(N_p, H_p, W_p, C_p)),
# })

# # === BUILD DATASET ===
# dataset = Dataset.from_generator(generate_examples, features=features)

# # === SAVE TO DISK ===
# dataset.save_to_disk("light_3rscan_hf")
# print("✅ Dataset saved to 'light_3rscan_hf'.")

# # === TEST LOADING ===
# from datasets import load_from_disk
# ds = load_from_disk("light_3rscan_hf")
# print(ds)
# print(ds[0]["scene_id"], ds[0]["color_images"].shape, ds[0]["depth_images"].shape, ds[0]["point_map"].shape)


import torch
import torch.nn as nn
from transformers import AutoProcessor
from transformers.image_utils import load_image
from transformers.models.siglip.modeling_siglip import (
    SiglipModel,
    SiglipVisionModel,
    SiglipTextModel,
    SiglipPreTrainedModel,
    SiglipVisionTransformer,
)
from transformers.models.siglip.configuration_siglip import (
    SiglipVisionConfig,
    SiglipTextConfig,
)
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.utils import can_return_tuple, add_start_docstrings_to_model_forward, replace_return_docstrings
from transformers.models.siglip.modeling_siglip import SIGLIP_VISION_INPUTS_DOCSTRING
from typing import Optional
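
# MySiglipVisionTransformer below mirrors the upstream SiglipVisionTransformer
# forward pass so it can be customized locally; as written it returns the
# post-layernorm token features, a pooled output, and (when requested) the
# per-layer hidden states and attentions.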

class MySiglipVisionTransformer(SiglipVisionTransformer):
    @can_return_tuple
    @add_start_docstrings_to_model_forward(SIGLIP_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=SiglipVisionConfig)
    def forward(
        self,
        pixel_values,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
    ) -> BaseModelOutputWithPooling:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.post_layernorm(last_hidden_state)

        pooler_output = self.head(last_hidden_state) if self.use_head else None
        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooler_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
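
# The wrappers below call SiglipPreTrainedModel.__init__ directly (instead of
# super().__init__()) so the parent class does not build the stock vision tower;
# MySiglipVisionTransformer is plugged in in its place.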

class MySiglipVisionModel(SiglipVisionModel):
    def __init__(self, config):
        SiglipPreTrainedModel.__init__(self, config) 
        self.vision_model = MySiglipVisionTransformer(config)
        self.post_init()
        
        
class MySiglipModel(SiglipModel):
    def __init__(self, config):
        SiglipPreTrainedModel.__init__(self, config) 
        
        if not isinstance(config.text_config, SiglipTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type SiglipTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, SiglipVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type SiglipVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        # First, initialize the text and vision models with proper attention implementation
        text_model = SiglipTextModel._from_config(text_config)
        vision_model = MySiglipVisionModel._from_config(config.vision_config)

        # Second, get the text and vision submodules (for backward compatibility)
        self.text_model = text_model.text_model
        self.vision_model = vision_model.vision_model

        self.logit_scale = nn.Parameter(torch.randn(1))
        self.logit_bias = nn.Parameter(torch.randn(1))

        # Initialize weights and apply final processing
        self.post_init()

# load the model and processor
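# (assumption: the path below is a local copy of a SigLIP2 base checkpoint; any
#  SigLIP/SigLIP2 checkpoint compatible with SiglipModel should work here)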
ckpt = "../siglip2-base-patch16-256"

model = MySiglipModel.from_pretrained(ckpt, device_map="auto").eval()
processor = AutoProcessor.from_pretrained(ckpt, trust_remote_code=True)

import inspect
print(inspect.getfile(model.get_image_features))

# load the image
image = load_image("https://huggingface.co/datasets/merve/coco/resolve/main/val2017/000000000285.jpg")
inputs = processor(images=[image], return_tensors="pt").to(model.device)

# run inference
with torch.no_grad():
    image_embeddings = model.get_image_features(**inputs, output_hidden_states=True)

print(image_embeddings.shape)
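
# A minimal follow-up sketch (assumption: the point of the subclass is to expose
# per-layer activations). get_image_features returns only the pooled embedding,
# so to inspect intermediate features we can call the vision tower directly:
with torch.no_grad():
    vision_outputs = model.vision_model(
        pixel_values=inputs["pixel_values"],
        output_hidden_states=True,
    )
# hidden_states is a tuple of (num_layers + 1) tensors of shape
# (batch, num_patches, hidden_dim); the last entry is the final encoder layer
# output before post_layernorm.
print(len(vision_outputs.hidden_states), vision_outputs.hidden_states[-1].shape)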