# # from transformers import BertTokenizer, T5Tokenizer, AutoTokenizer
# # from transformers import BertConfig, BertModel, AutoProcessor
# # # # # tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
# # # # tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
# # # BertModel.from_pretrained("bert-base-uncased")
# # # import numpy as np
# # # pc = np.load(
# # #     '/mnt/new_drive/SceneVerse/light_scannet/scene0006_00/pm.npy'
# # # )
# # # print(pc.shape)
# # processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
# import torch
# from PIL import Image
# from transformers import (
#     AutoImageProcessor,
#     AutoTokenizer,
#     AutoModelForCausalLM,
# )
# model_root = "qihoo360/fg-clip-base"
# # image_size = 224
# model = AutoModelForCausalLM.from_pretrained(model_root, trust_remote_code=True).cuda()
# # device = model.device
# tokenizer = AutoTokenizer.from_pretrained(model_root)
# image_processor = AutoImageProcessor.from_pretrained(model_root)
# # captions = ["a photo of a cat", "a photo of a dog"]
# # caption_input = torch.tensor(
# #     tokenizer(captions, max_length=77, padding="max_length", truncation=True).input_ids,
# #     dtype=torch.long, device=device,
# # )
# # walk_short_pos = True
# # text_feature = model.get_text_features(caption_input, walk_short_pos=walk_short_pos)
# # print(text_feature.shape)
# import numpy as np
# from PIL import Image
# import matplotlib.pyplot as plt
# # paths
# rgb_path = '/home/m50048399/SceneVerse/light_arkitscenes/40753686/color/40753686_6855.418.png'
# pm_path = '/home/m50048399/SceneVerse/light_arkitscenes/40753686/pm.npy'
# # load rgb image
# rgb_img = np.array(Image.open(rgb_path).convert('RGB'))  # shape (H, W, 3)
# print(rgb_img.shape)
# rgb_img = image_processor.preprocess(rgb_img, return_tensors='pt')['pixel_values']
# rgb_img = rgb_img[0].permute(1, 2, 0).numpy()
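# # NOTE (assumption): like most CLIP-style processors, preprocess() likely
# # normalizes with the model's mean/std, so values leave [0, 1] and imshow
# # below will clip them; un-normalizing with image_processor.image_mean /
# # image_processor.image_std would restore the original colors.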
# # load point map
# pm = np.load(pm_path)[1, ...]  # shape could be (H, W, 3) or other; check below
# print(f"RGB shape: {rgb_img.shape}")
# print(f"PM shape: {pm.shape}")
# # visualize
# fig, ax = plt.subplots(1, 2, figsize=(12, 6))
# ax[0].imshow(rgb_img)
# ax[0].set_title('RGB Image')
# ax[0].axis('off')
# # if pm has 3 channels (X, Y, Z), visualize the Z channel as depth
# if pm.ndim == 3 and pm.shape[2] == 3:
#     depth = pm[..., 2]  # Z value as depth
#     ax[1].imshow(depth, cmap='viridis')
#     ax[1].set_title('PM Depth (Z)')
# else:
#     ax[1].imshow(pm, cmap='viridis')
#     ax[1].set_title('PM')
# ax[1].axis('off')
# plt.tight_layout()
# plt.savefig('test.png', dpi=300)
# import os
# import numpy as np
# from PIL import Image
# from datasets import Dataset, Features, Value, Array4D
# from tqdm import tqdm
# # === CONFIG ===
# ROOT_DIR = "light_3rscan"  # your dataset root path
# MAX_IMAGES = 50  # cap for shape consistency (optional)
# def load_images_from_folder(folder, exts, max_images=None, is_depth=False):
#     files = sorted([f for f in os.listdir(folder) if f.lower().endswith(exts)])
#     if not files:
#         print(f"Skipping {folder}: no {exts} files found.")
#         return None
#     if max_images:
#         files = files[:max_images]
#     images = []
#     for file in files:
#         img_path = os.path.join(folder, file)
#         img = Image.open(img_path)
#         if is_depth:
#             # If .pgm, preserve the raw data
#             img_array = np.array(img)  # preserves uint8 or uint16
#             if img_array.ndim == 2:
#                 img_array = img_array[..., None]  # (H, W, 1)
#         else:
#             img = img.convert("RGB")
#             img_array = np.array(img)
#         images.append(img_array)
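#     # NOTE: np.stack below requires every frame in the folder to share one
#     # (H, W); a folder with mixed resolutions would raise ValueError here.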
#     return np.stack(images, axis=0)  # (N, H, W, C)
# # === GENERATOR FUNCTION ===
# def generate_examples():
#     for scene_folder in tqdm(sorted(os.listdir(ROOT_DIR)), desc="Processing scenes"):
#         scene_path = os.path.join(ROOT_DIR, scene_folder)
#         if not os.path.isdir(scene_path):
#             continue
#         color_folder = os.path.join(scene_path, "color")
#         depth_folder = os.path.join(scene_path, "depth")
#         pm_path = os.path.join(scene_path, "pm.npy")
#         if not (os.path.exists(color_folder) and os.path.exists(depth_folder) and os.path.exists(pm_path)):
#             print(f"Skipping {scene_folder}, missing data.")
#             continue
#         try:
#             color_images = load_images_from_folder(color_folder, (".jpg",), MAX_IMAGES, is_depth=False)
#             depth_images = load_images_from_folder(depth_folder, (".pgm",), MAX_IMAGES, is_depth=True)
#             point_map = np.load(pm_path)  # (N, H, W, C) or similar
#             print(color_images.shape, depth_images.shape, point_map.shape)
#         except Exception as e:
#             print(f"Error processing {scene_folder}: {e}")
#             continue
#         yield {
#             "scene_id": scene_folder,
#             "color_images": color_images,
#             "depth_images": depth_images,
#             "point_map": point_map,
#         }
# # === DETERMINE SHAPES ===
# # Load the first sample to get fixed shapes for Features
# sample = next(generate_examples())
# N, H, W, C = sample["color_images"].shape
# N_d, H_d, W_d, C_d = sample["depth_images"].shape
# N_p, H_p, W_p, C_p = sample["point_map"].shape
# features = Features({
#     "scene_id": Value("string"),
#     "color_images": Array4D(dtype="uint32", shape=(N, H, W, C)),
#     "depth_images": Array4D(dtype="uint32", shape=(N_d, H_d, W_d, C_d)),
#     "point_map": Array4D(dtype="float16", shape=(N_p, H_p, W_p, C_p)),
# })
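# # NOTE: Array4D pins the shapes taken from this first sample, so every scene
# # must yield arrays with exactly these shapes. A quick sanity pass (sketch;
# # it re-runs the generator's slow I/O) could catch mismatches before the
# # full build:
# # for ex in generate_examples():
# #     assert ex["color_images"].shape == (N, H, W, C), ex["scene_id"]
# #     assert ex["depth_images"].shape == (N_d, H_d, W_d, C_d), ex["scene_id"]
# #     assert ex["point_map"].shape == (N_p, H_p, W_p, C_p), ex["scene_id"]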
# # === BUILD DATASET ===
# dataset = Dataset.from_generator(generate_examples, features=features)
# # === SAVE TO DISK ===
# dataset.save_to_disk("light_3rscan_hf")
# print("✅ Dataset saved to 'light_3rscan_hf'.")
# # === TEST LOADING ===
# from datasets import load_from_disk
# ds = load_from_disk("light_3rscan_hf").with_format("numpy")  # numpy format so the Array4D columns come back with .shape
# print(ds)
# print(ds[0]["scene_id"], ds[0]["color_images"].shape, ds[0]["depth_images"].shape, ds[0]["point_map"].shape)
import torch
import torch.nn as nn
from transformers import AutoProcessor
from transformers.image_utils import load_image
from transformers.models.siglip.modeling_siglip import (
    SiglipModel,
    SiglipVisionModel,
    SiglipTextModel,
    SiglipPreTrainedModel,
    SiglipVisionTransformer,
)
from transformers.models.siglip.configuration_siglip import (
    SiglipVisionConfig,
    SiglipTextConfig,
)
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from typing import Optional
class MySiglipVisionTransformer(SiglipVisionTransformer):
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
    ) -> BaseModelOutputWithPooling:
        """Embed pixel_values, run the encoder, and return pooled and per-layer outputs."""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.post_layernorm(last_hidden_state)
        pooler_output = self.head(last_hidden_state) if self.use_head else None
        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooler_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
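# Wrapper that swaps the custom transformer into the standard SiglipVisionModel shell.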
class MySiglipVisionModel(SiglipVisionModel):
    def __init__(self, config):
        SiglipPreTrainedModel.__init__(self, config)
        self.vision_model = MySiglipVisionTransformer(config)
        self.post_init()
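# Rebuild of SiglipModel.__init__ so the vision tower is the custom transformer above.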
class MySiglipModel(SiglipModel):
    def __init__(self, config):
        SiglipPreTrainedModel.__init__(self, config)
        if not isinstance(config.text_config, SiglipTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type SiglipTextConfig but is of type"
                f" {type(config.text_config)}."
            )
        if not isinstance(config.vision_config, SiglipVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type SiglipVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )
        text_config = config.text_config
        vision_config = config.vision_config
        # First, initialize the text and vision models with the proper attention implementation
        text_model = SiglipTextModel._from_config(text_config)
        vision_model = MySiglipVisionModel._from_config(vision_config)
        # Second, get the text and vision submodules (for backward compatibility)
        self.text_model = text_model.text_model
        self.vision_model = vision_model.vision_model
        self.logit_scale = nn.Parameter(torch.randn(1))
        self.logit_bias = nn.Parameter(torch.randn(1))
        # Initialize weights and apply final processing
        self.post_init()
# load the model and processor
ckpt = "../siglip2-base-patch16-256"
model = MySiglipModel.from_pretrained(ckpt, device_map="auto").eval()
processor = AutoProcessor.from_pretrained(ckpt, trust_remote_code=True)
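# sanity check: print where get_image_features is defined (it is inherited from
# SiglipModel, so this resolves to transformers' modeling_siglip.py)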
import inspect
print(inspect.getfile(model.get_image_features))
# load the image
image = load_image("https://huggingface.co/datasets/merve/coco/resolve/main/val2017/000000000285.jpg")
inputs = processor(images=[image], return_tensors="pt").to(model.device)
# run inference
with torch.no_grad():
    image_embeddings = model.get_image_features(**inputs, output_hidden_states=True)
print(image_embeddings.shape)
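# NOTE: get_image_features only returns the pooled embedding, so the hidden
# states requested above are computed but never surfaced. A minimal sketch
# (assuming the same `inputs` as above) that reads the per-layer states
# directly off the vision tower instead:
with torch.no_grad():
    vision_outputs = model.vision_model(
        pixel_values=inputs["pixel_values"],
        output_hidden_states=True,
    )
print(len(vision_outputs.hidden_states))       # embedding output + one entry per encoder layer
print(vision_outputs.hidden_states[-1].shape)  # (batch, num_patches, hidden_dim)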