# --- Required imports ---
import glob
import os

import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from peft import LoraConfig, get_peft_model
from safetensors.torch import load_file
from transformers import AutoModelForCausalLM

def load_safetensor_from_hf(repo_id, filename, repo_type="dataset"):
    """Load a .safetensors file that is already present in the local HF cache."""
    cached_path = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        repo_type=repo_type,
        local_files_only=True,  # resolve from the local cache only; never download
    )
    return load_file(cached_path)
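
# Usage sketch for load_safetensor_from_hf (repo_id/filename below are
# placeholders, not real artifacts of this project; the call only succeeds
# if the file is already cached, because of local_files_only=True):
#   state = load_safetensor_from_hf("some-org/some-dataset", "model.safetensors")
#   # `state` maps parameter names to torch tensors.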

def load_pretrain(model, pretrain_ckpt_path):
    """Load pretrained weights from a directory of model*.safetensors shards."""
    print(f"📂 Loading pretrained weights from: {pretrain_ckpt_path}")

    # Search for safetensors files (single-file and sharded checkpoints both match)
    model_weight_path_pattern = os.path.join(pretrain_ckpt_path, "model*.safetensors")
    model_weight_paths = glob.glob(model_weight_path_pattern)
    if len(model_weight_paths) == 0:
        raise FileNotFoundError(f"❌ Cannot find any .safetensors file in {pretrain_ckpt_path}")

    # Load and merge weights from every shard into one state dict
    weights = {}
    for model_weight_path in model_weight_paths:
        print(f"📥 Loading weights from: {model_weight_path}")
        weights.update(load_file(model_weight_path, device="cpu"))

    # Load with strict=False so a partially matching checkpoint still applies
    result = model.load_state_dict(weights, strict=False)
    model_keys = set(model.state_dict().keys())
    loaded_keys = model_keys.intersection(weights.keys())
    missing_keys = result.missing_keys
    unexpected_keys = result.unexpected_keys
    print(f"✅ Loaded keys: {len(loaded_keys)} / {len(model_keys)}")
    print(f"❌ Missing keys: {len(missing_keys)}")
    print(f"⚠️ Unexpected keys: {len(unexpected_keys)}")

class RepModel(nn.Module):
    def __init__(self):
        super(RepModel, self).__init__()

        # --- Model + LoRA configuration ---
        model_root = 'fg-clip-base'
        lora_config = LoraConfig(
            r=32,                            # Rank of LoRA matrices
            lora_alpha=64,                   # Scaling factor (≈ 2 × r)
            target_modules=["q_proj", "v_proj", "k_proj", "fc1", "fc2"],  # Attention + FFN
            lora_dropout=0.05,               # Dropout rate
            bias="none",                     # Do not tune biases
            task_type="FEATURE_EXTRACTION",  # LoRA mode; can also use "CAUSAL_LM"
        )

        # --- Load and wrap model ---
        target_model = AutoModelForCausalLM.from_pretrained(
            model_root,
            trust_remote_code=True,
        )
        self.target_model = get_peft_model(target_model, lora_config)

        # (optional) print a summary of trainable vs. frozen parameters
        self.target_model.print_trainable_parameters()
    def get_image_feature(self, point_map):
        return self.target_model.get_image_features(point_map)
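
    # Note: PeftModel forwards attribute lookups it does not define to the
    # wrapped base model, so get_image_features resolves to the underlying
    # fg-clip model's method (assuming its remote code exposes one).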
    def forward(self, data_dict):
        point_map = data_dict['point_map']  # B, 32, 3, 224, 224
        return self.target_model.get_image_features(point_map)

# --- Load pretrained weights ---
if __name__ == "__main__":
    ckpt_path = '/home/m50048399/transfered/ye_project/checkpoints/sceneverse_scannet_exp1_b64_Pretrain_all_scannet_training_run1/poma/ckpt'
    model = RepModel()
    load_pretrain(model, ckpt_path)
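
    # Minimal inference sketch (batch size and input shape follow the comment
    # in forward(); both are assumptions, not verified against fg-clip-base):
    #   point_map = torch.randn(2, 32, 3, 224, 224)
    #   with torch.no_grad():
    #       feats = model.get_image_feature(point_map)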