# import os
# import torch
# import json
# import re
# from PIL import Image
# import torch.nn.functional as F
# from safetensors.torch import load_file
# from huggingface_hub import hf_hub_download
# import sys
# sys.path.append("/gpfs/home/ym621/UniPointMap")
# import open_clip
# # ---------------------------
# # Helpers
# # ---------------------------
# def load_safetensor_from_hf(repo_id, filename, repo_type="dataset"):
#     cached_path = hf_hub_download(
#         repo_id=repo_id,
#         filename=filename,
#         revision='7bb7c7f3d379c5145bb06d2cf0949c66ac9a2c4e',
#         repo_type=repo_type,
#         local_files_only=True,
#     )
#     return load_file(cached_path)
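# # safetensors' load_file returns a plain dict of tensor name -> torch.Tensor,
# # which callers below index by key (e.g. 'color_images').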
# def load_json(data_path: str):
#     with open(data_path, "r", encoding="utf-8") as f:
#         return json.load(f)
# def load_jsonl(path):
#     data = []
#     with open(path, "r", encoding="utf-8") as f:
#         for line in f:
#             if line.strip():
#                 data.append(json.loads(line))
#     return data
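# # Each JSONL record is expected to provide at least 'scan_id' and 'utterance'
# # (a period-separated caption string); only these two keys are read below.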
# # ---------------------------
# # Load CLIP model
# # ---------------------------
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model, _, preprocess = open_clip.create_model_and_transforms(
#     'ViT-B-16', pretrained='datacomp_xl_s13b_b90k'
# )
# tokenizer = open_clip.get_tokenizer('ViT-B-16')
# model = model.to(device).eval()
# # ---------------------------
# # Preload reference captions
# # ---------------------------
# scannet_data = load_jsonl('/gpfs/home/ym621/UniPointMap/PointMapVerse/existing_datasets/ScanNet/annotations/scannet_caption_per_view.jsonl')
# arkitscenes_data = load_jsonl('/gpfs/home/ym621/UniPointMap/PointMapVerse/existing_datasets/Arkitscenes/annotations/arkitscenes_caption_per_view.jsonl')
# rscan_data = load_jsonl('/gpfs/home/ym621/UniPointMap/PointMapVerse/existing_datasets/3RScan/annotations/3rscan_caption_per_view.jsonl')
# org_data = {}
# for idx, data in enumerate([scannet_data, arkitscenes_data, rscan_data]):
#     if idx == 0:
#         root = 'light_scannet'
#     elif idx == 1:
#         root = 'light_arkitscenes'
#     else:
#         root = 'light_3rscan'
#     local_idx = 0
#     cur_scan_id = ''  # reset per dataset so view indices restart with each source
#     for item in data:
#         if item['scan_id'] != cur_scan_id:
#             cur_scan_id = item['scan_id']
#             local_idx = 0
#         scan_id = f"{root}/{item['scan_id']}_{local_idx}"
#         org_data[scan_id] = item['utterance'].split('.')
#         local_idx += 1
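# # Resulting keys look like 'light_scannet/<scan_id>_<view_idx>', each mapping
# # to the reference caption split into sentences.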
# # ---------------------------
# # Caching safetensors
# # ---------------------------
# safetensor_cache = {}
# def get_image_from_safetensor(image_path, idx):
#     if image_path not in safetensor_cache:
#         safetensor_cache[image_path] = load_safetensor_from_hf(
#             'MatchLab/PointMapVerse', image_path
#         )
#     return safetensor_cache[image_path]['color_images'][idx]
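# # Note: safetensor_cache grows without bound (one full tensor dict per file).
# # If memory became a concern, a bounded sketch using functools.lru_cache
# # could replace it (illustrative only, not part of the original run):
# # from functools import lru_cache
# # @lru_cache(maxsize=8)
# # def _load_color_images(image_path):
# #     return load_safetensor_from_hf('MatchLab/PointMapVerse', image_path)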
# # ---------------------------
# # Process captions
# # ---------------------------
# caption_dir = "../captions"
# captions = [f for f in os.listdir(caption_dir) if f.endswith('.json')]
# filtered_captions = {}
# count, total_count = 0, 0
# for cap in captions:
#     cap_path = os.path.join(caption_dir, cap)
#     caption_data = load_json(cap_path)
#     for k, v in caption_data.items():
#         image_path = f"{'_'.join(k.split('_')[:-1])}.safetensors"
#         idx = int(k.split('_')[-1])
#         # --- load + preprocess image ---
#         img_tensor = get_image_from_safetensor(image_path, idx)
#         img_tensor = img_tensor.cpu().numpy()
#         pil_img = Image.fromarray(img_tensor.astype("uint8")).convert("RGB")
#         image = preprocess(pil_img).unsqueeze(0).to(device)
#         with torch.no_grad():
#             image_features = model.encode_image(image)
#             image_features = F.normalize(image_features, dim=-1)
#         # --- clean captions: keep the numbered list starting at "1." ---
#         if "1." in v:
#             v = v.split("1.", 1)[-1].strip()
#             v = "1." + v
#         if not v.startswith('1.'):
#             # malformed caption: fall back to a generic placeholder
#             v = ["An image showing an indoor scene."]
#             count += 1
#         else:
#             # split the numbered list into individual captions
#             v = re.split(r'\s*\d+\.\s*', v)
#             v = [c.strip().replace('*', '') for c in v if c.strip()]
#             if len(v) < 4:
#                 v = ["An image showing an indoor scene."]
#                 count += 1
#         # --- combine old + new captions ---
#         old_v = org_data.get(k, [])
#         all_v = old_v + v
#         # --- encode captions ---
#         with torch.no_grad():
#             text_tokens = tokenizer(all_v).to(device)
#             text_features = model.encode_text(text_tokens)
#             text_features = F.normalize(text_features, dim=-1)
#             sims = (image_features @ text_features.T).squeeze(0)  # [num_caps]
#         # --- sort captions by similarity, descending (torch.topk with k = n) ---
#         topk_vals, topk_idx = torch.topk(sims, k=len(all_v))
#         sorted_captions = [all_v[i] for i in topk_idx.tolist()]
#         filtered_captions[k] = sorted_captions
#         total_count += 1
#         if total_count % 50 == 0:
#             print(f"Processed {total_count} views...")
| # # --------------------------- | |
| # # Save results | |
| # # --------------------------- | |
| # output_path = os.path.join(caption_dir, "filtered_captions_sorted.json") | |
| # with open(output_path, "w", encoding="utf-8") as f: | |
| # json.dump(filtered_captions, f, indent=4) | |
| # print(f'Total captions not starting with "1.": {count} out of {total_count} captions.') | |
| # print(f"Sorted captions saved to {output_path}") | |
# ---------------------------
# Save results as JSONL
# ---------------------------
import os
import json
from transformers import AutoTokenizer
# ---------------------------
# Paths
# ---------------------------
caption_dir = "../captions"
filtered_json_path = os.path.join(caption_dir, "filtered_captions_sorted.json")
# Output files for each dataset
output_paths = {
    "scannet": os.path.join(caption_dir, "filtered_captions_scannet.jsonl"),
    "arkitscenes": os.path.join(caption_dir, "filtered_captions_arkitscenes.jsonl"),
    "3rscan": os.path.join(caption_dir, "filtered_captions_3rscan.jsonl"),
}
# ---------------------------
# Load filtered captions
# ---------------------------
with open(filtered_json_path, "r", encoding="utf-8") as f:
    filtered_captions = json.load(f)
print(f"Loaded {len(filtered_captions)} scan entries.")
# ---------------------------
# Setup tokenizer (bert-base-uncased)
# ---------------------------
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
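# NOTE: this tokenizer is instantiated but never used below; it appears to be
# reserved for a later token-length check on the captions.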
# ---------------------------
# Open three output files
# ---------------------------
files = {k: open(path, "w", encoding="utf-8") for k, path in output_paths.items()}
line_ids = {"scannet": 1, "arkitscenes": 1, "3rscan": 1}
# ---------------------------
# Convert and write entries
# ---------------------------
for k, sorted_captions in filtered_captions.items():
    # Determine dataset type from the key prefix
    if k.startswith("light_scannet"):
        dataset = "scannet"
    elif k.startswith("light_arkitscenes"):
        dataset = "arkitscenes"
    elif k.startswith("light_3rscan"):
        dataset = "3rscan"
    else:
        continue  # skip unknown dataset keys
    image_path = f"{'_'.join(k.split('_')[:-1])}.safetensors"
    scan_id = "_".join(k.split("_")[:-1]).split("/")[-1]  # e.g. scene0000_00
    # Strip periods and surrounding whitespace from each caption
    sorted_captions = [cap.replace('.', '').strip() for cap in sorted_captions]
    entry = {
        "item_id": f"{dataset}_train_{line_ids[dataset]:06d}",
        "scan_id": scan_id,
        "utterance": sorted_captions,
        "safetensors_path": image_path,
    }
    # Write to the correct file and advance that dataset's running id
    files[dataset].write(json.dumps(entry) + "\n")
    line_ids[dataset] += 1
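# A written line has this shape (illustrative values, not real output):
# {"item_id": "scannet_train_000001", "scan_id": "scene0000_00",
#  "utterance": ["caption 1", "caption 2"],
#  "safetensors_path": "light_scannet/scene0000_00.safetensors"}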
# ---------------------------
# Close files
# ---------------------------
for f in files.values():
    f.close()
| print(f"✅ Saved entries to:") | |
| for k, path in output_paths.items(): | |
| print(f" {k}: {path} ({line_ids[k]-1} entries)") | |