# import os
# import json
# import torch
# from huggingface_hub import list_repo_files, hf_hub_download
# from safetensors.torch import load_file

# REPO_ID = "MatchLab/PointMapVerse"
# SUBFOLDER = "light_arkitscenes"

# def pointmap_to_points(pm: torch.Tensor, max_points: int = 4096) -> torch.Tensor:
#     """
#     Convert [H, W, 3] point map to [N, 3] points, with subsampling.
#     """
#     pm = pm.float()
#     pts = pm.reshape(-1, 3)
#     pts = pts[pts.norm(dim=-1) > 0]  # remove zero points
#     if pts.shape[0] > max_points:
#         pts = pts[torch.randperm(pts.shape[0])[:max_points]]
#     return pts

# def chamfer_distance(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
#     """
#     Symmetric Chamfer distance between two point sets a, b: [N,3], [M,3]
#     """
#     d = torch.cdist(a, b, p=2)
#     return d.min(dim=1).values.mean() + d.min(dim=0).values.mean()

# def compute_chamfer_rankings_for_scene(scene_path: str, repo_id: str):
#     """
#     For a single safetensors scene file, compute sorted Chamfer rankings
#     for each view.
#     Returns a list of dicts for JSON.
#     """
#     local_path = hf_hub_download(repo_id, scene_path, repo_type="dataset")
#     data = load_file(local_path)
#     point_maps = data["point_map"]  # [V, H, W, 3]
#     num_views = point_maps.shape[0]

#     # Precompute point clouds per view
#     pcs = [pointmap_to_points(point_maps[v]) for v in range(num_views)]

#     scene_id = os.path.basename(scene_path).split(".")[0]  # e.g. scene0000_01

#     results = []
#     for i in range(num_views):
#         pts_i = pcs[i]
#         dists = []
#         for j in range(num_views):
#             if i == j:
#                 continue
#             d = chamfer_distance(pts_i, pcs[j]).item()
#             dists.append((j, d))

#         # sort by Chamfer distance
#         dists_sorted = sorted(dists, key=lambda x: x[1])
#         sorted_views = [j for (j, _) in dists_sorted]

#         item = {
#             "scene_id": scene_id,
#             "cur_view": i,
#             "sorted_views": sorted_views,
#         }
#         results.append(item)
#         print(item)

#     return results

# def load_all_scenes_and_dump_json(
#     repo_id: str = REPO_ID,
#     subfolder: str = SUBFOLDER,
#     output_json: str = "light_arkitscenes_chamfer_rankings.json",
# ):
#     print("📂 Listing files…")
#     files = list_repo_files(repo_id, repo_type="dataset")

#     # Filter for safetensors inside subfolder/
#     scene_files = [f for f in files if f.startswith(subfolder) and f.endswith(".safetensors")]
#     print(f"🔍 Found {len(scene_files)} scenes")

#     all_entries = []
#     for fname in sorted(scene_files):
#         print(f"➡️ Processing scene: {fname}")
#         scene_entries = compute_chamfer_rankings_for_scene(fname, repo_id)
#         all_entries.extend(scene_entries)

#     print(f"💾 Saving JSON to: {output_json}")
#     with open(output_json, "w") as f:
#         json.dump(all_entries, f, indent=2)

#     print("✅ Finished computing Chamfer rankings for all scenes")
#     return all_entries

# if __name__ == "__main__":
#     load_all_scenes_and_dump_json(REPO_ID, SUBFOLDER)

# from transformers import AutoModel, AutoTokenizer

# text_encoder = AutoModel.from_pretrained('jinaai/jina-clip-v2', trust_remote_code=True)
# tokenizer = AutoTokenizer.from_pretrained('jinaai/jina-clip-v2', trust_remote_code=True)

# from safetensors.torch import load_file
# from safetensors.torch import load
# from huggingface_hub import hf_hub_download

# def load_safetensor_from_hf(repo_id, filename, repo_type="dataset"):
#     if filename.startswith('cc3m'):
#         with open(filename, "rb") as f:
#             return load(f.read())
#     else:
#         cached_path = hf_hub_download(
#             repo_id=repo_id,
#             filename=filename,
#             repo_type=repo_type,
#             local_files_only=True
#         )
#         return load_file(cached_path)
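# A minimal sanity-check sketch (not in the original script): the symmetric
# Chamfer formula from chamfer_distance() above, re-implemented inline since
# those helpers are commented out. Call _chamfer_sanity_check() manually to
# verify the expected values; torch is imported lazily so the download step
# below does not require it.
def _chamfer_sanity_check():
    import torch

    a = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    # Identical clouds: both directed terms are zero.
    d = torch.cdist(a, a, p=2)
    assert (d.min(dim=1).values.mean() + d.min(dim=0).values.mean()).item() < 1e-6
    # Shift every point by 0.5 along x: each directed term is 0.5,
    # so the symmetric distance is 1.0.
    b = a + torch.tensor([0.5, 0.0, 0.0])
    d = torch.cdist(a, b, p=2)
    chamfer = d.min(dim=1).values.mean() + d.min(dim=0).values.mean()
    assert abs(chamfer.item() - 1.0) < 1e-6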
from huggingface_hub import snapshot_download
from pathlib import Path

# ---- Config ----
REPO_ID = "MatchLab/ScenePointv2"
SUBFOLDER = "light_lsun"
LOCAL_DIR = "./light_lsun"

# ---- Download ----
snapshot_download(
    repo_id=REPO_ID,
    repo_type="dataset",
    local_dir=LOCAL_DIR,
    allow_patterns=[f"{SUBFOLDER}/**"],  # only fetch files under the subfolder
    local_dir_use_symlinks=False,  # deprecated (ignored) in recent huggingface_hub releases
)

print(f"✅ Downloaded {SUBFOLDER} to {Path(LOCAL_DIR).resolve()}")
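# A minimal follow-up sketch (not in the original script): load one downloaded
# shard and inspect its tensors. This assumes the ScenePointv2 shards are
# .safetensors files with the same "point_map" key and [V, H, W, 3] layout as
# the PointMapVerse code above; adjust the key if the dataset differs.
from safetensors.torch import load_file

shards = sorted(Path(LOCAL_DIR).rglob("*.safetensors"))
if shards:
    data = load_file(shards[0])
    point_maps = data["point_map"]  # expected shape: [V, H, W, 3]
    print(f"Loaded {shards[0].name}: point_map shape {tuple(point_maps.shape)}")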