MatchLab committed on
Commit c94c8c9 · verified
1 Parent(s): 2d2d947

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +12 -58
  2. .gitignore +8 -0
  3. LICENSE +21 -0
  4. POMA_BENCH/eval_scene_retrieval.py +275 -0
  5. POMA_BENCH/eval_view_retrieval.py +488 -0
  6. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_50985.txt +1 -0
  7. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_50989.txt +1 -0
  8. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51020.txt +1 -0
  9. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51021.txt +1 -0
  10. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51022.txt +1 -0
  11. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51029.txt +1 -0
  12. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51042.txt +1 -0
  13. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51044.txt +1 -0
  14. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51045.txt +1 -0
  15. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_50985.txt +0 -0
  16. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_50989.txt +0 -0
  17. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51020.txt +0 -0
  18. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51021.txt +0 -0
  19. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51022.txt +0 -0
  20. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51029.txt +0 -0
  21. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51042.txt +0 -0
  22. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51044.txt +0 -0
  23. POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51045.txt +0 -0
  24. POMA_BENCH/nr3d_retrieval.jsonl +3 -0
  25. POMA_BENCH/ret.sh +36 -0
  26. POMA_BENCH/scanrefer_retrieval.jsonl +3 -0
  27. POMA_BENCH/scene_cap.json +0 -0
  28. POMA_BENCH/sr3d_retrieval.jsonl +3 -0
  29. POMA_BENCH/ssg_ref_total_by_view.jsonl +3 -0
  30. POMA_BENCH/ssg_ref_total_by_view_full.jsonl +3 -0
  31. assets/logo025.png +3 -0
  32. assets/overview.png +3 -0
  33. chamfer_rankings.json +3 -0
  34. common/__pycache__/box_utils.cpython-310.pyc +0 -0
  35. common/__pycache__/box_utils.cpython-39.pyc +0 -0
  36. common/__pycache__/dist_utils.cpython-310.pyc +0 -0
  37. common/__pycache__/dist_utils.cpython-39.pyc +0 -0
  38. common/__pycache__/io_utils.cpython-310.pyc +0 -0
  39. common/__pycache__/io_utils.cpython-39.pyc +0 -0
  40. common/__pycache__/launch_utils.cpython-310.pyc +0 -0
  41. common/__pycache__/launch_utils.cpython-313.pyc +0 -0
  42. common/__pycache__/launch_utils.cpython-39.pyc +0 -0
  43. common/__pycache__/misc.cpython-310.pyc +0 -0
  44. common/__pycache__/misc.cpython-39.pyc +0 -0
  45. common/__pycache__/type_utils.cpython-310.pyc +0 -0
  46. common/__pycache__/type_utils.cpython-39.pyc +0 -0
  47. common/box_utils.py +66 -0
  48. common/dist_utils.py +220 -0
  49. common/io_utils.py +133 -0
  50. common/launch_utils.py +121 -0
.gitattributes CHANGED
@@ -1,59 +1,13 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mds filter=lfs diff=lfs merge=lfs -text
13
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
- *.model filter=lfs diff=lfs merge=lfs -text
15
- *.msgpack filter=lfs diff=lfs merge=lfs -text
16
- *.npy filter=lfs diff=lfs merge=lfs -text
17
- *.npz filter=lfs diff=lfs merge=lfs -text
18
- *.onnx filter=lfs diff=lfs merge=lfs -text
19
- *.ot filter=lfs diff=lfs merge=lfs -text
20
- *.parquet filter=lfs diff=lfs merge=lfs -text
21
- *.pb filter=lfs diff=lfs merge=lfs -text
22
- *.pickle filter=lfs diff=lfs merge=lfs -text
23
- *.pkl filter=lfs diff=lfs merge=lfs -text
24
- *.pt filter=lfs diff=lfs merge=lfs -text
25
- *.pth filter=lfs diff=lfs merge=lfs -text
26
- *.rar filter=lfs diff=lfs merge=lfs -text
27
  *.safetensors filter=lfs diff=lfs merge=lfs -text
28
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
- *.tar.* filter=lfs diff=lfs merge=lfs -text
30
- *.tar filter=lfs diff=lfs merge=lfs -text
31
- *.tflite filter=lfs diff=lfs merge=lfs -text
32
- *.tgz filter=lfs diff=lfs merge=lfs -text
33
- *.wasm filter=lfs diff=lfs merge=lfs -text
34
- *.xz filter=lfs diff=lfs merge=lfs -text
35
- *.zip filter=lfs diff=lfs merge=lfs -text
36
- *.zst filter=lfs diff=lfs merge=lfs -text
37
- *tfevents* filter=lfs diff=lfs merge=lfs -text
38
- # Audio files - uncompressed
39
- *.pcm filter=lfs diff=lfs merge=lfs -text
40
- *.sam filter=lfs diff=lfs merge=lfs -text
41
- *.raw filter=lfs diff=lfs merge=lfs -text
42
- # Audio files - compressed
43
- *.aac filter=lfs diff=lfs merge=lfs -text
44
- *.flac filter=lfs diff=lfs merge=lfs -text
45
- *.mp3 filter=lfs diff=lfs merge=lfs -text
46
- *.ogg filter=lfs diff=lfs merge=lfs -text
47
- *.wav filter=lfs diff=lfs merge=lfs -text
48
- # Image files - uncompressed
49
- *.bmp filter=lfs diff=lfs merge=lfs -text
50
- *.gif filter=lfs diff=lfs merge=lfs -text
51
- *.png filter=lfs diff=lfs merge=lfs -text
52
- *.tiff filter=lfs diff=lfs merge=lfs -text
53
- # Image files - compressed
54
- *.jpg filter=lfs diff=lfs merge=lfs -text
55
- *.jpeg filter=lfs diff=lfs merge=lfs -text
56
- *.webp filter=lfs diff=lfs merge=lfs -text
57
- # Video files - compressed
58
- *.mp4 filter=lfs diff=lfs merge=lfs -text
59
- *.webm filter=lfs diff=lfs merge=lfs -text
1
  *.safetensors filter=lfs diff=lfs merge=lfs -text
2
+ POMA_BENCH/nr3d_retrieval.jsonl filter=lfs diff=lfs merge=lfs -text
3
+ POMA_BENCH/scanrefer_retrieval.jsonl filter=lfs diff=lfs merge=lfs -text
4
+ POMA_BENCH/sr3d_retrieval.jsonl filter=lfs diff=lfs merge=lfs -text
5
+ POMA_BENCH/ssg_ref_total_by_view.jsonl filter=lfs diff=lfs merge=lfs -text
6
+ POMA_BENCH/ssg_ref_total_by_view_full.jsonl filter=lfs diff=lfs merge=lfs -text
7
+ assets/logo025.png filter=lfs diff=lfs merge=lfs -text
8
+ assets/overview.png filter=lfs diff=lfs merge=lfs -text
9
+ chamfer_rankings.json filter=lfs diff=lfs merge=lfs -text
10
+ light_3rscan_chamfer_rankings.json filter=lfs diff=lfs merge=lfs -text
11
+ light_arkitscenes_chamfer_rankings.json filter=lfs diff=lfs merge=lfs -text
12
+ light_scannet_chamfer_rankings.json filter=lfs diff=lfs merge=lfs -text
13
+ scripts/job/internvl3_8b_fine_tune_coco_job_output_44548.txt filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,8 @@
1
+ fg-clip-base/model.safetensors
2
+ PointMapVerse
3
+ results/
4
+ vis/
5
+ wandb/
6
+ lmms-finetune
7
+ cc3m_1M
8
+ captions
LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024 scene-verse
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
POMA_BENCH/eval_scene_retrieval.py ADDED
@@ -0,0 +1,275 @@
1
+ import os
2
+ import glob
3
+ import json
4
+ import argparse
5
+ from typing import Dict, List, Tuple
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ from safetensors.torch import load_file
11
+ from transformers import AutoImageProcessor, AutoModelForCausalLM, AutoTokenizer
12
+ from huggingface_hub import hf_hub_download
13
+ from peft import LoraConfig, get_peft_model
14
+
15
+ # -----------------------------
16
+ # Utils
17
+ # -----------------------------
18
+ def load_json(path: str) -> dict:
19
+ with open(path, "r") as f:
20
+ return json.load(f)
21
+
22
+ def find_scan_safetensor(scan_root: str, scan_id: str) -> str:
23
+ direct = os.path.join(scan_root, f"{scan_id}.safetensors")
24
+ if os.path.exists(direct):
25
+ return direct
26
+
27
+ pattern = os.path.join(scan_root, "**", f"{scan_id}.safetensors")
28
+ matches = glob.glob(pattern, recursive=True)
29
+ if not matches:
30
+ raise FileNotFoundError(f"Cannot find safetensor for scan_id={scan_id} under {scan_root}")
31
+ matches = sorted(matches, key=len)
32
+ return matches[0]
33
+
34
+ def to_vchw(point_map: torch.Tensor) -> torch.Tensor:
35
+ """
36
+ Convert point_map to (V, 3, H, W) float tensor.
37
+ Accepts:
38
+ (V, 3, H, W)
39
+ (V, H, W, 3)
40
+ """
41
+ if point_map.dim() != 4:
42
+ raise ValueError(f"Expected 4D point_map, got shape={tuple(point_map.shape)}")
43
+
44
+ V, a, b, c = point_map.shape
45
+ if a == 3:
46
+ out = point_map
47
+ elif c == 3:
48
+ out = point_map.permute(0, 3, 1, 2).contiguous()
49
+ else:
50
+ raise ValueError(f"Unrecognized point_map layout: shape={tuple(point_map.shape)}")
51
+
52
+ return out.float()
53
+
54
+ def load_safetensor_from_hf(repo_id, filename, repo_type="dataset"):
55
+ cached_path = hf_hub_download(
56
+ repo_id=repo_id,
57
+ filename=filename,
58
+ repo_type=repo_type,
59
+ local_files_only=False
60
+ )
61
+ return load_file(cached_path)
62
+
63
+ def load_pretrain(model, pretrain_ckpt_path: str):
64
+ print(f"📂 Loading pretrained weights from: {str(pretrain_ckpt_path)}")
65
+
66
+ model_weight_path_pattern = os.path.join(pretrain_ckpt_path, "model*.safetensors")
67
+ model_weight_paths = glob.glob(model_weight_path_pattern)
68
+
69
+ if len(model_weight_paths) == 0:
70
+ raise FileNotFoundError(f"❌ Cannot find any model*.safetensors in {str(pretrain_ckpt_path)}")
71
+
72
+ weights = {}
73
+ for model_weight_path in model_weight_paths:
74
+ print(f"📥 Loading weights from: {model_weight_path}")
75
+ weights.update(load_file(model_weight_path, device="cpu"))
76
+
77
+ result = model.load_state_dict(weights, strict=False)
78
+
79
+ model_keys = set(model.state_dict().keys())
80
+ loaded_keys = model_keys.intersection(weights.keys())
81
+ print(f"✅ Loaded keys: {len(loaded_keys)} / {len(model_keys)}")
82
+ print(f"❌ Missing keys: {len(result.missing_keys)}")
83
+ print(f"⚠️ Unexpected keys: {len(result.unexpected_keys)}")
84
+
85
+
86
+ # -----------------------------
87
+ # Model wrapper
88
+ # -----------------------------
89
+ class RepModel(nn.Module):
90
+ def __init__(self, model_root: str = "fg-clip-base"):
91
+ super().__init__()
92
+
93
+ self.pm_encoder = AutoModelForCausalLM.from_pretrained(f'../{model_root}', trust_remote_code=True)
94
+ self.tokenizer = AutoTokenizer.from_pretrained(f'../{model_root}', trust_remote_code=True, use_fast=True)
95
+ self.image_processor = AutoImageProcessor.from_pretrained(f'../{model_root}')
96
+
97
+ # Optional: print trainable params
98
+ try:
99
+ self.pm_encoder.print_trainable_parameters()
100
+ except Exception:
101
+ pass
102
+
103
+ def encode_views_batched(self, pm_vchw: torch.Tensor, batch_views: int = 32) -> torch.Tensor:
104
+ """
105
+ pm_vchw: (V,3,H,W) on device
106
+ returns: (V,D) normalized
107
+ """
108
+ feats_all = []
109
+ V = pm_vchw.shape[0]
110
+ for s in range(0, V, batch_views):
111
+ chunk = pm_vchw[s : s + batch_views] # (b,3,H,W)
112
+ _, feats = self.pm_encoder.get_image_features(chunk)
113
+ feats = F.normalize(feats.float(), dim=-1)
114
+ feats_all.append(feats)
115
+ return torch.cat(feats_all, dim=0)
116
+
117
+ @torch.no_grad()
118
+ def encode_text(self, texts: List[str]) -> torch.Tensor:
119
+ """
120
+ texts: list[str]
121
+ returns: (B,D) normalized
122
+ """
123
+ tok = self.tokenizer(
124
+ texts,
125
+ padding="max_length",
126
+ truncation=True,
127
+ max_length=248,
128
+ return_tensors="pt",
129
+ ).to(next(self.parameters()).device)
130
+
131
+ feats = self.pm_encoder.get_text_features(tok["input_ids"], walk_short_pos=False)
132
+ feats = F.normalize(feats.float(), dim=-1)
133
+ return feats
134
+
135
+ # -----------------------------
136
+ # Scene retrieval
137
+ # -----------------------------
138
+ def build_queries_from_caption_json(caption_json: dict) -> List[dict]:
139
+ """
140
+ Convert:
141
+ { scene_id: { "captions": [c1,c2,...] }, ... }
142
+ into:
143
+ [ { "scene_id": scene_id, "caption": c }, ... ]
144
+ """
145
+ queries = []
146
+ for scene_id, payload in caption_json.items():
147
+ caps = payload.get("captions", [])
148
+ for c in caps:
149
+ c = (c or "").strip()
150
+ if c:
151
+ queries.append({"scene_id": scene_id, "caption": c})
152
+ return queries
153
+
154
+
155
+ @torch.no_grad()
156
+ def eval_scene_retrieval(
157
+ model: RepModel,
158
+ caption_json: dict,
159
+ scan_root: str,
160
+ device: str = "cuda",
161
+ batch_views: int = 32,
162
+ recall_ks: Tuple[int, ...] = (1, 5, 10),
163
+ ) -> Dict[str, float]:
164
+ """
165
+ For each caption, retrieve the correct scene among all scenes in caption_json.
166
+ Scene embedding = mean pooling over view embeddings.
167
+ """
168
+ model.eval().to(device)
169
+
170
+ scene_ids = sorted(list(caption_json.keys()))
171
+ if len(scene_ids) == 0:
172
+ return {"n": 0}
173
+
174
+ # Cache: scene_id -> pooled scene feature (D,) on CPU
175
+ scene_feat_cache: Dict[str, torch.Tensor] = {}
176
+
177
+ # Precompute all scene pooled features once (so retrieval is fast)
178
+ for sid in scene_ids:
179
+ filename = f'light_scannet/{sid}.safetensors'
180
+ data = load_safetensor_from_hf('MatchLab/ScenePoint', filename, repo_type="dataset")
181
+
182
+ pm = to_vchw(data["point_map"]) # (V,3,H,W) on CPU
183
+ pm = pm.to(device, non_blocking=True)
184
+
185
+ view_feats = model.encode_views_batched(pm, batch_views=batch_views) # (V,D) on GPU
186
+ scene_feat = view_feats.mean(dim=0) # (D,)
187
+ scene_feat = F.normalize(scene_feat, dim=-1)
188
+
189
+ scene_feat_cache[sid] = scene_feat.detach().cpu()
190
+
191
+ # Stack gallery: (N,D)
192
+ gallery = torch.stack([scene_feat_cache[sid] for sid in scene_ids], dim=0) # CPU
193
+ gallery = gallery.to(device)
194
+
195
+ # Build queries
196
+ queries = build_queries_from_caption_json(caption_json)
197
+
198
+ total = 0
199
+ top1_correct = 0
200
+ recall_correct = {k: 0 for k in recall_ks}
201
+
202
+ for q in queries:
203
+ gt_sid = q["scene_id"]
204
+ caption = q["caption"]
205
+
206
+ if gt_sid not in scene_feat_cache:
207
+ continue
208
+
209
+ text_feat = model.encode_text([caption])[0] # (D,) on GPU
210
+
211
+ # similarity over all scenes: (N,)
212
+ sims = gallery @ text_feat.unsqueeze(-1) # (N,1)
213
+ sims = sims.squeeze(-1)
214
+
215
+ ranked = torch.argsort(sims, descending=True) # indices into scene_ids
216
+ pred_sid = scene_ids[int(ranked[0].item())]
217
+
218
+ total += 1
219
+ if pred_sid == gt_sid:
220
+ top1_correct += 1
221
+
222
+ for k in recall_ks:
223
+ k_eff = min(k, len(scene_ids))
224
+ topk_idx = ranked[:k_eff].tolist()
225
+ topk_sids = [scene_ids[i] for i in topk_idx]
226
+ if gt_sid in topk_sids:
227
+ recall_correct[k] += 1
228
+
229
+ # optional debug print
230
+ print(f"[Q] GT={gt_sid} | Pred={pred_sid} | caption={caption[:80]}...")
231
+
232
+ if total == 0:
233
+ return {"n": 0}
234
+
235
+ out = {"n": total, "top1_acc": top1_correct / total}
236
+ for k in recall_ks:
237
+ out[f"recall@{k}"] = recall_correct[k] / total
238
+ return out
239
+
240
+
241
+ def main():
242
+ ap = argparse.ArgumentParser()
243
+ ap.add_argument("--caption_json", type=str, required=True, help="JSON mapping scene_id -> {captions:[...]}")
244
+ ap.add_argument("--scan_root", type=str, required=True, help="Root dir containing scene safetensors")
245
+ ap.add_argument("--ckpt", type=str, default="", help="Optional: dir with model*.safetensors")
246
+ ap.add_argument("--model_root", type=str, default="fg-clip-base")
247
+ ap.add_argument("--device", type=str, default="cuda")
248
+ ap.add_argument("--batch_views", type=int, default=32)
249
+ args = ap.parse_args()
250
+
251
+ caption_json = load_json(args.caption_json)
252
+
253
+ model = RepModel(model_root=args.model_root)
254
+ if args.ckpt:
255
+ load_pretrain(model, args.ckpt)
256
+
257
+ metrics = eval_scene_retrieval(
258
+ model=model,
259
+ caption_json=caption_json,
260
+ scan_root=args.scan_root,
261
+ device=args.device,
262
+ batch_views=args.batch_views,
263
+ recall_ks=(1, 5, 10),
264
+ )
265
+
266
+ print("\n=== Scene Retrieval Results ===")
267
+ for k, v in metrics.items():
268
+ if isinstance(v, float):
269
+ print(f"{k:>10}: {v:.4f}")
270
+ else:
271
+ print(f"{k:>10}: {v}")
272
+
273
+
274
+ if __name__ == "__main__":
275
+ main()
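For reference, the evaluator above can also be driven programmatically rather than through argparse. The sketch below is an illustration added for this write-up, not part of the commit: it assumes eval_scene_retrieval.py is importable with its dependencies (transformers, peft, safetensors, huggingface_hub) installed, that a caption file in the scene_cap.json format and the ../fg-clip-base checkpoint exist locally, and that a CUDA device is available.

from eval_scene_retrieval import RepModel, eval_scene_retrieval, load_json

caption_json = load_json("scene_cap.json")        # {scene_id: {"captions": [...]}}
model = RepModel(model_root="fg-clip-base")       # RepModel resolves this as ../fg-clip-base
metrics = eval_scene_retrieval(
    model=model,
    caption_json=caption_json,
    scan_root="/path/to/light_scannet",           # placeholder; in this version views are pulled from the MatchLab/ScenePoint HF dataset
    device="cuda",
    batch_views=32,
)
print(metrics)                                    # e.g. {"n": ..., "top1_acc": ..., "recall@1": ..., "recall@5": ..., "recall@10": ...}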
POMA_BENCH/eval_view_retrieval.py ADDED
@@ -0,0 +1,488 @@
1
+ import os
2
+ import glob
3
+ import json
4
+ import argparse
5
+ from typing import Dict, List, Tuple
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ from safetensors.torch import load_file
11
+ from huggingface_hub import hf_hub_download
12
+ from transformers import AutoImageProcessor, AutoModelForCausalLM, AutoTokenizer
13
+
14
+ # -----------------------------
15
+ # Utils
16
+ # -----------------------------
17
+ def load_jsonl(path: str) -> List[dict]:
18
+ data = []
19
+ with open(path, "r") as f:
20
+ for line in f:
21
+ line = line.strip()
22
+ if not line:
23
+ continue
24
+ data.append(json.loads(line))
25
+ return data
26
+
27
+
28
+ def load_safetensor_from_hf(repo_id, filename, repo_type="dataset"):
29
+ cached_path = hf_hub_download(
30
+ repo_id=repo_id,
31
+ filename=filename,
32
+ repo_type=repo_type,
33
+ local_files_only=False
34
+ )
35
+ return load_file(cached_path)
36
+
37
+
38
+ def to_vchw(point_map: torch.Tensor) -> torch.Tensor:
39
+ """
40
+ Convert point_map to (V, 3, H, W) float tensor.
41
+ Accepts common layouts:
42
+ (V, 3, H, W) -> ok
43
+ (V, H, W, 3) -> permute
44
+ (V, H, W, C) where C=3 -> permute
45
+ """
46
+ if point_map.dim() != 4:
47
+ raise ValueError(f"Expected point_map to be 4D (V,*,*,*), got shape={tuple(point_map.shape)}")
48
+
49
+ V, a, b, c = point_map.shape
50
+
51
+ # (V, 3, H, W)
52
+ if a == 3:
53
+ out = point_map
54
+ # (V, H, W, 3)
55
+ elif c == 3:
56
+ out = point_map.permute(0, 3, 1, 2).contiguous()
57
+ else:
58
+ raise ValueError(f"Unrecognized point_map layout: shape={tuple(point_map.shape)}")
59
+
60
+ return out.float()
61
+
62
+
63
+ def load_pretrain(model, pretrain_ckpt_path):
64
+ print(f"📂 Loading pretrained weights from: {str(pretrain_ckpt_path)}")
65
+
66
+ # Search for safetensors files
67
+ model_weight_path_pattern = pretrain_ckpt_path + "/model*.safetensors"
68
+ model_weight_paths = glob.glob(model_weight_path_pattern)
69
+
70
+ if len(model_weight_paths) == 0:
71
+ raise FileNotFoundError(f"❌ Cannot find any .safetensors file in {str(pretrain_ckpt_path)}")
72
+
73
+ # Load and merge weights
74
+ weights = {}
75
+ for model_weight_path in model_weight_paths:
76
+ print(f"📥 Loading weights from: {model_weight_path}")
77
+ weights.update(load_file(model_weight_path, device="cpu"))
78
+
79
+ # Load weights with strict=False
80
+ result = model.load_state_dict(weights, strict=False)
81
+
82
+ model_keys = set(model.state_dict().keys())
83
+ loaded_keys = model_keys.intersection(weights.keys())
84
+ missing_keys = result.missing_keys
85
+ unexpected_keys = result.unexpected_keys
86
+ print(f"✅ Loaded keys: {len(loaded_keys)} / {len(model_keys)}")
87
+ print(f"❌ Missing keys: {len(missing_keys)}")
88
+ print(f"⚠️ Unexpected keys: {len(unexpected_keys)}")
89
+
90
+ class _GlobalViewAttnBlock(nn.Module):
91
+ """One pre-norm Transformer-style block over view tokens (B,V,D)."""
92
+ def __init__(
93
+ self,
94
+ dim: int,
95
+ num_heads: int,
96
+ mlp_ratio: float,
97
+ dropout: float,
98
+ zero_init_residual: bool,
99
+ zero_init_attn_out: bool,
100
+ ):
101
+ super().__init__()
102
+ self.zero_init_residual = zero_init_residual
103
+ self.zero_init_attn_out = zero_init_attn_out
104
+
105
+ self.norm1 = nn.LayerNorm(dim)
106
+ self.attn = nn.MultiheadAttention(
107
+ embed_dim=dim,
108
+ num_heads=num_heads,
109
+ dropout=dropout,
110
+ batch_first=True,
111
+ bias=True,
112
+ )
113
+
114
+ self.norm2 = nn.LayerNorm(dim)
115
+ hidden_dim = int(dim * mlp_ratio)
116
+ self.mlp = nn.Sequential(
117
+ nn.Linear(dim, hidden_dim),
118
+ nn.GELU(),
119
+ nn.Dropout(dropout),
120
+ nn.Linear(hidden_dim, dim),
121
+ nn.Dropout(dropout),
122
+ )
123
+
124
+ self._init_weights()
125
+
126
+ def forward(self, x, key_padding_mask=None):
127
+ h = self.norm1(x)
128
+ attn_out, _ = self.attn(
129
+ h, h, h,
130
+ key_padding_mask=key_padding_mask,
131
+ need_weights=False,
132
+ )
133
+ x = x + attn_out
134
+ x = x + self.mlp(self.norm2(x))
135
+ return x
136
+
137
+ @torch.no_grad()
138
+ def _init_weights(self):
139
+ # LayerNorm
140
+ for ln in (self.norm1, self.norm2):
141
+ nn.init.ones_(ln.weight)
142
+ nn.init.zeros_(ln.bias)
143
+
144
+ # MultiheadAttention: in_proj for qkv (3D, D)
145
+ if getattr(self.attn, "in_proj_weight", None) is not None:
146
+ nn.init.xavier_uniform_(self.attn.in_proj_weight)
147
+ if getattr(self.attn, "in_proj_bias", None) is not None:
148
+ nn.init.zeros_(self.attn.in_proj_bias)
149
+
150
+ # out proj
151
+ nn.init.xavier_uniform_(self.attn.out_proj.weight)
152
+ if self.attn.out_proj.bias is not None:
153
+ nn.init.zeros_(self.attn.out_proj.bias)
154
+
155
+ # optional: start attn residual near-zero
156
+ if self.zero_init_attn_out:
157
+ nn.init.zeros_(self.attn.out_proj.weight)
158
+ if self.attn.out_proj.bias is not None:
159
+ nn.init.zeros_(self.attn.out_proj.bias)
160
+
161
+ # MLP
162
+ fc1: nn.Linear = self.mlp[0]
163
+ fc2: nn.Linear = self.mlp[3]
164
+
165
+ nn.init.xavier_uniform_(fc1.weight)
166
+ if fc1.bias is not None:
167
+ nn.init.zeros_(fc1.bias)
168
+
169
+ # zero-init last projection for stable residual start (recommended)
170
+ if self.zero_init_residual:
171
+ nn.init.zeros_(fc2.weight)
172
+ if fc2.bias is not None:
173
+ nn.init.zeros_(fc2.bias)
174
+ else:
175
+ nn.init.xavier_uniform_(fc2.weight)
176
+ if fc2.bias is not None:
177
+ nn.init.zeros_(fc2.bias)
178
+
179
+ class _GlobalViewGatedAttnBlock(nn.Module):
180
+ """Pre-norm Transformer block over view tokens (B,V,D) with gated residuals."""
181
+ def __init__(
182
+ self,
183
+ dim: int,
184
+ num_heads: int,
185
+ mlp_ratio: float,
186
+ dropout: float,
187
+ zero_init_residual: bool,
188
+ zero_init_attn_out: bool,
189
+ gate_bias_init: float = -2.0, # sigmoid(-2)≈0.12, starts near-identity (small updates)
190
+ ):
191
+ super().__init__()
192
+ self.zero_init_residual = zero_init_residual
193
+ self.zero_init_attn_out = zero_init_attn_out
194
+
195
+ self.norm1 = nn.LayerNorm(dim)
196
+ self.attn = nn.MultiheadAttention(
197
+ embed_dim=dim,
198
+ num_heads=num_heads,
199
+ dropout=dropout,
200
+ batch_first=True,
201
+ bias=True,
202
+ )
203
+
204
+ # --- Gating for attention residual ---
205
+ # Produces per-token, per-channel gates in (0,1)
206
+ self.attn_gate = nn.Linear(dim, dim, bias=True)
207
+
208
+ self.norm2 = nn.LayerNorm(dim)
209
+ hidden_dim = int(dim * mlp_ratio)
210
+ self.mlp = nn.Sequential(
211
+ nn.Linear(dim, hidden_dim),
212
+ nn.GELU(),
213
+ nn.Dropout(dropout),
214
+ nn.Linear(hidden_dim, dim),
215
+ nn.Dropout(dropout),
216
+ )
217
+
218
+ # --- Gating for MLP residual ---
219
+ self.mlp_gate = nn.Linear(dim, dim, bias=True)
220
+
221
+ self._init_weights(gate_bias_init=gate_bias_init)
222
+
223
+ def forward(self, x: torch.Tensor, key_padding_mask=None) -> torch.Tensor:
224
+ # x: (B, V, D)
225
+ h1 = self.norm1(x)
226
+ attn_out, _ = self.attn(
227
+ h1, h1, h1,
228
+ key_padding_mask=key_padding_mask,
229
+ need_weights=False,
230
+ )
231
+ g_attn = torch.sigmoid(self.attn_gate(h1)) # (B, V, D)
232
+ x = x + g_attn * attn_out
233
+
234
+ h2 = self.norm2(x)
235
+ mlp_out = self.mlp(h2)
236
+ g_mlp = torch.sigmoid(self.mlp_gate(h2)) # (B, V, D)
237
+ x = x + g_mlp * mlp_out
238
+ return x
239
+
240
+ @torch.no_grad()
241
+ def _init_weights(self, gate_bias_init: float):
242
+ # LayerNorm
243
+ for ln in (self.norm1, self.norm2):
244
+ nn.init.ones_(ln.weight)
245
+ nn.init.zeros_(ln.bias)
246
+
247
+ # MultiheadAttention: in_proj for qkv
248
+ if getattr(self.attn, "in_proj_weight", None) is not None:
249
+ nn.init.xavier_uniform_(self.attn.in_proj_weight)
250
+ if getattr(self.attn, "in_proj_bias", None) is not None:
251
+ nn.init.zeros_(self.attn.in_proj_bias)
252
+
253
+ # out proj
254
+ nn.init.xavier_uniform_(self.attn.out_proj.weight)
255
+ if self.attn.out_proj.bias is not None:
256
+ nn.init.zeros_(self.attn.out_proj.bias)
257
+
258
+ # optional: start attn residual near-zero
259
+ if self.zero_init_attn_out:
260
+ nn.init.zeros_(self.attn.out_proj.weight)
261
+ if self.attn.out_proj.bias is not None:
262
+ nn.init.zeros_(self.attn.out_proj.bias)
263
+
264
+ # MLP
265
+ fc1: nn.Linear = self.mlp[0]
266
+ fc2: nn.Linear = self.mlp[3]
267
+ nn.init.xavier_uniform_(fc1.weight)
268
+ if fc1.bias is not None:
269
+ nn.init.zeros_(fc1.bias)
270
+
271
+ if self.zero_init_residual:
272
+ nn.init.zeros_(fc2.weight)
273
+ if fc2.bias is not None:
274
+ nn.init.zeros_(fc2.bias)
275
+ else:
276
+ nn.init.xavier_uniform_(fc2.weight)
277
+ if fc2.bias is not None:
278
+ nn.init.zeros_(fc2.bias)
279
+
280
+ # Gates: start “mostly closed” so training is stable, then learn to open
281
+ nn.init.zeros_(self.attn_gate.weight)
282
+ nn.init.constant_(self.attn_gate.bias, gate_bias_init)
283
+
284
+ nn.init.zeros_(self.mlp_gate.weight)
285
+ nn.init.constant_(self.mlp_gate.bias, gate_bias_init)
286
+
287
+ class GlobalViewAttention(nn.Module):
288
+ """
289
+ Multi-layer global self-attention over multi-view tokens.
290
+
291
+ Input: x ∈ (B, V, D)
292
+ Output: x' ∈ (B, V, D)
293
+ """
294
+ def __init__(
295
+ self,
296
+ dim: int,
297
+ num_layers: int = 1,
298
+ num_heads: int = 8,
299
+ mlp_ratio: float = 4.0,
300
+ dropout: float = 0.0,
301
+ zero_init_residual: bool = True, # recommended (stable when adding layers)
302
+ zero_init_attn_out: bool = False, # optional extra safety
303
+ ):
304
+ super().__init__()
305
+ assert num_layers >= 1, "num_layers must be >= 1"
306
+
307
+ self.dim = dim
308
+ self.num_layers = num_layers
309
+ self.num_heads = num_heads
310
+ self.layers = nn.ModuleList([
311
+ _GlobalViewAttnBlock(
312
+ dim=dim,
313
+ num_heads=num_heads,
314
+ mlp_ratio=mlp_ratio,
315
+ dropout=dropout,
316
+ zero_init_residual=zero_init_residual,
317
+ zero_init_attn_out=zero_init_attn_out,
318
+ )
319
+ for _ in range(num_layers)
320
+ ])
321
+
322
+ def forward(self, x, key_padding_mask=None):
323
+ """
324
+ x: (B, V, D)
325
+ key_padding_mask: (B, V), True = ignore (padding)
326
+ """
327
+ for layer in self.layers:
328
+ x = layer(x, key_padding_mask=key_padding_mask)
329
+ return x
330
+
331
+ class RepModel(nn.Module):
332
+ def __init__(self, model_root: str = "fg-clip-base"):
333
+ super().__init__()
334
+
335
+ self.pm_encoder = AutoModelForCausalLM.from_pretrained(f'../{model_root}', trust_remote_code=True)
336
+ # self.global_attn = GlobalViewAttention(dim=512, num_heads=8, mlp_ratio=4.0, dropout=0.1)
337
+ self.tokenizer = AutoTokenizer.from_pretrained(f'../{model_root}', trust_remote_code=True, use_fast=True)
338
+ self.image_processor = AutoImageProcessor.from_pretrained(f'../{model_root}')
339
+
340
+ # Optional: print trainable params
341
+ try:
342
+ self.pm_encoder.print_trainable_parameters()
343
+ except Exception:
344
+ pass
345
+
346
+ @torch.no_grad()
347
+ def encode_views(self, pm_batched):
348
+ # Expect (1,V,3,H,W) or (V,3,H,W)
349
+ # pm_batched = self.image_processor(images=pm_batched, return_tensors="pt").to('cuda')
350
+ _, feats = self.pm_encoder.get_image_features(pm_batched)
351
+ # feats = self.global_attn(feats)
352
+ feats = torch.nn.functional.normalize(feats.float(), dim=-1)
353
+ return feats
354
+
355
+ @torch.no_grad()
356
+ def encode_text(self, texts):
357
+ tok = self.tokenizer(texts, padding="max_length", truncation=True, max_length=248, return_tensors="pt").to('cuda')
358
+ feats = self.pm_encoder.get_text_features(tok["input_ids"], walk_short_pos=False)
359
+ feats = torch.nn.functional.normalize(feats.float(), dim=-1)
360
+ return feats
361
+
362
+
363
+ # -----------------------------
364
+ # Retrieval evaluation
365
+ # -----------------------------
366
+ @torch.no_grad()
367
+ def eval_view_retrieval(
368
+ model: RepModel,
369
+ items: List[dict],
370
+ scan_root: str,
371
+ device: str = "cuda",
372
+ batch_views: int = 32,
373
+ recall_ks: Tuple[int, ...] = (1, 5, 10),
374
+ ) -> Dict[str, float]:
375
+ model.eval()
376
+ model.to(device)
377
+
378
+ # Cache: scan_id -> (V, D) view features
379
+ scan_cache: Dict[str, torch.Tensor] = {}
380
+
381
+ total = 0
382
+ top1_correct = 0
383
+ recall_correct = {k: 0 for k in recall_ks}
384
+
385
+ for it in items:
386
+ scan_id = it["scan_id"]
387
+ utter = it["utterance"]
388
+ gt_views = it.get("view_ground_truth", None)
389
+ if not gt_views:
390
+ continue
391
+ gt = int(gt_views[0])  # use the first ground-truth view index
392
+
393
+ # Load / cache view features for this scan
394
+ if scan_id not in scan_cache:
395
+ filename = f'light_scannet/{scan_id}.safetensors'
396
+ data = load_safetensor_from_hf('MatchLab/ScenePoint', filename, repo_type="dataset")
397
+
398
+ # if "point_map" not in data:
399
+ # raise KeyError(f"{st_path} does not contain key 'point_map'. keys={list(data.keys())}")
400
+
401
+ pm = to_vchw(data["point_map"]) # (V, 3, H, W)
402
+ # pm = data['color_images']
403
+
404
+ V = pm.shape[0]
405
+
406
+ feats = model.encode_views(pm.to(device, non_blocking=True)) # (chunk, D)
407
+ scan_cache[scan_id] = feats  # (V, D), kept on the compute device
408
+
409
+ view_feats = scan_cache[scan_id]  # (V, D)
410
+ V = view_feats.shape[0]
411
+ if gt < 0 or gt >= V:
412
+ # skip invalid gt index
413
+ continue
414
+
415
+ # Encode text
416
+ text_feat = model.encode_text(utter).squeeze(0).unsqueeze(-1)  # (D, 1)
417
+
418
+ # Similarity: (V,)
419
+ sims = (view_feats @ text_feat).squeeze(-1)
420
+
421
+ # rank views by similarity (high -> low)
422
+ ranked = torch.argsort(sims, descending=True)
423
+
424
+ pred = int(ranked[0].item())
425
+ total += 1
426
+
427
+ if pred == gt:
428
+ top1_correct += 1
429
+ else:
430
+ # per-sample print (optional)
431
+ print(f"GT: {gt}, Pred: {pred}, Utterance: {utter}")
432
+
433
+ # Recall@K
434
+ for k in recall_ks:
435
+ k_eff = min(k, V)
436
+ if (ranked[:k_eff] == gt).any().item():
437
+ recall_correct[k] += 1
438
+
439
+ # ----- after the loop -----
440
+ out = {}
441
+ if total == 0:
442
+ return {"n": 0}
443
+
444
+ out["n"] = total
445
+ out["top1_acc"] = top1_correct / total
446
+ for k in recall_ks:
447
+ out[f"recall@{k}"] = recall_correct[k] / total
448
+
449
+ return out
450
+
451
+ def main():
452
+ ap = argparse.ArgumentParser()
453
+ ap.add_argument("--jsonl", type=str, required=True, help="SR3D-style jsonl file")
454
+ ap.add_argument("--scan_root", type=str, required=True, help="Root dir containing scan safetensors")
455
+ ap.add_argument("--ckpt", type=str, default="", help="Optional: path to .pth/.pt or dir with model*.safetensors")
456
+ ap.add_argument("--model_root", type=str, default="fg-clip-base")
457
+ ap.add_argument("--device", type=str, default="cuda")
458
+ ap.add_argument("--batch_views", type=int, default=32)
459
+ ap.add_argument("--max_items", type=int, default=-1)
460
+ args = ap.parse_args()
461
+
462
+ items = load_jsonl(args.jsonl)
463
+ if args.max_items > 0:
464
+ items = items[: args.max_items]
465
+
466
+ model = RepModel(model_root=args.model_root)
467
+ if args.ckpt:
468
+ load_pretrain(model, args.ckpt)
469
+
470
+ metrics = eval_view_retrieval(
471
+ model=model,
472
+ items=items,
473
+ scan_root=args.scan_root,
474
+ device=args.device,
475
+ batch_views=args.batch_views,
476
+ recall_ks=(1, 5, 10),
477
+ )
478
+
479
+ print("\n=== View Retrieval Results ===")
480
+ for k, v in metrics.items():
481
+ if isinstance(v, float):
482
+ print(f"{k:>10}: {v:.4f}")
483
+ else:
484
+ print(f"{k:>10}: {v}")
485
+
486
+
487
+ if __name__ == "__main__":
488
+ main()
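For illustration only (not part of the commit), each record consumed by eval_view_retrieval needs just the three fields the loop reads: scan_id, utterance, and view_ground_truth. The values below are invented placeholders; the scan_id must exist as light_scannet/<scan_id>.safetensors in the MatchLab/ScenePoint dataset for the lookup to succeed, and ../fg-clip-base must be present locally.

from eval_view_retrieval import RepModel, eval_view_retrieval

items = [{
    "scan_id": "scene0000_00",                    # placeholder ScanNet scene id
    "utterance": "the chair next to the desk by the window",
    "view_ground_truth": [3],                     # index of the correct view; only the first entry is used
}]

model = RepModel(model_root="fg-clip-base")       # loaded from ../fg-clip-base
metrics = eval_view_retrieval(model, items, scan_root="unused", device="cuda")  # scan_root is kept for the CLI; this version loads views from HF
print(metrics)                                    # {"n": 1, "top1_acc": ..., "recall@1": ..., ...}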
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_50985.txt ADDED
@@ -0,0 +1 @@
1
+ Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_50989.txt ADDED
@@ -0,0 +1 @@
1
+ Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51020.txt ADDED
@@ -0,0 +1 @@
1
+ Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51021.txt ADDED
@@ -0,0 +1 @@
1
+ Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51022.txt ADDED
@@ -0,0 +1 @@
1
+ Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51029.txt ADDED
@@ -0,0 +1 @@
1
+ Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51042.txt ADDED
@@ -0,0 +1 @@
1
+ Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51044.txt ADDED
@@ -0,0 +1 @@
1
+ Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_error_51045.txt ADDED
@@ -0,0 +1 @@
1
+ Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_50985.txt ADDED
The diff for this file is too large to render. See raw diff
 
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_50989.txt ADDED
The diff for this file is too large to render. See raw diff
 
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51020.txt ADDED
The diff for this file is too large to render. See raw diff
 
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51021.txt ADDED
The diff for this file is too large to render. See raw diff
 
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51022.txt ADDED
The diff for this file is too large to render. See raw diff
 
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51029.txt ADDED
The diff for this file is too large to render. See raw diff
 
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51042.txt ADDED
The diff for this file is too large to render. See raw diff
 
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51044.txt ADDED
The diff for this file is too large to render. See raw diff
 
POMA_BENCH/job/internvl3_8b_fine_tune_coco_job_output_51045.txt ADDED
The diff for this file is too large to render. See raw diff
 
POMA_BENCH/nr3d_retrieval.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:296564ae94258af94f09a5dcf02f660314e438ff53136688e4322bb1e6a18700
3
+ size 22955141
POMA_BENCH/ret.sh ADDED
@@ -0,0 +1,36 @@
1
+ # #!/bin/bash
2
+ # #SBATCH --job-name=eval
3
+ # #SBATCH --output=job/internvl3_8b_fine_tune_coco_job_output_%j.txt
4
+ # #SBATCH --error=job/internvl3_8b_fine_tune_coco_job_error_%j.txt
5
+ # #SBATCH --time=20:00:00
6
+ # #SBATCH --ntasks=1
7
+ # #SBATCH --partition=camera-long
8
+ # #SBATCH --ntasks-per-node=1
9
+ # #SBATCH --nodes=1
10
+ # #SBATCH --gres=gpu:h100:1
11
+ # #SBATCH --mail-type=ALL
12
+ # #SBATCH --mail-user=ym621@ic.ac.uk
13
+ # #SBATCH --qos=normal
14
+
15
+ # cd /mnt/data-alpha-sg-01/team-camera/home/m50048399/transfered/ye_project/Project2/POMA_BENCH
16
+ # /home/m50048399/anaconda3/bin/conda init bash
17
+ # source ~/.bashrc
18
+ # conda activate sceneverse
19
+ # export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
20
+
21
+ python eval_view_retrieval.py \
22
+ --jsonl sr3d_retrieval.jsonl \
23
+ --scan_root /mnt/new_drive/retreival/light_scannet/light_scannet \
24
+ --device cuda \
25
+ --ckpt /home/m50048399/transfered/ye_project/Project2/results/sceneverse_scannet_exp1_b64_Pretrain_all_scannet_training_run1/2026-01-19-23:46:36.901933/ckpt/ckpt_20.pth \
26
+ --batch_views 32
27
+
28
+ # python eval_scene_retrieval.py \
29
+ # --caption_json scene_cap.json \
30
+ # --scan_root /mnt/new_drive/retreival/light_scannet/light_scannet \
31
+ # --model_root fg-clip-base \
32
+ # --ckpt /home/m50048399/transfered/ye_project/Project2/results/sceneverse_scannet_exp1_b128_Pretrain_all_scannet_training_run1/withgeoalign/ckpt/ckpt_100.pth \
33
+ # --batch_views 32
34
+
35
+
36
+
POMA_BENCH/scanrefer_retrieval.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77be567de421d2f8593ac04906ff3afaa3a635832b7d55d6c93041d0599d5390
3
+ size 27430061
POMA_BENCH/scene_cap.json ADDED
The diff for this file is too large to render. See raw diff
 
POMA_BENCH/sr3d_retrieval.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9acfb7d5773fbbfc42536e5511bc4fb5890444eb682afe446a9e6e8208abd604
3
+ size 48231881
POMA_BENCH/ssg_ref_total_by_view.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ea1c18e33e8f2255f202599f5596841370c608480e675e8e538020173d02e1e
3
+ size 10972907
POMA_BENCH/ssg_ref_total_by_view_full.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5842e5d224db9aafbee803363ed38218f4fc4adb4e8ac7b9855c853a68875362
3
+ size 36000916
assets/logo025.png ADDED

Git LFS Details

  • SHA256: 26b0012c75b78a0e1cd3c44bd5715f1faf149eca4334daa3fdd67c0a44892ad5
  • Pointer size: 132 Bytes
  • Size of remote file: 1.38 MB
assets/overview.png ADDED

Git LFS Details

  • SHA256: ba4e2a232e56914ec7cacff5bfd293cd5bd73753df379ff26fe2bf545c9b5b52
  • Pointer size: 132 Bytes
  • Size of remote file: 6.97 MB
chamfer_rankings.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6044fbe2930aa318090e012887efaa65d2fb48075bc95ef3bc3c661ee9c4afa
3
+ size 76539228
common/__pycache__/box_utils.cpython-310.pyc ADDED
Binary file (2.38 kB). View file
 
common/__pycache__/box_utils.cpython-39.pyc ADDED
Binary file (2.33 kB). View file
 
common/__pycache__/dist_utils.cpython-310.pyc ADDED
Binary file (6.19 kB). View file
 
common/__pycache__/dist_utils.cpython-39.pyc ADDED
Binary file (6.16 kB). View file
 
common/__pycache__/io_utils.cpython-310.pyc ADDED
Binary file (5.47 kB). View file
 
common/__pycache__/io_utils.cpython-39.pyc ADDED
Binary file (5.4 kB). View file
 
common/__pycache__/launch_utils.cpython-310.pyc ADDED
Binary file (4.79 kB). View file
 
common/__pycache__/launch_utils.cpython-313.pyc ADDED
Binary file (8.23 kB). View file
 
common/__pycache__/launch_utils.cpython-39.pyc ADDED
Binary file (4.79 kB). View file
 
common/__pycache__/misc.cpython-310.pyc ADDED
Binary file (4.41 kB). View file
 
common/__pycache__/misc.cpython-39.pyc ADDED
Binary file (4.36 kB). View file
 
common/__pycache__/type_utils.cpython-310.pyc ADDED
Binary file (1.42 kB). View file
 
common/__pycache__/type_utils.cpython-39.pyc ADDED
Binary file (1.34 kB). View file
 
common/box_utils.py ADDED
@@ -0,0 +1,66 @@
1
+ import numpy as np
2
+
3
+
4
+ def box3d_iou(corners1, corners2):
5
+ ''' Compute 3D bounding box IoU.
6
+
7
+ Input:
8
+ corners1: numpy array (8,3), assume up direction is Z
9
+ corners2: numpy array (8,3), assume up direction is Z
10
+ Output:
11
+ iou: 3D bounding box IoU
12
+
13
+ '''
14
+ x_min_1, x_max_1, y_min_1, y_max_1, z_min_1, z_max_1 = get_box3d_min_max(corners1)
15
+ x_min_2, x_max_2, y_min_2, y_max_2, z_min_2, z_max_2 = get_box3d_min_max(corners2)
16
+ xA = np.maximum(x_min_1, x_min_2)
17
+ yA = np.maximum(y_min_1, y_min_2)
18
+ zA = np.maximum(z_min_1, z_min_2)
19
+ xB = np.minimum(x_max_1, x_max_2)
20
+ yB = np.minimum(y_max_1, y_max_2)
21
+ zB = np.minimum(z_max_1, z_max_2)
22
+ inter_vol = np.maximum((xB - xA), 0) * np.maximum((yB - yA), 0) * np.maximum((zB - zA), 0)
23
+ box_vol_1 = (x_max_1 - x_min_1) * (y_max_1 - y_min_1) * (z_max_1 - z_min_1)
24
+ box_vol_2 = (x_max_2 - x_min_2) * (y_max_2 - y_min_2) * (z_max_2 - z_min_2)
25
+ iou = inter_vol / (box_vol_1 + box_vol_2 - inter_vol + 1e-8)
26
+
27
+ return iou
28
+
29
+
30
+ def get_box3d_min_max(corner):
31
+ ''' Compute min and max coordinates for 3D bounding box
32
+ Note: only for axis-aligned bounding boxes
33
+
34
+ Input:
35
+ corners: numpy array (8,3), assume up direction is Z (batch of N samples)
36
+ Output:
37
+ box_min_max: an array for min and max coordinates of 3D bounding box IoU
38
+
39
+ '''
40
+ min_coord = corner.min(axis=0)
41
+ max_coord = corner.max(axis=0)
42
+ x_min, x_max = min_coord[0], max_coord[0]
43
+ y_min, y_max = min_coord[1], max_coord[1]
44
+ z_min, z_max = min_coord[2], max_coord[2]
45
+
46
+ return x_min, x_max, y_min, y_max, z_min, z_max
47
+
48
+
49
+ def get_3d_box(center, box_size):
50
+ ''' box_size is array (l, w, h); center is xyz of the box center; the box is axis-aligned (no heading angle)
51
+ output (8,3) array of 3D box corners
52
+ Similar to utils/compute_orientation_3d
53
+ '''
54
+ l,w,h = box_size
55
+ # x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2]
56
+ # y_corners = [h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2]
57
+ # z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2]
58
+ x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2]
59
+ y_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2]
60
+ z_corners = [h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2]
61
+ corners_3d = np.vstack([x_corners,y_corners,z_corners])
62
+ corners_3d[0,:] = corners_3d[0,:] + center[0]
63
+ corners_3d[1,:] = corners_3d[1,:] + center[1]
64
+ corners_3d[2,:] = corners_3d[2,:] + center[2]
65
+ corners_3d = np.transpose(corners_3d)
66
+ return corners_3d
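As a quick sanity check added here (not in the commit), get_3d_box and box3d_iou compose as follows for two axis-aligned unit cubes whose centers are offset by 0.5 along x; the intersection volume is 0.5 and the union is 1.5, so the IoU should come out near 1/3.

from common.box_utils import get_3d_box, box3d_iou   # assumes the repo root is on PYTHONPATH

corners_a = get_3d_box(center=(0.0, 0.0, 0.0), box_size=(1.0, 1.0, 1.0))
corners_b = get_3d_box(center=(0.5, 0.0, 0.0), box_size=(1.0, 1.0, 1.0))
print(box3d_iou(corners_a, corners_b))                # ~0.3333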
common/dist_utils.py ADDED
@@ -0,0 +1,220 @@
1
+ import functools
2
+ import pickle
3
+ import torch
4
+ import torch.distributed as dist
5
+
6
+ import logging
7
+ logger = logging.getLogger(__name__)
8
+
9
+ ########################### Basic utility for distributed info ################################
10
+
11
+ def is_dist_avail_and_initialized():
12
+ if not dist.is_available():
13
+ return False
14
+ if not dist.is_initialized():
15
+ return False
16
+ return True
17
+
18
+
19
+ def get_rank():
20
+ """
21
+ Get the rank of the current process.
22
+ """
23
+ if not is_dist_avail_and_initialized():
24
+ return 0
25
+ return dist.get_rank()
26
+
27
+
28
+ def get_world_size():
29
+ """
30
+ Get the size of the world.
31
+ """
32
+ if not is_dist_avail_and_initialized():
33
+ return 1
34
+ return dist.get_world_size()
35
+
36
+
37
+ def is_master_proc(num_gpus=8):
38
+ """
39
+ Determines if the current process is the master process on each node.
40
+ """
41
+ if is_dist_avail_and_initialized():
42
+ return dist.get_rank() % num_gpus == 0
43
+ else:
44
+ return True
45
+
46
+
47
+ def is_root_proc():
48
+ """
49
+ Determines if the current process is the root process.
50
+ """
51
+ if is_dist_avail_and_initialized():
52
+ return dist.get_rank() == 0
53
+ else:
54
+ return True
55
+
56
+
57
+ ############################## Data gathering across devices ##################################
58
+
59
+ def _serialize_to_tensor(data, group, max_size=1024):
60
+ """
61
+ Serialize the tensor to ByteTensor. Note that only `gloo` and `nccl`
62
+ backends are supported.
63
+ Args:
64
+ data (data): data to be serialized.
65
+ group (group): pytorch dist group.
66
+ Returns:
67
+ tensor (ByteTensor): tensor that serialized.
68
+ """
69
+ backend = dist.get_backend(group)
70
+ assert backend in ["gloo", "nccl"]
71
+ device = torch.device("cpu" if backend == "gloo" else "cuda")
72
+
73
+ buffer = pickle.dumps(data)
74
+ if len(buffer) > max_size ** 3:
75
+ logger.warning(
76
+ "Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
77
+ get_rank(), len(buffer) / (max_size ** 3), device
78
+ )
79
+ )
80
+ storage = torch.ByteStorage.from_buffer(buffer)
81
+ tensor = torch.ByteTensor(storage).to(device=device)
82
+ return tensor
83
+
84
+
85
+ def _pad_to_largest_tensor(tensor, group):
86
+ """
87
+ Padding all the tensors from different GPUs to the largest ones.
88
+ Args:
89
+ tensor (tensor): tensor to pad.
90
+ group (group): pytorch dist group.
91
+ Returns:
92
+ list[int]: size of the tensor, on each rank
93
+ Tensor: padded tensor that has the max size
94
+ """
95
+ world_size = dist.get_world_size(group=group)
96
+ assert (
97
+ world_size >= 1
98
+ ), "comm.gather/all_gather must be called from ranks within the given group!"
99
+ local_size = torch.tensor(
100
+ [tensor.numel()], dtype=torch.int64, device=tensor.device
101
+ )
102
+ size_list = [
103
+ torch.zeros([1], dtype=torch.int64, device=tensor.device)
104
+ for _ in range(world_size)
105
+ ]
106
+ dist.all_gather(size_list, local_size, group=group)
107
+ size_list = [int(size.item()) for size in size_list]
108
+
109
+ max_size = max(size_list)
110
+
111
+ # we pad the tensor because torch all_gather does not support
112
+ # gathering tensors of different shapes
113
+ if local_size != max_size:
114
+ padding = torch.zeros(
115
+ (max_size - local_size,), dtype=torch.uint8, device=tensor.device
116
+ )
117
+ tensor = torch.cat((tensor, padding), dim=0)
118
+ return size_list, tensor
119
+
120
+
121
+ def broadcast(object):
122
+ if isinstance(object, torch.Tensor):
123
+ dist.broadcast(tensor=object, src=0)
124
+ else:
125
+ sync_tensor = torch.Tensor([object]).cuda()
126
+ dist.broadcast(tensor=sync_tensor, src=0)
127
+ object = sync_tensor[0].item()
128
+ return object
129
+
130
+
131
+ def all_gather(tensors):
132
+ """
133
+ All gathers the provided tensors from all processes across machines.
134
+ Args:
135
+ tensors (list): tensors to perform all gather across all processes in
136
+ all machines.
137
+ """
138
+ gather_list = []
139
+ output_tensor = []
140
+ world_size = dist.get_world_size()
141
+ for tensor in tensors:
142
+ tensor_placeholder = [
143
+ torch.ones_like(tensor) for _ in range(world_size)
144
+ ]
145
+ dist.all_gather(tensor_placeholder, tensor, async_op=False)
146
+ gather_list.append(tensor_placeholder)
147
+ for gathered_tensor in gather_list:
148
+ output_tensor.append(torch.cat(gathered_tensor, dim=0))
149
+ return output_tensor
150
+
151
+
152
+ def all_reduce(tensors, average=True):
153
+ """
154
+ All reduce the provided tensors from all processes across machines.
155
+ Args:
156
+ tensors (list): tensors to perform all reduce across all processes in
157
+ all machines.
158
+ average (bool): scales the reduced tensor by the number of overall
159
+ processes across all machines.
160
+ """
161
+ for tensor in tensors:
162
+ dist.all_reduce(tensor, async_op=False)
163
+ if average:
164
+ world_size = dist.get_world_size()
165
+ for tensor in tensors:
166
+ tensor.mul_(1.0 / world_size)
167
+ return tensors
168
+
169
+
170
+ @functools.lru_cache()
171
+ def _get_global_gloo_group():
172
+ """
173
+ Return a process group based on gloo backend, containing all the ranks
174
+ The result is cached.
175
+ Returns:
176
+ (group): pytorch dist group.
177
+ """
178
+ if dist.get_backend() == "nccl":
179
+ return dist.new_group(backend="gloo")
180
+ else:
181
+ return dist.group.WORLD
182
+
183
+
184
+ def all_gather_unaligned(data, group=None):
185
+ """
186
+ Run all_gather on arbitrary picklable data (not necessarily tensors).
187
+
188
+ Args:
189
+ data: any picklable object
190
+ group: a torch process group. By default, will use a group which
191
+ contains all ranks on gloo backend.
192
+
193
+ Returns:
194
+ list[data]: list of data gathered from each rank
195
+ """
196
+ if get_world_size() == 1:
197
+ return [data]
198
+ if group is None:
199
+ group = _get_global_gloo_group()
200
+ if dist.get_world_size(group) == 1:
201
+ return [data]
202
+
203
+ tensor = _serialize_to_tensor(data, group)
204
+
205
+ size_list, tensor = _pad_to_largest_tensor(tensor, group)
206
+ max_size = max(size_list)
207
+
208
+ # receiving Tensor from all ranks
209
+ tensor_list = [
210
+ torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
211
+ for _ in size_list
212
+ ]
213
+ dist.all_gather(tensor_list, tensor, group=group)
214
+
215
+ data_list = []
216
+ for size, tensor in zip(size_list, tensor_list):
217
+ buffer = tensor.cpu().numpy().tobytes()[:size]
218
+ data_list.append(pickle.loads(buffer))
219
+
220
+ return data_list
common/io_utils.py ADDED
@@ -0,0 +1,133 @@
1
+ import csv
2
+ import pickle
3
+ import json
4
+ import cv2
5
+ import yaml
6
+ import numpy as np
7
+ from pathlib import Path
8
+ import torch
9
+ import open3d
10
+ from plyfile import PlyData
11
+
12
+ def make_dir(dir_path):
13
+ if not Path(dir_path).exists():
14
+ Path(dir_path).mkdir(parents=True, exist_ok=True)
15
+
16
+
17
+ def load_imgs(img_paths, option=cv2.IMREAD_COLOR):
18
+ imgs = [cv2.imread(img_path, option) for img_path in img_paths]
19
+ return imgs
20
+
21
+
22
+ def load_pickle(filename):
23
+ with Path(filename).open("rb") as f:
24
+ return pickle.load(f)
25
+
26
+
27
+ def save_pickle(data, filename):
28
+ with Path(filename).open("wb") as f:
29
+ pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
30
+
31
+
32
+ def load_json(filename):
33
+ with Path(filename).open("rb") as f:
34
+ return json.load(f)
35
+
36
+
37
+ def save_json(data, filename, save_pretty=True, sort_keys=False):
38
+ with Path(filename).open("w") as f:
39
+ if save_pretty:
40
+ f.write(json.dumps(data, indent=4, sort_keys=sort_keys))
41
+ else:
42
+ json.dump(data, f)
43
+
44
+
45
+ def load_jsonl(filename):
46
+ with Path(filename).open("r") as f:
47
+ return [json.loads(l.strip("\n")) for l in f.readlines()]
48
+
49
+
50
+ def save_jsonl(data, filename):
51
+ with Path(filename).open("w") as f:
52
+ f.write("\n".join([json.dumps(e) for e in data]))
53
+
54
+
55
+ def load_yaml(filename):
56
+ with Path(filename).open("r") as f:
57
+ return yaml.load(f, Loader=yaml.SafeLoader)
58
+
59
+
60
+ def save_yaml(data, filename):
61
+ with Path(filename).open("w") as f:
62
+ yaml.dump(data, f, default_flow_style=False)
63
+
64
+
65
+ def load_csv(filename, delimiter=","):
66
+ idx2key = None
67
+ contents = {}
68
+ with Path(filename).open("r") as f:
69
+ reader = csv.reader(f, delimiter=delimiter)
70
+ for l_idx, row in reader:
71
+ if l_idx == 0:
72
+ idx2key = row
73
+ for k_idx, key in enumerate(idx2key):
74
+ contents[key] = []
75
+ else:
76
+ for c_idx, col in enumerate(row):
77
+ contents[idx2key[c_idx]].append(col)
78
+ return contents, idx2key
79
+
80
+
81
+ def save_csv(data, filename, cols=None, delimiter=","):
82
+ with Path(filename).open("w") as f:
83
+ writer = csv.writer(f, delimiter=delimiter)
84
+ num_entries = len(data[list(data.keys())[0]])
85
+ assert cols is not None, "Must have column names for dumping csv files."
86
+ writer.writerow(cols)
87
+ for l_idx in range(num_entries):
88
+ row = [data[key][l_idx] for key in cols]
89
+ writer.writerow(row)
90
+
91
+
92
+ def load_numpy(filename):
93
+ return np.load(filename, allow_pickle=True)
94
+
95
+
96
+ def save_numpy(data, filename):
97
+ np.save(filename, data, allow_pickle=True)
98
+
99
+
100
+ def load_tensor(filename):
101
+ return torch.load(filename)
102
+
103
+
104
+ def save_tensor(data, filename):
105
+ torch.save(data, filename)
106
+
107
+
108
+ def load_ply(filepath):
109
+ with open(filepath, "rb") as f:
110
+ plydata = PlyData.read(f)
111
+ data = plydata.elements[0].data
112
+ coords = np.array([data["x"], data["y"], data["z"]], dtype=np.float32).T
113
+ feats = None
114
+ labels = None
115
+ if ({"red", "green", "blue"} - set(data.dtype.names)) == set():
116
+ feats = np.array([data["red"], data["green"], data["blue"]], dtype=np.uint8).T
117
+ if "label" in data.dtype.names:
118
+ labels = np.array(data["label"], dtype=np.uint32)
119
+ return coords, feats, labels
120
+
121
+
122
+ def load_ply_with_normals(filepath):
123
+ mesh = open3d.io.read_triangle_mesh(str(filepath))
124
+ if not mesh.has_vertex_normals():
125
+ mesh.compute_vertex_normals()
126
+ vertices = np.asarray(mesh.vertices)
127
+ normals = np.asarray(mesh.vertex_normals)
128
+
129
+ coords, feats, labels = load_ply(filepath)
130
+ assert np.allclose(coords, vertices), "different coordinates"
131
+ feats = np.hstack((feats, normals))
132
+
133
+ return coords, feats, labels
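For completeness, a tiny round trip through the JSONL helpers above (illustrative only, not part of the commit; the /tmp path is arbitrary, and importing common.io_utils requires the cv2, open3d, plyfile, yaml and torch dependencies it pulls in at module level).

from common.io_utils import save_jsonl, load_jsonl

records = [
    {"scan_id": "scene0000_00", "caption": "a small office"},
    {"scan_id": "scene0001_00", "caption": "a kitchen with an island"},
]
save_jsonl(records, "/tmp/example.jsonl")   # one JSON object per line
assert load_jsonl("/tmp/example.jsonl") == records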
common/launch_utils.py ADDED
@@ -0,0 +1,121 @@
1
+ import os
2
+ from pathlib import Path
3
+ import subprocess
4
+
5
+ import submitit
6
+
7
+
8
+ huggingface_fix = f"TRANSFORMERS_OFFLINE=1 CURL_CA_BUNDLE=''"
9
+
10
+
11
+ class SubmititLauncher:
12
+ def __init__(self, args):
13
+ self.args = args
14
+
15
+ def __call__(self):
16
+ host_name = os.popen(
17
+ "scontrol show hostnames $SLURM_JOB_NODELIST"
18
+ ).read().split("\n")[0]
19
+ self._set_gpu_args()
20
+ # Using Accelerate for launching
21
+ multi_gpu = "--multi_gpu" if self.args.num_nodes * self.args.gpu_per_node > 1 else ""
22
+ opts = " ".join(self.args.opts) if len(self.args.opts) > 0 else ""
23
+ opts += f" num_gpu={self.args.num_nodes * self.args.gpu_per_node} "
24
+ full_cfg_path = Path(self.args.config)
25
+ cfg_path, cfg_file = str(full_cfg_path.parent), str(full_cfg_path.name)
26
+ cmd = f"{huggingface_fix} accelerate launch --num_machines {self.args.num_nodes} \
27
+ --mixed_precision {self.args.mixed_precision} {multi_gpu} \
28
+ --num_processes {self.args.gpu_per_node * self.args.num_nodes} \
29
+ --num_cpu_threads_per_process {self.args.cpu_per_task} \
30
+ --main_process_ip {host_name} \
31
+ --main_process_port {self.args.port} \
32
+ --machine_rank {self.args.node_id} \
33
+ --dynamo_backend no \
34
+ {self.args.run_file} \
35
+ --config-path {cfg_path} \
36
+ --config-name {cfg_file} \
37
+ num_gpu={self.args.num_nodes * self.args.gpu_per_node} \
38
+ hydra.run.dir=. \
39
+ hydra.output_subdir=null \
40
+ hydra/job_logging=disabled \
41
+ hydra/hydra_logging=disabled {opts}"
42
+ subprocess.run(cmd, shell=True)
43
+
44
+ def _set_gpu_args(self):
45
+ job_env = submitit.JobEnvironment()
46
+ self.args.job_dir = str(self.args.job_dir).replace("%j", job_env.job_id)
47
+ self.args.node_id = int(job_env.global_rank / self.args.gpu_per_node)
48
+
49
+
50
+ def submitit_launch(args):
51
+ """
52
+ Multi node script launching with Submitit
53
+ """
54
+ additional_parameters = {}
55
+ if args.nodelist != "":
56
+ # if specifying node id
57
+ nodelist = f"{str(args.nodelist)}"
58
+ additional_parameters["nodelist"] = nodelist
59
+
60
+ executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
61
+ executor.update_parameters(
62
+ name=args.name,
63
+ mem_gb=args.mem_per_gpu * args.gpu_per_node * args.num_nodes,
64
+ gpus_per_node=args.gpu_per_node,
65
+ tasks_per_node=1,
66
+ cpus_per_task=args.gpu_per_node * args.cpu_per_task,
67
+ nodes=args.num_nodes,
68
+ slurm_qos=args.qos,
69
+ slurm_partition=args.partition,
70
+ slurm_account=args.account,
71
+ slurm_time=args.time * 60,
72
+ slurm_signal_delay_s=120,
73
+ slurm_additional_parameters=additional_parameters
74
+ )
75
+ launcher = SubmititLauncher(args)
76
+ job = executor.submit(launcher)
77
+ print(f"submitted job: {job.job_id}")
78
+
79
+
80
+ def accelerate_launch(args):
81
+ """
82
+ Single node script launching with Accelerate
83
+ """
84
+ opts = " ".join(args.opts) if len(args.opts) > 0 else ""
85
+ opts += f" num_gpu={args.num_nodes * args.gpu_per_node} "
86
+ multi_gpu = "--multi_gpu" if args.num_nodes * args.gpu_per_node > 1 else ""
87
+ full_cfg_path = Path(args.config)
88
+ cfg_path, cfg_file = str(full_cfg_path.parent), str(full_cfg_path.name)
89
+ cmd = f"{huggingface_fix} accelerate launch --num_machines {args.num_nodes} \
90
+ {multi_gpu} \
91
+ --mixed_precision {args.mixed_precision} \
92
+ --num_processes {args.gpu_per_node * args.num_nodes} \
93
+ --num_cpu_threads_per_process {args.cpu_per_task} \
94
+ --dynamo_backend no \
95
+ {args.run_file} \
96
+ --config-path {cfg_path} \
97
+ --config-name {cfg_file} \
98
+ num_gpu={args.num_nodes * args.gpu_per_node} \
99
+ hydra.run.dir=. \
100
+ hydra.output_subdir=null \
101
+ hydra/job_logging=disabled \
102
+ hydra/hydra_logging=disabled {opts}"
103
+ subprocess.run(cmd, shell=True)
104
+
105
+
106
+ def python_launch(args):
107
+ """
108
+ Vanilla python launcher for debugging purposes
109
+ """
110
+ opts = " ".join(args.opts) if len(args.opts) > 0 else ""
111
+ full_cfg_path = Path(args.config)
112
+ cfg_path, cfg_file = str(full_cfg_path.parent), str(full_cfg_path.name)
113
+ cmd = f"{huggingface_fix} python {args.run_file} " \
114
+ f"--config-path {cfg_path} " \
115
+ f"--config-name {cfg_file} " \
116
+ f"num_gpu=1 " \
117
+ f"hydra.run.dir=. " \
118
+ f"hydra.output_subdir=null " \
119
+ f"hydra/job_logging=disabled " \
120
+ f"hydra/hydra_logging=disabled {opts}"
121
+ subprocess.run(cmd, shell=True)