ArchEGraph committed
Commit eecbf34 · 0 parent(s)

Initial commit after history reset

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ scale-hf-logo.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,13 @@
+ auto_evals/
+ venv/
+ __pycache__/
+ .env
+ .ipynb_checkpoints
+ *.ipynb
+ .vscode/
+
+ eval-queue/
+ eval-results/
+ eval-queue-bk/
+ eval-results-bk/
+ logs/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,53 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ default_language_version:
+   python: python3
+
+ ci:
+   autofix_prs: true
+   autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
+   autoupdate_schedule: quarterly
+
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.3.0
+     hooks:
+       - id: check-yaml
+       - id: check-case-conflict
+       - id: detect-private-key
+       - id: check-added-large-files
+         args: ['--maxkb=1000']
+       - id: requirements-txt-fixer
+       - id: end-of-file-fixer
+       - id: trailing-whitespace
+
+   - repo: https://github.com/PyCQA/isort
+     rev: 5.12.0
+     hooks:
+       - id: isort
+         name: Format imports
+
+   - repo: https://github.com/psf/black
+     rev: 22.12.0
+     hooks:
+       - id: black
+         name: Format code
+         additional_dependencies: ['click==8.0.2']
+
+   - repo: https://github.com/charliermarsh/ruff-pre-commit
+     # Ruff version.
+     rev: 'v0.0.267'
+     hooks:
+       - id: ruff
Makefile ADDED
@@ -0,0 +1,13 @@
+ .PHONY: style quality
+
+
+ style:
+ 	python -m black --line-length 119 .
+ 	python -m isort .
+ 	ruff check --fix .
+
+
+ quality:
+ 	python -m black --check --line-length 119 .
+ 	python -m isort --check-only .
+ 	ruff check .
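
Note: `make style` rewrites files in place with black, isort, and ruff autofixes, while `make quality` runs the same tools in check-only mode, which is the variant suited to CI.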
README.md ADDED
@@ -0,0 +1,55 @@
+ ---
+ title: ArchEGraph Visualizer
+ colorFrom: blue
+ colorTo: green
+ sdk: gradio
+ app_file: app.py
+ pinned: false
+ ---
+
+ # ArchEGraph Visualizer
+
+ This Space visualizes sample files from:
+
+ - https://huggingface.co/datasets/ArchEGraph/ArchEGraph-demo
+
+ The UI flow is:
+
+ - Select a weather city first
+ - Select a matching building id for that city
+ - Optionally enable a custom time window for the weather and energy curves
+
+ For the selected weather/building pair, the app fetches:
+
+ - `geometry/<building_id>.npz`
+ - `building/<building_id>.npz`
+ - `weather/<weather_id>.npz`
+ - `energy/<sample_id>.npz`
+
+ It then renders four views:
+
+ - Geometry polygons
+ - Building graph
+ - Weather curves (selected time window)
+ - Energy curves (selected time window)
+
+ ## API
+
+ The Gradio API endpoint is exposed as:
+
+ - `/gradio_api/call/render_sample`
+
+ Input data format:
+
+ ```json
+ {
+   "weather_id": "Anchorage",
+   "building_id": "17",
+   "energy_zone_index": 6,
+   "use_custom_window": false,
+   "window_start_hour": 1,
+   "window_hours": 24
+ }
+ ```
+
+ See `agents.md` for curl examples.
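
For reference, the endpoint documented above can also be called from Python with `gradio_client` instead of curl. A minimal sketch, assuming the Space is published as `ArchEGraph/ArchEGraph-visualizer` (the actual Space id is not stated in this commit):

```python
from gradio_client import Client

# Hypothetical Space id -- replace with the actual Space name.
client = Client("ArchEGraph/ArchEGraph-visualizer")

# Positional arguments mirror render_sample's signature in app.py.
result = client.predict(
    "Anchorage",  # weather_id
    "17",         # building_id
    6,            # energy_zone_index
    False,        # use_custom_window
    1,            # window_start_hour
    24,           # window_hours
    api_name="/render_sample",
)

# The endpoint returns five image file paths plus a markdown summary.
geometry, graph, overlap, weather, energy, summary = result
print(summary)
```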
app.py ADDED
@@ -0,0 +1,401 @@
+ from __future__ import annotations
+
+ import csv
+ import random
+ import time
+ from dataclasses import dataclass
+ from functools import lru_cache
+ from pathlib import Path
+
+ import gradio as gr
+ from huggingface_hub import hf_hub_download
+
+ from visualization.building import visualize_graph, visualize_graph_overlap
+ from visualization.energy import visualize_energy
+ from visualization.geometry import visualize_geometry
+ from visualization.weather import visualize_weather
+
+ DATASET_REPO_ID = "ArchEGraph/ArchEGraph-demo"
+ OUTPUT_ROOT = Path("/tmp/archegraph_outputs")
+ DEFAULT_ENERGY_ZONE_INDEX = 0
+ DEFAULT_WEATHER_WINDOW_START_HOUR = 1
+ DEFAULT_WEATHER_WINDOW_HOURS = 24
+
+
+ @dataclass(frozen=True)
+ class SampleRecord:
+     sample_id: str
+     weather_id: str
+     building_id: str
+     energy_file: str
+     n_steps: int
+     n_spaces: int
+
+
+ @lru_cache(maxsize=1)
+ def _manifest_index() -> dict[str, SampleRecord]:
+     manifest_path = hf_hub_download(
+         repo_id=DATASET_REPO_ID,
+         repo_type="dataset",
+         filename="manifest.csv",
+     )
+
+     out: dict[str, SampleRecord] = {}
+     with Path(manifest_path).open("r", encoding="utf-8", newline="") as f:
+         reader = csv.DictReader(f)
+         for row in reader:
+             sample_id = (row.get("sample_id") or "").strip()
+             weather_id = (row.get("weather_id") or "").strip()
+             building_id = (row.get("building_id") or "").strip()
+             energy_file = (row.get("energy_file") or "").strip()
+             if not sample_id or not weather_id or not energy_file:
+                 continue
+
+             if not building_id:
+                 building_id = sample_id.split("__", 1)[0].lstrip("0") or "0"
+
+             n_steps = int(row.get("n_steps") or 0)
+             n_spaces = int(row.get("n_spaces") or 0)
+             out[sample_id] = SampleRecord(
+                 sample_id=sample_id,
+                 weather_id=weather_id,
+                 building_id=building_id,
+                 energy_file=energy_file,
+                 n_steps=n_steps,
+                 n_spaces=n_spaces,
+             )
+
+     if not out:
+         raise RuntimeError("manifest.csv is empty or invalid.")
+
+     return out
+
+
+ def _numeric_sort_key(value: str) -> tuple[int, str]:
+     text = (value or "").strip()
+     if text.isdigit():
+         return (0, f"{int(text):09d}")
+     return (1, text.lower())
+
+
+ @lru_cache(maxsize=1)
+ def _weather_to_buildings() -> dict[str, list[str]]:
+     mapping: dict[str, set[str]] = {}
+     for rec in _manifest_index().values():
+         mapping.setdefault(rec.weather_id, set()).add(rec.building_id)
+
+     return {k: sorted(v, key=_numeric_sort_key) for k, v in mapping.items()}
+
+
+ @lru_cache(maxsize=1)
+ def _pair_to_sample() -> dict[tuple[str, str], SampleRecord]:
+     out: dict[tuple[str, str], SampleRecord] = {}
+     for rec in _manifest_index().values():
+         out[(rec.weather_id, rec.building_id)] = rec
+     return out
+
+
+ def _weather_choices() -> list[str]:
+     return sorted(_weather_to_buildings().keys(), key=lambda x: x.lower())
+
+
+ def _building_choices(weather_id: str) -> list[str]:
+     return _weather_to_buildings().get((weather_id or "").strip(), [])
+
+
+ def _default_selection() -> tuple[str, str]:
+     weather_choices = _weather_choices()
+     if not weather_choices:
+         raise RuntimeError("No weather options found in manifest")
+
+     weather = weather_choices[0]
+     buildings = _building_choices(weather)
+     if not buildings:
+         raise RuntimeError(f"No building options for weather '{weather}'")
+     return weather, buildings[0]
+
+
+ def _safe_dropdown_defaults() -> tuple[list[str], str | None, list[str], str | None]:
+     try:
+         weather_choices = _weather_choices()
+         if not weather_choices:
+             return [], None, [], None
+         weather, building = _default_selection()
+         building_choices = _building_choices(weather)
+         return weather_choices, weather, building_choices, building
+     except Exception:
+         return [], None, [], None
+
+
+ def _resolve_record(weather_id: str, building_id: str) -> SampleRecord:
+     weather = (weather_id or "").strip()
+     building = (building_id or "").strip()
+     pair_map = _pair_to_sample()
+     rec = pair_map.get((weather, building))
+     if rec is not None:
+         return rec
+
+     weather_opts = _weather_choices()
+     if not weather_opts:
+         raise ValueError("No weather options available")
+     if weather not in weather_opts:
+         raise ValueError(f"Unknown weather city '{weather}'. Example values: {', '.join(weather_opts[:8])}")
+
+     building_opts = _building_choices(weather)
+     raise ValueError(
+         f"Unknown building id '{building}' for city '{weather}'. "
+         f"Available building ids: {', '.join(building_opts[:12])}"
+     )
+
+
+ def _download_modalities(record: SampleRecord) -> tuple[Path, Path, Path, Path]:
+     building_key = record.sample_id.split("__", 1)[0]
+
+     geometry_npz = hf_hub_download(
+         repo_id=DATASET_REPO_ID,
+         repo_type="dataset",
+         filename=f"geometry/{building_key}.npz",
+     )
+     graph_npz = hf_hub_download(
+         repo_id=DATASET_REPO_ID,
+         repo_type="dataset",
+         filename=f"building/{building_key}.npz",
+     )
+     weather_npz = hf_hub_download(
+         repo_id=DATASET_REPO_ID,
+         repo_type="dataset",
+         filename=f"weather/{record.weather_id}.npz",
+     )
+     energy_npz = hf_hub_download(
+         repo_id=DATASET_REPO_ID,
+         repo_type="dataset",
+         filename=f"energy/{record.energy_file}",
+     )
+
+     return Path(geometry_npz), Path(graph_npz), Path(weather_npz), Path(energy_npz)
+
+
+ def _output_paths(sample_id: str) -> tuple[Path, Path, Path, Path, Path]:
+     OUTPUT_ROOT.mkdir(parents=True, exist_ok=True)
+     safe = sample_id.replace("/", "_").replace("\\", "_")
+     run_dir = OUTPUT_ROOT / f"{safe}_{int(time.time() * 1000)}"
+     run_dir.mkdir(parents=True, exist_ok=True)
+
+     return (
+         run_dir / "geometry.png",
+         run_dir / "graph.png",
+         run_dir / "overlap.png",
+         run_dir / "weather.png",
+         run_dir / "energy.png",
+     )
+
+
+ def update_building_dropdown(weather_id: str) -> gr.update:
+     buildings = _building_choices(weather_id)
+     value = buildings[0] if buildings else None
+     return gr.update(choices=buildings, value=value)
+
+
+ def render_sample(
+     weather_id: str,
+     building_id: str,
+     energy_zone_index: int,
+     use_custom_window: bool,
+     window_start_hour: int,
+     window_hours: int,
+ ) -> tuple[str, str, str, str, str, str]:
+     record = _resolve_record(weather_id=weather_id, building_id=building_id)
+
+     if use_custom_window:
+         start_hour = max(1, int(window_start_hour))
+         hours = max(1, int(window_hours))
+     else:
+         start_hour = DEFAULT_WEATHER_WINDOW_START_HOUR
+         hours = DEFAULT_WEATHER_WINDOW_HOURS
+
+     zone_idx = max(0, int(energy_zone_index)) if energy_zone_index is not None else DEFAULT_ENERGY_ZONE_INDEX
+
+     try:
+         geometry_npz, graph_npz, weather_npz, energy_npz = _download_modalities(record)
+         out_geometry, out_graph, out_overlap, out_weather, out_energy = _output_paths(record.sample_id)
+
+         visualize_geometry(geometry_npz=geometry_npz, output_png=out_geometry)
+         visualize_graph(graph_npz=graph_npz, geometry_npz=geometry_npz, output_png=out_graph)
+         visualize_graph_overlap(graph_npz=graph_npz, geometry_npz=geometry_npz, energy_npz=energy_npz, output_png=out_overlap)
+         visualize_weather(
+             weather_npz=weather_npz,
+             output_png=out_weather,
+             start_hour=start_hour,
+             window_hours=hours,
+         )
+         visualize_energy(
+             energy_npz=energy_npz,
+             output_png=out_energy,
+             zone_index=zone_idx,
+             start_hour=start_hour,
+             window_hours=hours,
+         )
+
+     except Exception as exc:
+         raise gr.Error(f"Failed to render sample {record.sample_id}: {exc}") from exc
+
+     if use_custom_window:
+         window_text = f"custom window: start={start_hour}, hours={hours}"
+     else:
+         window_text = "default window: Jan-1 first 24 hours"
+
+     summary = (
+         f"Rendered **{record.sample_id}** from `{DATASET_REPO_ID}`  \n"
+         f"weather_id: `{record.weather_id}`  \n"
+         f"building_id: `{record.building_id}`  \n"
+         f"n_steps: `{record.n_steps}`  \n"
+         f"n_spaces: `{record.n_spaces}`  \n"
+         f"zone_index: `{zone_idx}`  \n"
+         f"{window_text}"
+     )
+
+     return str(out_geometry), str(out_graph), str(out_overlap), str(out_weather), str(out_energy), summary
+
+
+ def pick_random_sample() -> tuple[str, gr.update]:
+     rec = random.choice(list(_manifest_index().values()))
+     choices = _building_choices(rec.weather_id)
+     return rec.weather_id, gr.update(choices=choices, value=rec.building_id)
+
+
+ def _startup_note() -> str:
+     try:
+         total = len(_manifest_index())
+         weather_count = len(_weather_choices())
+         return f"Manifest loaded: {total} samples, {weather_count} weather cities from {DATASET_REPO_ID}."
+     except Exception as exc:
+         return f"Manifest will be loaded lazily on first run. Reason: {exc}"
+
+
+ default_weathers, default_weather, default_buildings, default_building = _safe_dropdown_defaults()
+
+
+ APP_CSS = """
+ .gradio-container {
+     max-width: 1920px !important;
+ }
+ #ctrl-row-1, #ctrl-row-2, #ctrl-row-3 {
+     gap: 8px !important;
+ }
+ #ctrl-row-1 .gr-block, #ctrl-row-2 .gr-block, #ctrl-row-3 .gr-block {
+     padding-top: 4px !important;
+     padding-bottom: 4px !important;
+ }
+ #status-box {
+     margin-top: 2px !important;
+     margin-bottom: 4px !important;
+ }
+ #viz-row-all {
+     gap: 8px !important;
+ }
+ #viz-row-all img {
+     object-fit: contain !important;
+ }
+ """
+
+
+ with gr.Blocks(title="ArchEGraph Visualizer", css=APP_CSS) as demo:
+     gr.Markdown(
+         "# ArchEGraph Visualizer\n"
+         "Visualize geometry, graph, weather and energy files from "
+         "[ArchEGraph/ArchEGraph-demo](https://huggingface.co/datasets/ArchEGraph/ArchEGraph-demo)."
+     )
+     gr.Markdown(_startup_note())
+
+     with gr.Row(elem_id="ctrl-row-1"):
+         weather_dropdown = gr.Dropdown(
+             label="Weather City",
+             choices=default_weathers,
+             value=default_weather,
+             allow_custom_value=False,
+             scale=2,
+         )
+         building_dropdown = gr.Dropdown(
+             label="Building ID",
+             choices=default_buildings,
+             value=default_building,
+             allow_custom_value=True,
+             scale=2,
+         )
+         energy_zone_index_input = gr.Slider(
+             label="Energy Zone Index",
+             minimum=0,
+             maximum=24,
+             step=1,
+             value=DEFAULT_ENERGY_ZONE_INDEX,
+             scale=1,
+         )
+
+     with gr.Row(elem_id="ctrl-row-2"):
+         use_custom_window_input = gr.Checkbox(
+             label="Custom Window",
+             value=False,
+             scale=1,
+         )
+         window_start_hour_input = gr.Slider(
+             label="Start Hour",
+             minimum=1,
+             maximum=8760,
+             step=1,
+             value=DEFAULT_WEATHER_WINDOW_START_HOUR,
+             scale=2,
+         )
+         window_hours_input = gr.Slider(
+             label="Window Hours",
+             minimum=1,
+             maximum=8760,
+             step=1,
+             value=DEFAULT_WEATHER_WINDOW_HOURS,
+             scale=2,
+         )
+
+     with gr.Row(elem_id="ctrl-row-3"):
+         run_btn = gr.Button("Visualize", variant="primary")
+         random_btn = gr.Button("Pick Random Sample")
+
+     status_md = gr.Markdown(elem_id="status-box")
+
+     with gr.Row(elem_id="viz-row-all"):
+         geometry_img = gr.Image(label="Geometry", type="filepath", height=215)
+         graph_img = gr.Image(label="Building", type="filepath", height=215)
+         overlap_img = gr.Image(label="Overlap", type="filepath", height=215)
+         weather_img = gr.Image(label="Weather (line)", type="filepath", height=215)
+         energy_img = gr.Image(label="Energy (selected zone)", type="filepath", height=215)
+
+     weather_dropdown.change(
+         fn=update_building_dropdown,
+         inputs=[weather_dropdown],
+         outputs=[building_dropdown],
+     )
+
+     run_btn.click(
+         fn=render_sample,
+         inputs=[
+             weather_dropdown,
+             building_dropdown,
+             energy_zone_index_input,
+             use_custom_window_input,
+             window_start_hour_input,
+             window_hours_input,
+         ],
+         outputs=[geometry_img, graph_img, overlap_img, weather_img, energy_img, status_md],
+         api_name="render_sample",
+     )
+
+     random_btn.click(
+         fn=pick_random_sample,
+         inputs=None,
+         outputs=[weather_dropdown, building_dropdown],
+         api_name="pick_random_sample",
+     )
+
+
+ demo.queue()
+
+ if __name__ == "__main__":
+     demo.launch()
pyproject.toml ADDED
@@ -0,0 +1,13 @@
+ [tool.ruff]
+ # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
+ select = ["E", "F"]
+ ignore = ["E501"]  # line too long (black takes care of this)
+ line-length = 119
+ fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
+
+ [tool.isort]
+ profile = "black"
+ line_length = 119
+
+ [tool.black]
+ line-length = 119
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio>=5.0.0
+ huggingface_hub>=0.26.0
+ matplotlib>=3.8.0
+ numpy>=1.26.0
src/about.py ADDED
@@ -0,0 +1,72 @@
+ from dataclasses import dataclass
+ from enum import Enum
+
+
+ @dataclass
+ class Task:
+     benchmark: str
+     metric: str
+     col_name: str
+
+
+ # Select your tasks here
+ # ---------------------------------------------------
+ class Tasks(Enum):
+     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
+     task0 = Task("anli_r1", "acc", "ANLI")
+     task1 = Task("logiqa", "acc_norm", "LogiQA")
+
+ NUM_FEWSHOT = 0  # Change with your few shot
+ # ---------------------------------------------------
+
+
+ # Your leaderboard name
+ TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
+
+ # What does your leaderboard evaluate?
+ INTRODUCTION_TEXT = """
+ Intro text
+ """
+
+ # Which evaluations are you running? How can people reproduce what you have?
+ LLM_BENCHMARKS_TEXT = f"""
+ ## How it works
+
+ ## Reproducibility
+ To reproduce our results, here are the commands you can run:
+
+ """
+
+ EVALUATION_QUEUE_TEXT = """
+ ## Some good practices before submitting a model
+
+ ### 1) Make sure you can load your model and tokenizer using AutoClasses:
+ ```python
+ from transformers import AutoConfig, AutoModel, AutoTokenizer
+ config = AutoConfig.from_pretrained("your model name", revision=revision)
+ model = AutoModel.from_pretrained("your model name", revision=revision)
+ tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
+ ```
+ If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
+
+ Note: make sure your model is public!
+ Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it; stay posted!
+
+ ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
+ It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
+
+ ### 3) Make sure your model has an open license!
+ This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
+
+ ### 4) Fill out your model card
+ When we add extra information about models to the leaderboard, it will be automatically taken from the model card.
+
+ ## In case of model failure
+ If your model is displayed in the `FAILED` category, its execution stopped.
+ Make sure you have followed the above steps first.
+ If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
+ """
+
+ CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+ CITATION_BUTTON_TEXT = r"""
+ """
src/display/css_html_js.py ADDED
@@ -0,0 +1,105 @@
+ custom_css = """
+
+ .markdown-text {
+     font-size: 16px !important;
+ }
+
+ #models-to-add-text {
+     font-size: 18px !important;
+ }
+
+ #citation-button span {
+     font-size: 16px !important;
+ }
+
+ #citation-button textarea {
+     font-size: 16px !important;
+ }
+
+ #citation-button > label > button {
+     margin: 6px;
+     transform: scale(1.3);
+ }
+
+ #leaderboard-table {
+     margin-top: 15px
+ }
+
+ #leaderboard-table-lite {
+     margin-top: 15px
+ }
+
+ #search-bar-table-box > div:first-child {
+     background: none;
+     border: none;
+ }
+
+ #search-bar {
+     padding: 0px;
+ }
+
+ /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
+ #leaderboard-table td:nth-child(2),
+ #leaderboard-table th:nth-child(2) {
+     max-width: 400px;
+     overflow: auto;
+     white-space: nowrap;
+ }
+
+ .tab-buttons button {
+     font-size: 20px;
+ }
+
+ #scale-logo {
+     border-style: none !important;
+     box-shadow: none;
+     display: block;
+     margin-left: auto;
+     margin-right: auto;
+     max-width: 600px;
+ }
+
+ #scale-logo .download {
+     display: none;
+ }
+ #filter_type {
+     border: 0;
+     padding-left: 0;
+     padding-top: 0;
+ }
+ #filter_type label {
+     display: flex;
+ }
+ #filter_type label > span {
+     margin-top: var(--spacing-lg);
+     margin-right: 0.5em;
+ }
+ #filter_type label > .wrap {
+     width: 103px;
+ }
+ #filter_type label > .wrap .wrap-inner {
+     padding: 2px;
+ }
+ #filter_type label > .wrap .wrap-inner input {
+     width: 1px
+ }
+ #filter-columns-type {
+     border: 0;
+     padding: 0.5;
+ }
+ #filter-columns-size {
+     border: 0;
+     padding: 0.5;
+ }
+ #box-filter > .form {
+     border: 0
+ }
+ """
+
+ get_window_url_params = """
+     function(url_params) {
+         const params = new URLSearchParams(window.location.search);
+         url_params = Object.fromEntries(params);
+         return url_params;
+     }
+     """
src/display/formatting.py ADDED
@@ -0,0 +1,27 @@
+ def model_hyperlink(link, model_name):
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+
+
+ def make_clickable_model(model_name):
+     link = f"https://huggingface.co/{model_name}"
+     return model_hyperlink(link, model_name)
+
+
+ def styled_error(error):
+     return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
+
+
+ def styled_warning(warn):
+     return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
+
+
+ def styled_message(message):
+     return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
+
+
+ def has_no_nan_values(df, columns):
+     return df[columns].notna().all(axis=1)
+
+
+ def has_nan_values(df, columns):
+     return df[columns].isna().any(axis=1)
src/display/utils.py ADDED
@@ -0,0 +1,110 @@
+ from dataclasses import dataclass, make_dataclass
+ from enum import Enum
+
+ import pandas as pd
+
+ from src.about import Tasks
+
+
+ def fields(raw_class):
+     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
+
+
+ # These classes are for user-facing column names,
+ # to avoid having to change them all around the code
+ # when a modification is needed
+ @dataclass
+ class ColumnContent:
+     name: str
+     type: str
+     displayed_by_default: bool
+     hidden: bool = False
+     never_hidden: bool = False
+
+
+ ## Leaderboard columns
+ auto_eval_column_dict = []
+ # Init
+ auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
+ auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
+ # Scores
+ auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+ for task in Tasks:
+     auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
+ # Model information
+ auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
+ auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
+ auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
+ auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
+ auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
+ auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
+ auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
+ auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
+ auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+
+ # We use make_dataclass to dynamically fill the scores from Tasks
+ AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
+
+
+ ## For the queue columns in the submission tab
+ @dataclass(frozen=True)
+ class EvalQueueColumn:  # Queue column
+     model = ColumnContent("model", "markdown", True)
+     revision = ColumnContent("revision", "str", True)
+     private = ColumnContent("private", "bool", True)
+     precision = ColumnContent("precision", "str", True)
+     weight_type = ColumnContent("weight_type", "str", "Original")
+     status = ColumnContent("status", "str", True)
+
+
+ ## All the model information that we might need
+ @dataclass
+ class ModelDetails:
+     name: str
+     display_name: str = ""
+     symbol: str = ""  # emoji
+
+
+ class ModelType(Enum):
+     PT = ModelDetails(name="pretrained", symbol="🟢")
+     FT = ModelDetails(name="fine-tuned", symbol="🔶")
+     IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
+     RL = ModelDetails(name="RL-tuned", symbol="🟦")
+     Unknown = ModelDetails(name="", symbol="?")
+
+     def to_str(self, separator=" "):
+         return f"{self.value.symbol}{separator}{self.value.name}"
+
+     @staticmethod
+     def from_str(type):
+         if "fine-tuned" in type or "🔶" in type:
+             return ModelType.FT
+         if "pretrained" in type or "🟢" in type:
+             return ModelType.PT
+         if "RL-tuned" in type or "🟦" in type:
+             return ModelType.RL
+         if "instruction-tuned" in type or "⭕" in type:
+             return ModelType.IFT
+         return ModelType.Unknown
+
+
+ class WeightType(Enum):
+     Adapter = ModelDetails("Adapter")
+     Original = ModelDetails("Original")
+     Delta = ModelDetails("Delta")
+
+
+ class Precision(Enum):
+     float16 = ModelDetails("float16")
+     bfloat16 = ModelDetails("bfloat16")
+     Unknown = ModelDetails("?")
+
+     def from_str(precision):
+         if precision in ["torch.float16", "float16"]:
+             return Precision.float16
+         if precision in ["torch.bfloat16", "bfloat16"]:
+             return Precision.bfloat16
+         return Precision.Unknown
+
+
+ # Column selection
+ COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
+
+ EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
+ EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
+
+ BENCHMARK_COLS = [t.value.col_name for t in Tasks]
src/envs.py ADDED
@@ -0,0 +1,25 @@
+ import os
+
+ from huggingface_hub import HfApi
+
+ # Info to change for your repository
+ # ----------------------------------
+ TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org
+
+ OWNER = "demo-leaderboard-backend"  # Change to your org - don't forget to create a results and request dataset, with the correct format!
+ # ----------------------------------
+
+ REPO_ID = f"{OWNER}/leaderboard"
+ QUEUE_REPO = f"{OWNER}/requests"
+ RESULTS_REPO = f"{OWNER}/results"
+
+ # If you set up a cache later, just change HF_HOME
+ CACHE_PATH = os.getenv("HF_HOME", ".")
+
+ # Local caches
+ EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
+ EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
+ EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
+ EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
+
+ API = HfApi(token=TOKEN)
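
Note: `TOKEN` is read from the `HF_TOKEN` environment variable; on a Hugging Face Space this would typically be configured as a repository secret so the `HfApi` client can read and write the queue and results datasets.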
src/leaderboard/read_evals.py ADDED
@@ -0,0 +1,196 @@
+ import glob
+ import json
+ import os
+ from dataclasses import dataclass
+
+ import dateutil
+ import numpy as np
+
+ from src.display.formatting import make_clickable_model
+ from src.display.utils import AutoEvalColumn, ModelType, Precision, Tasks, WeightType
+ from src.submission.check_validity import is_model_on_hub
+
+
+ @dataclass
+ class EvalResult:
+     """Represents one full evaluation. Built from a combination of the result and request files for a given run."""
+
+     eval_name: str  # org_model_precision (uid)
+     full_model: str  # org/model (path on hub)
+     org: str
+     model: str
+     revision: str  # commit hash, "" if main
+     results: dict
+     precision: Precision = Precision.Unknown
+     model_type: ModelType = ModelType.Unknown  # Pretrained, fine-tuned, ...
+     weight_type: WeightType = WeightType.Original  # Original or Adapter
+     architecture: str = "Unknown"
+     license: str = "?"
+     likes: int = 0
+     num_params: int = 0
+     date: str = ""  # submission date of request file
+     still_on_hub: bool = False
+
+     @classmethod
+     def init_from_json_file(cls, json_filepath):
+         """Inits the result from the specific model result file"""
+         with open(json_filepath) as fp:
+             data = json.load(fp)
+
+         config = data.get("config")
+
+         # Precision
+         precision = Precision.from_str(config.get("model_dtype"))
+
+         # Get model and org
+         org_and_model = config.get("model_name", config.get("model_args", None))
+         org_and_model = org_and_model.split("/", 1)
+
+         if len(org_and_model) == 1:
+             org = None
+             model = org_and_model[0]
+             result_key = f"{model}_{precision.value.name}"
+         else:
+             org = org_and_model[0]
+             model = org_and_model[1]
+             result_key = f"{org}_{model}_{precision.value.name}"
+         full_model = "/".join(org_and_model)
+
+         still_on_hub, _, model_config = is_model_on_hub(
+             full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
+         )
+         architecture = "?"
+         if model_config is not None:
+             architectures = getattr(model_config, "architectures", None)
+             if architectures:
+                 architecture = ";".join(architectures)
+
+         # Extract results available in this file (some results are split across several files)
+         results = {}
+         for task in Tasks:
+             task = task.value
+
+             # We average all scores of a given metric (not all metrics are present in all files)
+             accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
+             if accs.size == 0 or any([acc is None for acc in accs]):
+                 continue
+
+             mean_acc = np.mean(accs) * 100.0
+             results[task.benchmark] = mean_acc
+
+         return cls(
+             eval_name=result_key,
+             full_model=full_model,
+             org=org,
+             model=model,
+             results=results,
+             precision=precision,
+             revision=config.get("model_sha", ""),
+             still_on_hub=still_on_hub,
+             architecture=architecture,
+         )
+
+     def update_with_request_file(self, requests_path):
+         """Finds the relevant request file for the current model and updates info with it"""
+         request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
+
+         try:
+             with open(request_file, "r") as f:
+                 request = json.load(f)
+             self.model_type = ModelType.from_str(request.get("model_type", ""))
+             self.weight_type = WeightType[request.get("weight_type", "Original")]
+             self.license = request.get("license", "?")
+             self.likes = request.get("likes", 0)
+             self.num_params = request.get("params", 0)
+             self.date = request.get("submitted_time", "")
+         except Exception:
+             print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
+
+     def to_dict(self):
+         """Converts the Eval Result to a dict compatible with our dataframe display"""
+         average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
+         data_dict = {
+             "eval_name": self.eval_name,  # not a column, just a save name,
+             AutoEvalColumn.precision.name: self.precision.value.name,
+             AutoEvalColumn.model_type.name: self.model_type.value.name,
+             AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
+             AutoEvalColumn.weight_type.name: self.weight_type.value.name,
+             AutoEvalColumn.architecture.name: self.architecture,
+             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
+             AutoEvalColumn.revision.name: self.revision,
+             AutoEvalColumn.average.name: average,
+             AutoEvalColumn.license.name: self.license,
+             AutoEvalColumn.likes.name: self.likes,
+             AutoEvalColumn.params.name: self.num_params,
+             AutoEvalColumn.still_on_hub.name: self.still_on_hub,
+         }
+
+         for task in Tasks:
+             data_dict[task.value.col_name] = self.results[task.value.benchmark]
+
+         return data_dict
+
+
+ def get_request_file_for_model(requests_path, model_name, precision):
+     """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
+     request_files = os.path.join(
+         requests_path,
+         f"{model_name}_eval_request_*.json",
+     )
+     request_files = glob.glob(request_files)
+
+     # Select correct request file (precision)
+     request_file = ""
+     request_files = sorted(request_files, reverse=True)
+     for tmp_request_file in request_files:
+         with open(tmp_request_file, "r") as f:
+             req_content = json.load(f)
+             if (
+                 req_content["status"] in ["FINISHED"]
+                 and req_content["precision"] == precision.split(".")[-1]
+             ):
+                 request_file = tmp_request_file
+     return request_file
+
+
+ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
+     """From the path of the results folder root, extract all needed info for results"""
+     model_result_filepaths = []
+
+     for root, _, files in os.walk(results_path):
+         # We should only have json files in model results
+         if len(files) == 0 or any([not f.endswith(".json") for f in files]):
+             continue
+
+         # Sort the files by date
+         try:
+             files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
+         except dateutil.parser._parser.ParserError:
+             files = [files[-1]]
+
+         for file in files:
+             model_result_filepaths.append(os.path.join(root, file))
+
+     eval_results = {}
+     for model_result_filepath in model_result_filepaths:
+         # Creation of result
+         eval_result = EvalResult.init_from_json_file(model_result_filepath)
+         eval_result.update_with_request_file(requests_path)
+
+         # Store results of the same eval together
+         eval_name = eval_result.eval_name
+         if eval_name in eval_results.keys():
+             eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
+         else:
+             eval_results[eval_name] = eval_result
+
+     results = []
+     for v in eval_results.values():
+         try:
+             v.to_dict()  # we test if the dict version is complete
+             results.append(v)
+         except KeyError:  # not all eval values present
+             continue
+
+     return results
src/populate.py ADDED
@@ -0,0 +1,58 @@
+ import json
+ import os
+
+ import pandas as pd
+
+ from src.display.formatting import has_no_nan_values, make_clickable_model
+ from src.display.utils import AutoEvalColumn, EvalQueueColumn
+ from src.leaderboard.read_evals import get_raw_eval_results
+
+
+ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
+     """Creates a dataframe from all the individual experiment results"""
+     raw_data = get_raw_eval_results(results_path, requests_path)
+     all_data_json = [v.to_dict() for v in raw_data]
+
+     df = pd.DataFrame.from_records(all_data_json)
+     df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
+     df = df[cols].round(decimals=2)
+
+     # filter out if any of the benchmarks have not been produced
+     df = df[has_no_nan_values(df, benchmark_cols)]
+     return df
+
+
+ def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
+     """Creates the different dataframes for the evaluation queue requests"""
+     entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
+     all_evals = []
+
+     for entry in entries:
+         if ".json" in entry:
+             file_path = os.path.join(save_path, entry)
+             with open(file_path) as fp:
+                 data = json.load(fp)
+
+             data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+             data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+
+             all_evals.append(data)
+         elif ".md" not in entry:
+             # this is a folder
+             sub_entries = [
+                 e
+                 for e in os.listdir(f"{save_path}/{entry}")
+                 if os.path.isfile(os.path.join(save_path, entry, e)) and not e.startswith(".")
+             ]
+             for sub_entry in sub_entries:
+                 file_path = os.path.join(save_path, entry, sub_entry)
+                 with open(file_path) as fp:
+                     data = json.load(fp)
+
+                 data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+                 data[EvalQueueColumn.revision.name] = data.get("revision", "main")
+                 all_evals.append(data)
+
+     pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
+     running_list = [e for e in all_evals if e["status"] == "RUNNING"]
+     finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
+     df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
+     df_running = pd.DataFrame.from_records(running_list, columns=cols)
+     df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
+     return df_finished[cols], df_running[cols], df_pending[cols]
src/submission/check_validity.py ADDED
@@ -0,0 +1,99 @@
+ import json
+ import os
+ from collections import defaultdict
+
+ import huggingface_hub
+ from huggingface_hub import ModelCard
+ from huggingface_hub.hf_api import ModelInfo
+ from transformers import AutoConfig
+ from transformers.models.auto.tokenization_auto import AutoTokenizer
+
+
+ def check_model_card(repo_id: str) -> tuple[bool, str]:
+     """Checks if the model card and license exist and have been filled"""
+     try:
+         card = ModelCard.load(repo_id)
+     except huggingface_hub.utils.EntryNotFoundError:
+         return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
+
+     # Enforce license metadata
+     if card.data.license is None:
+         if not ("license_name" in card.data and "license_link" in card.data):
+             return False, (
+                 "License not found. Please add a license to your model card using the `license` metadata or a"
+                 " `license_name`/`license_link` pair."
+             )
+
+     # Enforce card content
+     if len(card.text) < 200:
+         return False, "Please add a description to your model card, it is too short."
+
+     return True, ""
+
+
+ def is_model_on_hub(
+     model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False
+ ) -> tuple[bool, str | None, object | None]:
+     """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
+     try:
+         config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
+         if test_tokenizer:
+             try:
+                 AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
+             except ValueError as e:
+                 return (
+                     False,
+                     f"uses a tokenizer which is not in a transformers release: {e}",
+                     None,
+                 )
+             except Exception:
+                 return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
+         return True, None, config
+
+     except ValueError:
+         return (
+             False,
+             "needs to be launched with `trust_remote_code=True`. For safety reasons, we do not allow these models to be automatically submitted to the leaderboard.",
+             None,
+         )
+
+     except Exception:
+         return False, "was not found on the hub!", None
+
+
+ def get_model_size(model_info: ModelInfo, precision: str):
+     """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
+     try:
+         model_size = round(model_info.safetensors["total"] / 1e9, 3)
+     except (AttributeError, TypeError):
+         return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
+
+     size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
+     model_size = size_factor * model_size
+     return model_size
+
+
+ def get_model_arch(model_info: ModelInfo):
+     """Gets the model architecture from the configuration"""
+     return model_info.config.get("architectures", "Unknown")
+
+
+ def already_submitted_models(requested_models_dir: str) -> tuple[set[str], dict[str, list[str]]]:
+     """Gathers the set of already submitted models to avoid duplicates"""
+     depth = 1
+     file_names = []
+     users_to_submission_dates = defaultdict(list)
+
+     for root, _, files in os.walk(requested_models_dir):
+         current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
+         if current_depth == depth:
+             for file in files:
+                 if not file.endswith(".json"):
+                     continue
+                 with open(os.path.join(root, file), "r") as f:
+                     info = json.load(f)
+                     file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
+
+                     # Select organisation
+                     if info["model"].count("/") == 0 or "submitted_time" not in info:
+                         continue
+                     organisation, _ = info["model"].split("/")
+                     users_to_submission_dates[organisation].append(info["submitted_time"])
+
+     return set(file_names), users_to_submission_dates
src/submission/submit.py ADDED
@@ -0,0 +1,119 @@
+ import json
+ import os
+ from datetime import datetime, timezone
+
+ from src.display.formatting import styled_error, styled_message, styled_warning
+ from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO, TOKEN
+ from src.submission.check_validity import (
+     already_submitted_models,
+     check_model_card,
+     get_model_size,
+     is_model_on_hub,
+ )
+
+ REQUESTED_MODELS = None
+ USERS_TO_SUBMISSION_DATES = None
+
+
+ def add_new_eval(
+     model: str,
+     base_model: str,
+     revision: str,
+     precision: str,
+     weight_type: str,
+     model_type: str,
+ ):
+     global REQUESTED_MODELS
+     global USERS_TO_SUBMISSION_DATES
+     if not REQUESTED_MODELS:
+         REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
+
+     user_name = ""
+     model_path = model
+     if "/" in model:
+         user_name = model.split("/")[0]
+         model_path = model.split("/")[1]
+
+     precision = precision.split(" ")[0]
+     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+     if model_type is None or model_type == "":
+         return styled_error("Please select a model type.")
+
+     # Does the model actually exist?
+     if revision == "":
+         revision = "main"
+
+     # Is the model on the hub?
+     if weight_type in ["Delta", "Adapter"]:
+         base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
+         if not base_model_on_hub:
+             return styled_error(f'Base model "{base_model}" {error}')
+
+     if not weight_type == "Adapter":
+         model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
+         if not model_on_hub:
+             return styled_error(f'Model "{model}" {error}')
+
+     # Is the model info correctly filled?
+     try:
+         model_info = API.model_info(repo_id=model, revision=revision)
+     except Exception:
+         return styled_error("Could not get your model information. Please fill it in properly.")
+
+     model_size = get_model_size(model_info=model_info, precision=precision)
+
+     # Were the model card and license filled?
+     try:
+         license = model_info.cardData["license"]
+     except Exception:
+         return styled_error("Please select a license for your model")
+
+     modelcard_OK, error_msg = check_model_card(model)
+     if not modelcard_OK:
+         return styled_error(error_msg)
+
+     # Seems good, creating the eval
+     print("Adding new eval")
+
+     eval_entry = {
+         "model": model,
+         "base_model": base_model,
+         "revision": revision,
+         "precision": precision,
+         "weight_type": weight_type,
+         "status": "PENDING",
+         "submitted_time": current_time,
+         "model_type": model_type,
+         "likes": model_info.likes,
+         "params": model_size,
+         "license": license,
+         "private": False,
+     }
+
+     # Check for duplicate submission
+     if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
+         return styled_warning("This model has already been submitted.")
+
+     print("Creating eval file")
+     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+     os.makedirs(OUT_DIR, exist_ok=True)
+     out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
+
+     with open(out_path, "w") as f:
+         f.write(json.dumps(eval_entry))
+
+     print("Uploading eval file")
+     API.upload_file(
+         path_or_fileobj=out_path,
+         path_in_repo=out_path.split("eval-queue/")[1],
+         repo_id=QUEUE_REPO,
+         repo_type="dataset",
+         commit_message=f"Add {model} to eval queue",
+     )
+
+     # Remove the local file
+     os.remove(out_path)
+
+     return styled_message(
+         "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
+     )
visualization/__init__.py ADDED
@@ -0,0 +1,11 @@
+ from .building import visualize_graph
+ from .energy import visualize_energy
+ from .geometry import visualize_geometry
+ from .weather import visualize_weather
+
+ __all__ = [
+     "visualize_geometry",
+     "visualize_graph",
+     "visualize_weather",
+     "visualize_energy",
+ ]
visualization/building.py ADDED
@@ -0,0 +1,692 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ import matplotlib
+ import numpy as np
+ from matplotlib import colors
+
+ matplotlib.use("Agg")
+ import matplotlib.pyplot as plt
+
+ DEFAULT_PANEL_SIZE_MULTIPLIER = 1.5
+ DEFAULT_BILLBOARD_ALPHA = 1.0
+ DEFAULT_PANEL_SCALE = 0.12
+ DEFAULT_DAY_STEP = 1
+
+ DEFAULT_DIVERGING_CMAP = colors.LinearSegmentedColormap.from_list(
+     "zone_load_diverging",
+     ["#82B0D2", "#FFFFFF", "#FA7F6F"],
+     N=256,
+ )
+
+ # Keep color mapping consistent with cuger/__analyse/visualise.py.
+ TYPE_COLORS = {
+     "window": "#FFBE7A",
+     "shading": "#999999",
+     "floor": "#82B0D2",
+     "wall": "#8ECFC9",
+     "airwall": "#E7DAD2",
+     "space": "#FA7F6F",
+     "void": "#FFFFFF",
+     None: "#FFFFFF",
+ }
+
+
+ def _axis_limits_from_points(points: np.ndarray) -> tuple[tuple[float, float], tuple[float, float], tuple[float, float]]:
+     x_min, x_max = float(np.min(points[:, 0])), float(np.max(points[:, 0]))
+     y_min, y_max = float(np.min(points[:, 1])), float(np.max(points[:, 1]))
+     z_min, z_max = float(np.min(points[:, 2])), float(np.max(points[:, 2]))
+
+     max_range = max(x_max - x_min, y_max - y_min, z_max - z_min) / 2.0
+     max_range = max(max_range, 1e-6) * 1.08
+
+     x_mid = (x_max + x_min) / 2.0
+     y_mid = (y_max + y_min) / 2.0
+     z_mid = (z_max + z_min) / 2.0
+
+     return (
+         (x_mid - max_range, x_mid + max_range),
+         (y_mid - max_range, y_mid + max_range),
+         (z_mid - max_range, z_mid + max_range),
+     )
+
+
+ def _first_existing(data: dict[str, np.ndarray], keys: list[str]) -> np.ndarray | None:
+     for k in keys:
+         if k in data:
+             return np.asarray(data[k])
+     return None
+
+
+ def _as_2d_points(arr: np.ndarray | None) -> np.ndarray:
+     if arr is None:
+         return np.zeros((0, 3), dtype=float)
+     pts = np.asarray(arr, dtype=float)
+     if pts.ndim != 2 or pts.shape[1] < 3:
+         return np.zeros((0, 3), dtype=float)
+     return pts[:, :3]
+
+
+ def _camera_basis(elev: float, azim: float) -> tuple[np.ndarray, np.ndarray]:
+     az = np.deg2rad(float(azim))
+     el = np.deg2rad(float(elev))
+
+     forward = np.array(
+         [
+             np.cos(el) * np.cos(az),
+             np.cos(el) * np.sin(az),
+             np.sin(el),
+         ],
+         dtype=float,
+     )
+     forward_norm = np.linalg.norm(forward)
+     if forward_norm < 1e-9:
+         forward = np.array([1.0, 0.0, 0.0], dtype=float)
+     else:
+         forward = forward / forward_norm
+
+     world_up = np.array([0.0, 0.0, 1.0], dtype=float)
+     right = np.cross(forward, world_up)
+     right_norm = np.linalg.norm(right)
+     if right_norm < 1e-9:
+         right = np.array([1.0, 0.0, 0.0], dtype=float)
+     else:
+         right = right / right_norm
+
+     up = np.cross(right, forward)
+     up_norm = np.linalg.norm(up)
+     if up_norm < 1e-9:
+         up = np.array([0.0, 1.0, 0.0], dtype=float)
+     else:
+         up = up / up_norm
+
+     return right, up
+
+
+ def _isometric_panel_basis(elev: float, azim: float) -> tuple[np.ndarray, np.ndarray]:
+     cam_right, cam_up = _camera_basis(elev=elev, azim=azim)
+     c30 = np.cos(np.deg2rad(30.0))
+     s30 = np.sin(np.deg2rad(30.0))
+
+     u = c30 * cam_right + s30 * cam_up
+     v = -c30 * cam_right + s30 * cam_up
+     u = u / max(np.linalg.norm(u), 1e-9)
+     v = v / max(np.linalg.norm(v), 1e-9)
+     return u, v
+
+
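`_camera_basis` builds an orthonormal right/up pair in world coordinates for the given view angles, and `_isometric_panel_basis` tilts that pair by plus/minus 30 degrees to get the two diagonal axes the energy billboards are drawn along. A minimal sanity check, assuming this file is importable as visualization.graph (the module path is inferred from the sibling files in this commit, not stated by it):

    import numpy as np

    from visualization.graph import _camera_basis, _isometric_panel_basis

    right, up = _camera_basis(elev=35.0, azim=15.0)
    print(np.dot(right, up))                       # ~0.0: the screen axes are orthogonal
    u, v = _isometric_panel_basis(elev=35.0, azim=15.0)
    print(np.linalg.norm(u), np.linalg.norm(v))    # both 1.0: unit-length panel diagonals
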
+ def _to_hourly_zone(values: np.ndarray) -> np.ndarray:
+     arr = np.asarray(values)
+     if arr.ndim != 2:
+         raise ValueError(f"`values` must be 2D, got shape={arr.shape}")
+
+     if arr.shape[0] == 8760:
+         return arr.astype(np.float32)
+
+     if arr.shape[1] == 8760:
+         return arr.T.astype(np.float32)
+
+     raise ValueError(f"Neither axis equals 8760, shape={arr.shape}")
+
+
+ def _try_parse_space_indices(raw_values: np.ndarray | None, zone_count: int, space_count: int) -> np.ndarray | None:
+     if raw_values is None:
+         return None
+
+     vals = np.asarray(raw_values).reshape(-1)
+     if vals.size < zone_count:
+         return None
+
+     out: list[int] = []
+     for i in range(zone_count):
+         v = vals[i]
+         idx: int | None = None
+
+         if isinstance(v, (int, np.integer)):
+             idx = int(v)
+         elif isinstance(v, (float, np.floating)):
+             vf = float(v)
+             if np.isfinite(vf) and float(vf).is_integer():
+                 idx = int(vf)
+         else:
+             text = v.decode("utf-8", errors="ignore") if isinstance(v, (bytes, np.bytes_)) else str(v)
+             text = text.strip()
+             try:
+                 idx = int(text)
+             except ValueError:
+                 return None
+
+         if idx is None or not (0 <= idx < space_count):
+             return None
+         out.append(idx)
+
+     return np.asarray(out, dtype=np.int64)
+
+
+ def _zone_day_hour_matrix(zone_hourly: np.ndarray, day_step: int = 1) -> np.ndarray:
+     series = np.asarray(zone_hourly, dtype=np.float32).reshape(-1)
+     if series.size < 8760:
+         raise ValueError(f"Zone series length must be >= 8760, got {series.size}")
+
+     day_hour = series[:8760].reshape(365, 24)
+     step = max(1, int(day_step))
+     if step == 1:
+         return day_hour
+
+     rows: list[np.ndarray] = []
+     for s in range(0, 365, step):
+         e = min(365, s + step)
+         rows.append(np.mean(day_hour[s:e, :], axis=0))
+     return np.asarray(rows, dtype=np.float32)
+
+
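`_zone_day_hour_matrix` reshapes one zone's 8760-hour series into a day-by-hour image and optionally averages blocks of `day_step` consecutive days (the last block may be shorter). A quick sketch on synthetic data, under the same assumed module path as above:

    import numpy as np

    from visualization.graph import _zone_day_hour_matrix

    series = np.arange(8760, dtype=np.float32)        # hour-of-year as a fake load
    daily = _zone_day_hour_matrix(series)             # one row per day
    weekly = _zone_day_hour_matrix(series, day_step=7)
    print(daily.shape, weekly.shape)                  # (365, 24) (53, 24)
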
+ def _decode_types(type_arr: np.ndarray | None, expected_count: int, fallback: str) -> list[str]:
+     if type_arr is None:
+         return [fallback] * expected_count
+
+     raw = np.asarray(type_arr).reshape(-1)
+     out: list[str] = []
+     for val in raw[:expected_count]:
+         if isinstance(val, (bytes, np.bytes_)):
+             text = val.decode("utf-8", errors="ignore").strip().lower()
+         else:
+             text = str(val).strip().lower()
+
+         if text in TYPE_COLORS:
+             out.append(text)
+             continue
+
+         # Numeric fallback for compact encodings.
+         try:
+             num = float(text)
+             if np.isfinite(num):
+                 if num <= 0:
+                     out.append("wall")
+                 else:
+                     out.append("window")
+                 continue
+         except ValueError:
+             pass
+
+         out.append(fallback)
+
+     if len(out) < expected_count:
+         out.extend([fallback] * (expected_count - len(out)))
+     return out
+
+
+ def _face_colors_from_binary_t(face_feats: np.ndarray | None, face_count: int) -> list[str] | None:
+     """Use the binary t flag from the last column of face_feats when available.
+
+     Rule requested by user: t == 0 -> yellow (window color).
+     """
+     if face_feats is None:
+         return None
+
+     feats = np.asarray(face_feats, dtype=float)
+     if feats.ndim != 2 or feats.shape[1] < 1 or feats.shape[0] < face_count:
+         return None
+
+     t_col = np.rint(feats[:face_count, -1]).astype(np.int32)
+     if not np.all(np.isin(t_col, [0, 1])):
+         return None
+
+     out: list[str] = []
+     for t_val in t_col:
+         if t_val == 0:
+             out.append(TYPE_COLORS["window"])
+         else:
+             out.append(TYPE_COLORS["wall"])
+     return out
+
+
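The numeric fallback in `_decode_types` maps compact encodings onto the two dominant surface types: non-positive values become "wall", positive values become "window", and anything unparseable gets the caller's fallback. For example (import path assumed as above):

    import numpy as np

    from visualization.graph import _decode_types

    print(_decode_types(np.array([b"window", "0", 2.0, "???"], dtype=object), 4, "wall"))
    # -> ['window', 'wall', 'window', 'wall']
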
+ def _plot_edges(ax, starts: np.ndarray, ends: np.ndarray, color: str, linewidth: float, linestyle: str, alpha: float) -> None:
+     for p0, p1 in zip(starts, ends):
+         ax.plot(
+             [p0[0], p1[0]],
+             [p0[1], p1[1]],
+             [p0[2], p1[2]],
+             color=color,
+             linewidth=linewidth,
+             linestyle=linestyle,
+             alpha=alpha,
+         )
+
+
+ def _infer_space_count(graph_data: dict[str, np.ndarray], sf_edges: np.ndarray | None) -> int:
+     valid_spaces = graph_data.get("valid_energy_spaces")
+     if valid_spaces is not None:
+         size = int(np.asarray(valid_spaces).reshape(-1).size)
+         if size > 0:
+             return size
+
+     space_feats = _first_existing(graph_data, ["space_feats", "space_c", "space_centers"])
+     if space_feats is not None:
+         feats = np.asarray(space_feats)
+         if feats.ndim >= 1 and feats.shape[0] > 0:
+             return int(feats.shape[0])
+
+     if sf_edges is not None:
+         edges = np.asarray(sf_edges, dtype=np.int64)
+         if edges.ndim == 2 and edges.shape[1] >= 2 and edges.shape[0] > 0:
+             c0_max = int(np.max(edges[:, 0]))
+             c1_max = int(np.max(edges[:, 1]))
+             return max(c0_max, c1_max) + 1
+
+     return 0
+
+
+ def _extract_face_space_pairs(sf_edges: np.ndarray | None, n_faces: int, n_spaces: int) -> list[tuple[int, int]]:
+     if sf_edges is None or n_faces <= 0 or n_spaces <= 0:
+         return []
+
+     edges = np.asarray(sf_edges, dtype=np.int64)
+     if edges.ndim != 2 or edges.shape[1] < 2:
+         return []
+
+     # PACK building npz uses [face_idx, space_idx] in sf_edges.
+     c0_max = int(np.max(edges[:, 0])) if edges.shape[0] > 0 else -1
+     c1_max = int(np.max(edges[:, 1])) if edges.shape[0] > 0 else -1
+     face_space_ok = c0_max < n_faces and c1_max < n_spaces
+     space_face_ok = c1_max < n_faces and c0_max < n_spaces
+     use_face_space = True
+     if face_space_ok and not space_face_ok:
+         use_face_space = True
+     elif space_face_ok and not face_space_ok:
+         use_face_space = False
+
+     pairs: list[tuple[int, int]] = []
+     for e in edges:
+         a, b = int(e[0]), int(e[1])
+         if use_face_space:
+             f_idx, s_idx = a, b
+         else:
+             f_idx, s_idx = b, a
+
+         if 0 <= f_idx < n_faces and 0 <= s_idx < n_spaces:
+             pairs.append((f_idx, s_idx))
+
+     return pairs
+
+
+ def _infer_space_centers_from_edges_indexed(face_centers: np.ndarray, sf_edges: np.ndarray | None, n_spaces: int) -> np.ndarray:
+     if len(face_centers) == 0 or n_spaces <= 0:
+         return np.zeros((0, 3), dtype=float)
+
+     pairs = _extract_face_space_pairs(sf_edges, n_faces=len(face_centers), n_spaces=n_spaces)
+     if not pairs:
+         return np.full((n_spaces, 3), np.nan, dtype=float)
+
+     buckets: list[list[np.ndarray]] = [[] for _ in range(n_spaces)]
+     for f_idx, s_idx in pairs:
+         buckets[s_idx].append(face_centers[f_idx])
+
+     centers = np.full((n_spaces, 3), np.nan, dtype=float)
+     for s_idx, pts in enumerate(buckets):
+         if pts:
+             centers[s_idx] = np.mean(np.asarray(pts, dtype=float), axis=0)
+
+     return centers
+
+
+ def _resolve_space_layout(
+     face_centers: np.ndarray,
+     graph_data: dict[str, np.ndarray],
+ ) -> tuple[np.ndarray, list[tuple[int, int]], int, dict[int, int]]:
+     sf_edges_raw = _first_existing(graph_data, ["sf_edges", "face_space_edges"])
+     sf_edges = np.asarray(sf_edges_raw, dtype=np.int64) if sf_edges_raw is not None else np.zeros((0, 2), dtype=np.int64)
+
+     explicit_space_centers = _as_2d_points(_first_existing(graph_data, ["space_c", "space_centers"]))
+     if len(explicit_space_centers) > 0:
+         raw_space_count = int(explicit_space_centers.shape[0])
+         raw_to_compact = {int(i): int(i) for i in range(raw_space_count)}
+         space_centers = explicit_space_centers
+     else:
+         raw_space_count = _infer_space_count(graph_data, sf_edges)
+         raw_space_centers = _infer_space_centers_from_edges_indexed(face_centers, sf_edges, n_spaces=raw_space_count)
+         if len(raw_space_centers) == 0:
+             return np.zeros((0, 3), dtype=float), [], raw_space_count, {}
+
+         valid_mask = np.isfinite(raw_space_centers).all(axis=1)
+         valid_raw_idx = np.where(valid_mask)[0].astype(np.int64)
+         raw_to_compact = {int(raw_idx): int(compact_idx) for compact_idx, raw_idx in enumerate(valid_raw_idx.tolist())}
+         space_centers = raw_space_centers[valid_mask]
+
+     pairs_raw = _extract_face_space_pairs(sf_edges, n_faces=len(face_centers), n_spaces=raw_space_count)
+     pairs_compact = [(f_idx, raw_to_compact[s_idx]) for f_idx, s_idx in pairs_raw if s_idx in raw_to_compact]
+     return space_centers, pairs_compact, raw_space_count, raw_to_compact
+
+
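`_extract_face_space_pairs` disambiguates the column order of `sf_edges` from index ranges alone, keeping the PACK `[face_idx, space_idx]` convention whenever both readings are plausible. A small illustration (import path assumed as above):

    import numpy as np

    from visualization.graph import _extract_face_space_pairs

    sf = np.array([[0, 0], [1, 0], [2, 1]])           # three faces, two spaces
    print(_extract_face_space_pairs(sf, n_faces=3, n_spaces=2))
    # -> [(0, 0), (1, 0), (2, 1)]
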
+ def _plot_generic_graph(ax, graph_data: dict[str, np.ndarray]) -> bool:
+     centers = _first_existing(graph_data, ["c", "center", "centers", "node_c", "node_centers", "face_c"])
+     points = _as_2d_points(centers)
+     if len(points) == 0:
+         return False
+
+     type_arr = _first_existing(graph_data, ["t", "type", "node_t", "node_type", "types"])
+     node_types = _decode_types(type_arr, expected_count=len(points), fallback="wall")
+     node_colors = [TYPE_COLORS.get(t, "#8ECFC9") for t in node_types]
+
+     ax.scatter(points[:, 0], points[:, 1], points[:, 2], c=node_colors, s=30, edgecolors="k", alpha=0.95)
+
+     edge_arr = _first_existing(graph_data, ["edges", "edge_index", "ff_edges"])
+     if edge_arr is not None:
+         edges = np.asarray(edge_arr, dtype=np.int64)
+         if edges.ndim == 2 and edges.shape[1] >= 2:
+             valid = (edges[:, 0] >= 0) & (edges[:, 1] >= 0) & (edges[:, 0] < len(points)) & (edges[:, 1] < len(points))
+             valid_edges = edges[valid]
+             if len(valid_edges) > 0:
+                 starts = points[valid_edges[:, 0]]
+                 ends = points[valid_edges[:, 1]]
+                 _plot_edges(ax, starts, ends, color="#666666", linewidth=1.0, linestyle="-", alpha=0.45)
+
+     x_lim, y_lim, z_lim = _axis_limits_from_points(points)
+     ax.set_xlim(*x_lim)
+     ax.set_ylim(*y_lim)
+     ax.set_zlim(*z_lim)
+     return True
+
+
+ def _plot_pack_graph(ax, graph_data: dict[str, np.ndarray], geometry_npz: Path | None) -> bool:
+     face_centers = _as_2d_points(_first_existing(graph_data, ["face_c"]))
+
+     if len(face_centers) == 0 and geometry_npz is not None:
+         with np.load(geometry_npz, allow_pickle=True) as g_data:
+             if "face_c" in g_data:
+                 face_centers = _as_2d_points(np.asarray(g_data["face_c"]))
+
+     space_centers, pairs_compact, _, _ = _resolve_space_layout(face_centers, graph_data)
+
+     if len(face_centers) == 0 and len(space_centers) == 0:
+         return False
+
+     face_types = _decode_types(_first_existing(graph_data, ["face_t", "face_type", "t", "type"]), len(face_centers), "wall")
+     face_colors = [TYPE_COLORS.get(t, "#8ECFC9") for t in face_types]
+     binary_colors = _face_colors_from_binary_t(_first_existing(graph_data, ["face_feats"]), len(face_centers))
+     if binary_colors is not None:
+         face_colors = binary_colors
+
+     if len(face_centers) > 0:
+         ax.scatter(face_centers[:, 0], face_centers[:, 1], face_centers[:, 2], c=face_colors, s=28, marker="D", edgecolors="k", alpha=0.9)
+     if len(space_centers) > 0:
+         ax.scatter(space_centers[:, 0], space_centers[:, 1], space_centers[:, 2], c=TYPE_COLORS["space"], s=70, edgecolors="k", alpha=0.95)
+
+     ff_edges = _first_existing(graph_data, ["ff_edges", "face_edges"])
+     if ff_edges is not None and len(face_centers) > 0:
+         edges = np.asarray(ff_edges, dtype=np.int64)
+         if edges.ndim == 2 and edges.shape[1] >= 2:
+             valid = (edges[:, 0] >= 0) & (edges[:, 1] >= 0) & (edges[:, 0] < len(face_centers)) & (edges[:, 1] < len(face_centers))
+             edge_ok = edges[valid]
+             if len(edge_ok) > 0:
+                 _plot_edges(
+                     ax,
+                     face_centers[edge_ok[:, 0]],
+                     face_centers[edge_ok[:, 1]],
+                     color="#999999",
+                     linewidth=1.2,
+                     linestyle="-",
+                     alpha=0.35,
+                 )
+
+     if len(face_centers) > 0 and len(space_centers) > 0 and pairs_compact:
+         starts = np.asarray([face_centers[f_idx] for f_idx, _ in pairs_compact], dtype=float)
+         ends = np.asarray([space_centers[s_idx] for _, s_idx in pairs_compact], dtype=float)
+         _plot_edges(
+             ax,
+             starts,
+             ends,
+             color="#555555",
+             linewidth=1.8,
+             linestyle="--",
+             alpha=0.9,
+         )
+
+     all_points = face_centers if len(space_centers) == 0 else np.vstack([face_centers, space_centers])
+     x_lim, y_lim, z_lim = _axis_limits_from_points(all_points)
+     ax.set_xlim(*x_lim)
+     ax.set_ylim(*y_lim)
+     ax.set_zlim(*z_lim)
+     return True
+
+
+ def _plot_pack_graph_overlay(
+     ax,
+     graph_data: dict[str, np.ndarray],
+     energy_data: dict[str, np.ndarray],
+     geometry_npz: Path | None,
+     *,
+     elev: float,
+     azim: float,
+     panel_scale: float,
+     day_step: int,
+ ) -> bool:
+     face_centers = _as_2d_points(_first_existing(graph_data, ["face_c"]))
+     if len(face_centers) == 0 and geometry_npz is not None:
+         with np.load(geometry_npz, allow_pickle=True) as g_data:
+             if "face_c" in g_data:
+                 face_centers = _as_2d_points(np.asarray(g_data["face_c"]))
+
+     if len(face_centers) == 0:
+         return False
+
+     if "values" not in energy_data:
+         return False
+
+     space_centers, pairs_compact, raw_space_count, raw_to_compact = _resolve_space_layout(face_centers, graph_data)
+     if len(space_centers) == 0:
+         return False
+
+     hourly_zone = _to_hourly_zone(np.asarray(energy_data["values"], dtype=np.float32))
+     zone_count = int(hourly_zone.shape[1])
+     valid_energy_spaces_raw = graph_data.get("valid_energy_spaces")
+     valid_energy_spaces = _try_parse_space_indices(valid_energy_spaces_raw, zone_count=zone_count, space_count=raw_space_count)
+
+     zone_to_space: list[tuple[int, int]] = []
+     if valid_energy_spaces is not None and valid_energy_spaces.size >= zone_count:
+         for z_idx in range(zone_count):
+             raw_s_idx = int(valid_energy_spaces[z_idx])
+             compact_s_idx = raw_to_compact.get(raw_s_idx)
+             if compact_s_idx is not None:
+                 zone_to_space.append((z_idx, compact_s_idx))
+     else:
+         for z_idx in range(min(zone_count, raw_space_count)):
+             compact_s_idx = raw_to_compact.get(z_idx)
+             if compact_s_idx is not None:
+                 zone_to_space.append((z_idx, compact_s_idx))
+
+     if not zone_to_space:
+         fallback_n = min(zone_count, len(space_centers))
+         zone_to_space = [(z_idx, z_idx) for z_idx in range(fallback_n)]
+
+     if not zone_to_space:
+         return False
+
+     if hasattr(ax, "computed_zorder"):
+         ax.computed_zorder = True
+
+     ax.scatter(face_centers[:, 0], face_centers[:, 1], face_centers[:, 2], c="#A8A8A8", s=8, alpha=0.24, zorder=2)
+     ax.scatter(space_centers[:, 0], space_centers[:, 1], space_centers[:, 2], c="#6E6E6E", s=14, alpha=0.5, zorder=3)
+
+     if pairs_compact:
+         starts = np.asarray([space_centers[s_idx] for _, s_idx in pairs_compact], dtype=float)
+         ends = np.asarray([face_centers[f_idx] for f_idx, _ in pairs_compact], dtype=float)
+         _plot_edges(
+             ax,
+             starts,
+             ends,
+             color="#777777",
+             linewidth=0.8,
+             linestyle="--",
+             alpha=0.3,
+         )
+
+     all_points = np.vstack([face_centers, space_centers])
+     x_lim, y_lim, z_lim = _axis_limits_from_points(all_points)
+     span = max(x_lim[1] - x_lim[0], y_lim[1] - y_lim[0], z_lim[1] - z_lim[0])
+     panel_w = max(span * float(panel_scale) * DEFAULT_PANEL_SIZE_MULTIPLIER, 1e-4)
+     panel_h = panel_w * 0.75
+
+     right, up = _isometric_panel_basis(elev=elev, azim=azim)
+
+     for z_idx, s_idx in zone_to_space:
+         center = space_centers[s_idx]
+         mat = _zone_day_hour_matrix(hourly_zone[:, z_idx], day_step=day_step)
+
+         zmin = float(np.min(mat))
+         zmax = float(np.max(mat))
+         if zmin < 0.0 < zmax:
+             zone_norm = colors.TwoSlopeNorm(vmin=zmin, vcenter=0.0, vmax=zmax)
+         elif abs(zmax - zmin) < 1e-12:
+             zone_norm = colors.Normalize(vmin=zmin - 1.0, vmax=zmax + 1.0)
+         else:
+             zone_norm = colors.Normalize(vmin=zmin, vmax=zmax)
+
+         facecolors_rgba = DEFAULT_DIVERGING_CMAP(zone_norm(mat))
+
+         n_rows, n_cols = mat.shape
+         u = np.linspace(-0.5, 0.5, n_cols, dtype=float) * panel_w
+         v = np.linspace(-0.5, 0.5, n_rows, dtype=float) * panel_h
+         uu, vv = np.meshgrid(u, v)
+
+         x = center[0] + right[0] * uu + up[0] * vv
+         y = center[1] + right[1] * uu + up[1] * vv
+         z = center[2] + right[2] * uu + up[2] * vv
+
+         surf = ax.plot_surface(
+             x,
+             y,
+             z,
+             facecolors=facecolors_rgba,
+             shade=False,
+             linewidth=0.0,
+             antialiased=False,
+             alpha=DEFAULT_BILLBOARD_ALPHA,
+         )
+         if hasattr(surf, "set_zsort"):
+             surf.set_zsort("average")
+
+         c1 = center - 0.5 * panel_w * right - 0.5 * panel_h * up
+         c2 = center + 0.5 * panel_w * right - 0.5 * panel_h * up
+         c3 = center + 0.5 * panel_w * right + 0.5 * panel_h * up
+         c4 = center - 0.5 * panel_w * right + 0.5 * panel_h * up
+         border = np.asarray([c1, c2, c3, c4, c1], dtype=float)
+         ax.plot(
+             border[:, 0],
+             border[:, 1],
+             border[:, 2],
+             color="#000000",
+             linewidth=0.85,
+             alpha=1.0,
+         )
+
+     ax.set_xlim(*x_lim)
+     ax.set_ylim(*y_lim)
+     ax.set_zlim(*z_lim)
+     return True
+
+
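Each billboard is normalized per zone: when a zone's values straddle zero, `TwoSlopeNorm` pins zero to the white center of the diverging colormap, so the hue boundary always marks zero load. For instance:

    import numpy as np
    from matplotlib import colors

    mat = np.array([[-2.0, 0.0], [1.0, 3.0]])
    norm = colors.TwoSlopeNorm(vmin=-2.0, vcenter=0.0, vmax=3.0)
    print(norm(mat))   # [[0.0, 0.5], [0.667, 1.0]]; 0.0 lands exactly on 0.5 (white)
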
+ def _looks_like_pack_graph(graph_data: dict[str, np.ndarray]) -> bool:
+     pack_keys = {"face_c", "sf_edges", "face_space_edges", "space_c", "space_centers", "face_feats", "ff_edges"}
+     return any(k in graph_data for k in pack_keys)
+
+
+ def visualize_graph(
+     graph_npz: str | Path,
+     output_png: str | Path,
+     *,
+     geometry_npz: str | Path | None = None,
+     elev: float = 35.0,
+     azim: float = 15.0,
+     dpi: int = 300,
+ ) -> Path:
+     """Render graph nodes/edges from a graph npz, using the c/t/type key conventions when available."""
+     graph_npz = Path(graph_npz)
+     output_png = Path(output_png)
+     geometry_path = Path(geometry_npz) if geometry_npz is not None else None
+
+     with np.load(graph_npz, allow_pickle=True) as data:
+         graph_data = {k: np.asarray(data[k]) for k in data.files}
+
+     fig = plt.figure(figsize=(4, 4))
+     ax = fig.add_subplot(111, projection="3d")
+     ax.view_init(elev=elev, azim=azim)
+     fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
+
+     if _looks_like_pack_graph(graph_data):
+         ok = _plot_pack_graph(ax, graph_data, geometry_npz=geometry_path)
+         if not ok:
+             ok = _plot_generic_graph(ax, graph_data)
+     else:
+         ok = _plot_generic_graph(ax, graph_data)
+         if not ok:
+             ok = _plot_pack_graph(ax, graph_data, geometry_npz=geometry_path)
+
+     if not ok:
+         keys = ", ".join(sorted(graph_data.keys()))
+         plt.close(fig)
+         raise ValueError(f"Cannot parse graph centers/edges from {graph_npz}; keys=[{keys}]")
+
+     ax.set_box_aspect([1, 1, 1])
+     ax.set_axis_off()
+
+     output_png.parent.mkdir(parents=True, exist_ok=True)
+     fig.savefig(output_png, dpi=dpi, bbox_inches="tight", pad_inches=0.04, transparent=True)
+     plt.close(fig)
+     return output_png
+
+
+ def visualize_graph_overlap(
+     graph_npz: str | Path,
+     energy_npz: str | Path,
+     output_png: str | Path,
+     *,
+     geometry_npz: str | Path | None = None,
+     elev: float = 35.0,
+     azim: float = 15.0,
+     panel_scale: float = DEFAULT_PANEL_SCALE,
+     day_step: int = DEFAULT_DAY_STEP,
+     dpi: int = 300,
+ ) -> Path:
+     """Render a PACK graph with per-space energy billboard overlays."""
+     graph_npz = Path(graph_npz)
+     energy_npz = Path(energy_npz)
+     output_png = Path(output_png)
+     geometry_path = Path(geometry_npz) if geometry_npz is not None else None
+
+     with np.load(graph_npz, allow_pickle=True) as data:
+         graph_data = {k: np.asarray(data[k]) for k in data.files}
+     with np.load(energy_npz, allow_pickle=True) as data:
+         energy_data = {k: np.asarray(data[k]) for k in data.files}
+
+     fig = plt.figure(figsize=(4, 4))
+     ax = fig.add_subplot(111, projection="3d")
+     ax.view_init(elev=elev, azim=azim)
+     fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
+
+     ok = _plot_pack_graph_overlay(
+         ax,
+         graph_data,
+         energy_data,
+         geometry_npz=geometry_path,
+         elev=elev,
+         azim=azim,
+         panel_scale=panel_scale,
+         day_step=day_step,
+     )
+     if not ok:
+         ok = _plot_pack_graph(ax, graph_data, geometry_npz=geometry_path)
+     if not ok:
+         ok = _plot_generic_graph(ax, graph_data)
+     if not ok:
+         keys = ", ".join(sorted(graph_data.keys()))
+         plt.close(fig)
+         raise ValueError(f"Cannot parse graph centers/edges from {graph_npz}; keys=[{keys}]")
+
+     ax.set_box_aspect([1, 1, 1])
+     ax.set_axis_off()
+
+     output_png.parent.mkdir(parents=True, exist_ok=True)
+     fig.savefig(output_png, dpi=dpi, bbox_inches="tight", pad_inches=0.04, transparent=True)
+     plt.close(fig)
+     return output_png
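
A hypothetical invocation of the two entry points; the .npz paths below are placeholders, not files shipped in this commit, and the module path is assumed:

    from visualization.graph import visualize_graph, visualize_graph_overlap

    visualize_graph("sample/building_graph.npz", "out/graph.png")
    visualize_graph_overlap(
        "sample/building_graph.npz",
        "sample/energy.npz",
        "out/graph_energy.png",
        day_step=7,               # weekly-averaged billboards instead of daily rows
    )
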
visualization/energy.py ADDED
@@ -0,0 +1,171 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ import matplotlib
+ import numpy as np
+
+ matplotlib.use("Agg")
+ import matplotlib.pyplot as plt
+
+ FIG_SIZE_IN = 4.2
+ MAX_ENERGY_SUBPLOTS = 6
+ AXIS_LABEL_FONT_SIZE = 8
+ TICK_LABEL_FONT_SIZE = 8
+ INPLOT_LABEL_FONT_SIZE = 10
+
+
+ def _to_hourly_zone(values: np.ndarray) -> np.ndarray:
+     arr = np.asarray(values)
+     if arr.ndim != 2:
+         raise ValueError(f"Expected 2D energy matrix, got shape={arr.shape}")
+
+     if arr.shape[0] == 8760:
+         return np.asarray(arr, dtype=float)
+     if arr.shape[1] == 8760:
+         return np.asarray(arr.T, dtype=float)
+
+     raise ValueError(f"Neither axis is 8760 for energy matrix: shape={arr.shape}")
+
+
+ def _decode_zone_names(columns_arr: np.ndarray | None, zone_count: int) -> list[str]:
+     if columns_arr is None:
+         return [f"zone_{i}" for i in range(zone_count)]
+
+     cols = np.asarray(columns_arr).reshape(-1)
+     names = [str(c, "utf-8") if isinstance(c, (bytes, np.bytes_)) else str(c) for c in cols]
+     if len(names) < zone_count:
+         names.extend([f"zone_{i}" for i in range(len(names), zone_count)])
+     return names[:zone_count]
+
+
+ def _time_window(hourly_zone: np.ndarray, start_hour: int, window_hours: int) -> tuple[np.ndarray, int, int]:
+     total = int(hourly_zone.shape[0])
+     if total < 1:
+         raise ValueError("No hourly energy records found")
+
+     start_idx = max(0, min(total - 1, int(start_hour) - 1))
+     window = max(1, int(window_hours))
+     end_idx = min(total, start_idx + window)
+     if end_idx <= start_idx:
+         raise ValueError(f"Invalid energy window: start={start_hour}, hours={window_hours}")
+
+     return hourly_zone[start_idx:end_idx, :], start_idx + 1, end_idx
+
+
+ def _major_ticks(length: int) -> list[int]:
+     if length <= 8:
+         return list(range(1, length + 1))
+
+     tick_count = 6
+     ticks = np.linspace(1, length, num=tick_count, dtype=int)
+     uniq = sorted(set(int(t) for t in ticks))
+     if uniq[-1] != length:
+         uniq.append(length)
+     return uniq
+
+
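`_time_window` takes 1-based hours of the year, clamps the request into range, and returns the slice plus the 1-based bounds actually used. For example, hour 4345 is the first hour of July 1 in a non-leap year (assuming the module is importable as visualization.energy):

    import numpy as np

    from visualization.energy import _time_window

    win, first, last = _time_window(np.zeros((8760, 3)), start_hour=4345, window_hours=24)
    print(win.shape, first, last)   # (24, 3) 4345 4368
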
+ def visualize_energy(
+     energy_npz: str | Path,
+     output_png: str | Path,
+     *,
+     max_zones: int | None = None,
+     zone_index: int | None = None,
+     start_hour: int = 1,
+     window_hours: int = 24,
+     dpi: int = 220,
+ ) -> Path:
+     """Plot energy curves in a compressed square layout with a shared x-axis for a selected time window."""
+     energy_npz = Path(energy_npz)
+     output_png = Path(output_png)
+
+     with np.load(energy_npz, allow_pickle=True) as data:
+         if "values" not in data:
+             keys = ", ".join(sorted(data.files))
+             raise KeyError(f"Missing key 'values' in {energy_npz}; keys=[{keys}]")
+         values = np.asarray(data["values"], dtype=float)
+         columns = np.asarray(data["columns"], dtype=object) if "columns" in data else None
+
+     hourly_zone = _to_hourly_zone(values)
+     window, window_start, window_end = _time_window(hourly_zone, start_hour=start_hour, window_hours=window_hours)
+     zone_count = window.shape[1]
+     if zone_count < 1:
+         raise ValueError(f"No zones found in {energy_npz}")
+
+     names = _decode_zone_names(columns, zone_count)
+
+     zone_indices = list(range(zone_count))
+     if zone_index is not None:
+         zi = int(zone_index)
+         if zi < 0 or zi >= zone_count:
+             raise ValueError(f"zone_index out of range: {zi}, valid=[0, {zone_count - 1}]")
+         zone_indices = [zi]
+     elif max_zones is not None and max_zones > 0:
+         zone_indices = zone_indices[:max_zones]
+
+     if len(zone_indices) < 1:
+         raise ValueError("No zones selected for plotting")
+
+     plotted_zone_indices = zone_indices[:MAX_ENERGY_SUBPLOTS]
+     omitted_count = len(zone_indices) - len(plotted_zone_indices)
+
+     cmap = plt.get_cmap("tab20")
+     x = np.arange(1, window.shape[0] + 1, dtype=int)
+     major_ticks = _major_ticks(window.shape[0])
+
+     row_count = len(plotted_zone_indices)
+     fig, axes = plt.subplots(
+         row_count,
+         1,
+         figsize=(FIG_SIZE_IN, FIG_SIZE_IN),
+         sharex=True,
+         gridspec_kw={"hspace": 0.0},
+     )
+     if row_count == 1:
+         axes = [axes]
+
+     for row_idx, zone_idx in enumerate(plotted_zone_indices):
+         ax = axes[row_idx]
+         color = cmap(row_idx % 20)
+         ax.plot(x, window[:, zone_idx], color=color, linewidth=0.9, alpha=0.9)
+         ax.set_ylabel("")
+         ax.text(
+             0.02,
+             0.86,
+             names[zone_idx],
+             transform=ax.transAxes,
+             ha="left",
+             va="top",
+             rotation=0,
+             fontsize=INPLOT_LABEL_FONT_SIZE,
+             bbox={"facecolor": "white", "alpha": 0.65, "edgecolor": "none", "pad": 1.5},
+         )
+         ax.set_xticks(major_ticks)
+         ax.grid(axis="y", alpha=0.25, linewidth=0.5)
+         ax.grid(axis="x", alpha=0.22, linewidth=0.45)
+         ax.tick_params(axis="both", which="both", labelsize=TICK_LABEL_FONT_SIZE)
+         if row_idx < row_count - 1:
+             ax.tick_params(axis="x", which="both", labelbottom=False)
+
+     if omitted_count > 0:
+         axes[-1].text(
+             0.98,
+             0.86,
+             f"... (+{omitted_count})",
+             transform=axes[-1].transAxes,
+             ha="right",
+             va="top",
+             fontsize=INPLOT_LABEL_FONT_SIZE,
+             bbox={"facecolor": "white", "alpha": 0.65, "edgecolor": "none", "pad": 1.5},
+         )
+
+     axes[-1].set_xlabel(f"hour index in window ({window_start}-{window_end})", fontsize=AXIS_LABEL_FONT_SIZE)
+     axes[-1].set_xlim(1, window.shape[0] + 0.5)
+     axes[-1].set_xticks(major_ticks)
+     axes[-1].set_xticklabels([str(t) for t in major_ticks], fontsize=TICK_LABEL_FONT_SIZE)
+     fig.subplots_adjust(left=0.18, right=0.94, bottom=0.14, top=0.98, hspace=0.0)
+
+     output_png.parent.mkdir(parents=True, exist_ok=True)
+     fig.savefig(output_png, dpi=dpi)
+     plt.close(fig)
+     return output_png
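
A hypothetical call plotting the first week of the year (the npz path is a placeholder); zones beyond MAX_ENERGY_SUBPLOTS are summarized with a "... (+n)" note rather than drawn:

    from visualization.energy import visualize_energy

    visualize_energy(
        "sample/energy.npz",
        "out/energy_week1.png",
        start_hour=1,
        window_hours=168,
    )
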
visualization/geometry.py ADDED
@@ -0,0 +1,99 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ import matplotlib
+ import numpy as np
+ from mpl_toolkits.mplot3d.art3d import Poly3DCollection
+
+ matplotlib.use("Agg")
+ import matplotlib.pyplot as plt
+
+
+ def _axis_limits_from_points(points: np.ndarray) -> tuple[tuple[float, float], tuple[float, float], tuple[float, float]]:
+     x_min, x_max = float(np.min(points[:, 0])), float(np.max(points[:, 0]))
+     y_min, y_max = float(np.min(points[:, 1])), float(np.max(points[:, 1]))
+     z_min, z_max = float(np.min(points[:, 2])), float(np.max(points[:, 2]))
+
+     max_range = max(x_max - x_min, y_max - y_min, z_max - z_min) / 2.0
+     max_range = max(max_range, 1e-6) * 1.08
+
+     x_mid = (x_max + x_min) / 2.0
+     y_mid = (y_max + y_min) / 2.0
+     z_mid = (z_max + z_min) / 2.0
+
+     return (
+         (x_mid - max_range, x_mid + max_range),
+         (y_mid - max_range, y_mid + max_range),
+         (z_mid - max_range, z_mid + max_range),
+     )
+
+
+ def _to_vertices(face_v_item: object) -> np.ndarray | None:
+     verts = np.asarray(face_v_item, dtype=float)
+     if verts.ndim == 2 and verts.shape[1] == 3 and len(verts) >= 3:
+         return verts
+
+     flat = verts.reshape(-1)
+     if flat.size >= 9 and flat.size % 3 == 0:
+         shaped = flat.reshape(-1, 3)
+         if len(shaped) >= 3:
+             return shaped
+
+     return None
+
+
+ def visualize_geometry(
+     geometry_npz: str | Path,
+     output_png: str | Path,
+     *,
+     elev: float = 45.0,
+     azim: float = 15.0,
+     dpi: int = 300,
+ ) -> Path:
+     """Render geometry polygons from PACK geometry npz (expects key: face_v)."""
+     geometry_npz = Path(geometry_npz)
+     output_png = Path(output_png)
+
+     with np.load(geometry_npz, allow_pickle=True) as data:
+         if "face_v" not in data:
+             keys = ", ".join(sorted(data.files))
+             raise KeyError(f"Missing key 'face_v' in {geometry_npz}; keys=[{keys}]")
+         face_v = data["face_v"]
+
+     polygons: list[np.ndarray] = []
+     for item in face_v:
+         verts = _to_vertices(item)
+         if verts is not None:
+             polygons.append(verts)
+
+     if not polygons:
+         raise ValueError(f"No valid polygons in {geometry_npz}")
+
+     fig = plt.figure(figsize=(4, 4))
+     ax = fig.add_subplot(111, projection="3d")
+     ax.view_init(elev=elev, azim=azim)
+     fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
+
+     for verts in polygons:
+         collection = Poly3DCollection(
+             [verts],
+             facecolors="#FFFFFF",
+             edgecolors="#4D4D4D",
+             linewidths=0.42,
+             alpha=0.35,
+         )
+         ax.add_collection3d(collection)
+
+     all_points = np.vstack(polygons)
+     x_lim, y_lim, z_lim = _axis_limits_from_points(all_points)
+     ax.set_xlim(*x_lim)
+     ax.set_ylim(*y_lim)
+     ax.set_zlim(*z_lim)
+     ax.set_box_aspect([1, 1, 1])
+     ax.set_axis_off()
+
+     output_png.parent.mkdir(parents=True, exist_ok=True)
+     fig.savefig(output_png, dpi=dpi, bbox_inches="tight", pad_inches=0.04, transparent=True)
+     plt.close(fig)
+     return output_png
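
A minimal round trip through `visualize_geometry` on made-up data; the file names and the two-face slab below are placeholders for illustration, and the module path is assumed:

    import numpy as np

    from visualization.geometry import visualize_geometry

    square = np.array([[0, 0, 0], [4, 0, 0], [4, 3, 0], [0, 3, 0]], dtype=float)
    face_v = np.stack([square, square + [0.0, 0.0, 3.0]])   # floor + ceiling, shape (2, 4, 3)
    np.savez("geometry_demo.npz", face_v=face_v)
    visualize_geometry("geometry_demo.npz", "geometry_demo.png")
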
visualization/weather.py ADDED
@@ -0,0 +1,185 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ import matplotlib
+ import numpy as np
+
+ matplotlib.use("Agg")
+ import matplotlib.pyplot as plt
+
+ DEFAULT_WEATHER_COLUMNS = [
+     "dry_bulb",
+     "dew_point",
+     "relative_humidity",
+     "global_horizontal_radiation",
+     "direct_normal_radiation",
+     "diffuse_horizontal_radiation",
+     "wind_speed",
+ ]
+
+ WEATHER_UNITS = {
+     "dry_bulb": "degC",
+     "dew_point": "degC",
+     "relative_humidity": "%",
+     "global_horizontal_radiation": "W/m2",
+     "direct_normal_radiation": "W/m2",
+     "diffuse_horizontal_radiation": "W/m2",
+     "wind_speed": "m/s",  # plotted by default, so it needs a unit label (m/s per the EPW convention)
+     "wind_direction": "deg",
+ }
+
+ FIG_SIZE_IN = 4.2
+ AXIS_LABEL_FONT_SIZE = 8
+ TICK_LABEL_FONT_SIZE = 8
+ INPLOT_LABEL_FONT_SIZE = 10
+
+
+ def _to_hourly_feature(values: np.ndarray) -> np.ndarray:
+     arr = np.asarray(values)
+     if arr.ndim != 2:
+         raise ValueError(f"Expected 2D weather matrix, got shape={arr.shape}")
+
+     if arr.shape[0] == 8760:
+         return np.asarray(arr, dtype=float)
+     if arr.shape[1] == 8760:
+         return np.asarray(arr.T, dtype=float)
+
+     raise ValueError(f"Neither axis is 8760 for weather matrix: shape={arr.shape}")
+
+
+ def _decode_columns(columns_arr: np.ndarray | None, width: int) -> list[str]:
+     if columns_arr is None:
+         return [f"feature_{i}" for i in range(width)]
+
+     cols = np.asarray(columns_arr).reshape(-1)
+     names = [str(c, "utf-8") if isinstance(c, (bytes, np.bytes_)) else str(c) for c in cols]
+     if len(names) < width:
+         names.extend([f"feature_{i}" for i in range(len(names), width)])
+     return names[:width]
+
+
+ def _pick_weather_indices(column_names: list[str]) -> list[int]:
+     lower_to_idx = {name.lower(): idx for idx, name in enumerate(column_names)}
+
+     selected: list[int] = []
+     for name in DEFAULT_WEATHER_COLUMNS:
+         idx = lower_to_idx.get(name.lower())
+         if idx is not None:
+             selected.append(idx)
+
+     if len(selected) < len(DEFAULT_WEATHER_COLUMNS):
+         for idx in range(len(column_names)):
+             if idx not in selected:
+                 selected.append(idx)
+             if len(selected) == len(DEFAULT_WEATHER_COLUMNS):
+                 break
+
+     return selected
+
+
+ def _time_window(hourly: np.ndarray, start_hour: int, window_hours: int) -> tuple[np.ndarray, int, int]:
+     total = int(hourly.shape[0])
+     if total < 1:
+         raise ValueError("No hourly weather records found")
+
+     start_idx = max(0, min(total - 1, int(start_hour) - 1))
+     window = max(1, int(window_hours))
+     end_idx = min(total, start_idx + window)
+     if end_idx <= start_idx:
+         raise ValueError(f"Invalid weather window: start={start_hour}, hours={window_hours}")
+
+     return hourly[start_idx:end_idx, :], start_idx + 1, end_idx
+
+
+ def _major_ticks(length: int) -> list[int]:
+     if length <= 8:
+         return list(range(1, length + 1))
+
+     tick_count = 6
+     ticks = np.linspace(1, length, num=tick_count, dtype=int)
+     uniq = sorted(set(int(t) for t in ticks))
+     if uniq[-1] != length:
+         uniq.append(length)
+     return uniq
+
+
+ def _label_with_unit(name: str) -> str:
+     key = name.strip().lower().replace(" ", "_").replace("-", "_")
+     unit = WEATHER_UNITS.get(key)
+     if unit is None:
+         return name
+     return f"{name} ({unit})"
+
+
+ def visualize_weather(
+     weather_npz: str | Path,
+     output_png: str | Path,
+     *,
+     start_hour: int = 1,
+     window_hours: int = 24,
+     dpi: int = 220,
+ ) -> Path:
+     """Plot weather subplots from a PACK weather npz (values + columns) in a selected time window."""
+     weather_npz = Path(weather_npz)
+     output_png = Path(output_png)
+
+     with np.load(weather_npz, allow_pickle=True) as data:
+         if "values" not in data:
+             keys = ", ".join(sorted(data.files))
+             raise KeyError(f"Missing key 'values' in {weather_npz}; keys=[{keys}]")
+         values = np.asarray(data["values"], dtype=float)
+         columns = np.asarray(data["columns"], dtype=object) if "columns" in data else None
+
+     hourly = _to_hourly_feature(values)
+     window, window_start, window_end = _time_window(hourly, start_hour=start_hour, window_hours=window_hours)
+     names = _decode_columns(columns, window.shape[1])
+     idx_list = _pick_weather_indices(names)
+     if len(idx_list) == 0:
+         raise ValueError(f"No weather series available in {weather_npz}")
+
+     fig, axes = plt.subplots(
+         len(idx_list),
+         1,
+         figsize=(FIG_SIZE_IN, FIG_SIZE_IN),
+         sharex=True,
+         gridspec_kw={"hspace": 0.0},
+     )
+     if len(idx_list) == 1:
+         axes = [axes]
+
+     x = np.arange(1, window.shape[0] + 1, dtype=int)
+     major_ticks = _major_ticks(window.shape[0])
+     for row_idx, feat_idx in enumerate(idx_list):
+         ax = axes[row_idx]
+         y = window[:, feat_idx]
+         ax.plot(x, y, linewidth=0.9, color="#4C72B0")
+         ax.set_ylabel("")
+         ax.text(
+             0.02,
+             0.86,
+             _label_with_unit(names[feat_idx]),
+             transform=ax.transAxes,
+             ha="left",
+             va="top",
+             rotation=0,
+             fontsize=INPLOT_LABEL_FONT_SIZE,
+             bbox={"facecolor": "white", "alpha": 0.65, "edgecolor": "none", "pad": 1.5},
+         )
+         ax.set_xticks(major_ticks)
+         ax.grid(axis="y", alpha=0.3, linewidth=0.5)
+         ax.grid(axis="x", alpha=0.22, linewidth=0.45)
+         ax.tick_params(axis="both", which="both", labelsize=TICK_LABEL_FONT_SIZE)
+         if row_idx < len(idx_list) - 1:
+             ax.tick_params(axis="x", which="both", labelbottom=False)
+
+     axes[-1].set_xlabel(f"hour index in window ({window_start}-{window_end})", fontsize=AXIS_LABEL_FONT_SIZE)
+     axes[-1].set_xlim(1, window.shape[0] + 0.5)
+     axes[-1].set_xticks(major_ticks)
+     axes[-1].set_xticklabels([str(t) for t in major_ticks], fontsize=TICK_LABEL_FONT_SIZE)
+     fig.subplots_adjust(left=0.18, right=0.94, bottom=0.14, top=0.98, hspace=0.0)
+
+     output_png.parent.mkdir(parents=True, exist_ok=True)
+     fig.savefig(output_png, dpi=dpi)
+     plt.close(fig)
+     return output_png
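
A hypothetical call rendering the seven default channels for the first day of the year (the npz path is a placeholder, and the module path is assumed):

    from visualization.weather import visualize_weather

    visualize_weather("sample/weather.npz", "out/weather_day1.png", start_hour=1, window_hours=24)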