Upload 4 files
Browse files- Inference_&_LLM/digital_human_in_the_loop.py +211 -0
- Inference_&_LLM/inference.py +183 -0
- Inference_&_LLM/inference_server.py +157 -0
- Inference_&_LLM/llm_client +17 -0
Inference_&_LLM/digital_human_in_the_loop.py
ADDED
|
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
import json
|
| 3 |
+
import re
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# 0. HELPER FUNCTIONS
OLLAMA_URL = ""
DEFAULT_MODEL = "deepseek-R1"  # run: ollama pull deepseek-v2, R1, etc.

def query_llm(system_prompt: str, user_prompt: str, model: str = DEFAULT_MODEL) -> str:
    """Send one non-streaming generation request to the Ollama endpoint.

    Returns the model's text reply (stripped), or an empty string on any
    failure — the caller's fallback parser handles the empty case.
    """
    request_body = {
        "model": model,
        "system": system_prompt,
        "prompt": user_prompt,
        "stream": False,
        "options": {
            "temperature": 0.2,
            "num_predict": 256
        }
    }

    try:
        reply = requests.post(OLLAMA_URL, json=request_body, timeout=30)
        reply.raise_for_status()
        return reply.json().get("response", "").strip()
    except Exception as e:
        # Best-effort by design: never let an LLM outage crash the control loop.
        print(f"[LLM Error]{model}: {e}")
        return ""
|
| 30 |
+
|
| 31 |
+
def parse_json_response(response_text: str) -> dict:
    """Parse a possibly-messy LLM reply into a dict of per-zone votes.

    Strategy: strip markdown fences and illegal leading '+' on numbers, try a
    direct parse, then fall back to the first {...} span in the raw text, and
    finally to an all-neutral vote dict.
    """

    def _fix_plus(text: str) -> str:
        # JSON forbids "+2"; some models emit it anyway.
        return re.sub(r':\s*\+(\d)', r': \1', text)

    try:
        # 0. Clean common LLM formatting issues
        stripped = re.sub(r'```json\s*', '', response_text)
        stripped = re.sub(r'```', '', stripped)
        return json.loads(_fix_plus(stripped))

    except json.JSONDecodeError:
        embedded = re.search(r'\{.*\}', response_text, re.DOTALL)
        if embedded:
            try:
                return json.loads(_fix_plus(embedded.group(0)))
            except json.JSONDecodeError:
                pass

        # 3. Fallback: neutral votes for every zone.
        print(f"[Parser Error] {response_text}")
        return {"zone_1": 0.0, "zone_2": 0.0, "zone_3": 0.0, "zone_4": 0.0, "zone_5": 0.0}
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# 1. THE SYSTEM PROMPT
# Persona + scoring rubric sent as the Ollama "system" field on every query.
# The OUTPUT FORMAT section must stay in sync with parse_json_response's
# expected zone_1..zone_5 JSON schema (and its all-zero fallback dict).
SYSTEM_PROMPT = """
You are the **Digital Zone Comfort Manager** for a commercial building.
Your role is to simulate the **Thermal Sensation Vote (TSV)** for occupants in 5 distinct zones.
You are NOT controlling the HVAC directly. You are a "Soft Sensor" providing feedback to the Building Controller.

### 1. THE CONTEXTUAL PHYSICS
Human comfort is not just temperature. It depends on:
* **Metabolic Rate (MET):** High activity = generates heat = prefers cold.
* **Clothing Insulation (CLO):** Heavy clothes = retains heat = prefers cold.
* **Acclimatization:** * If Location is **HOT** (e.g., Dubai), occupants tolerate warmth better but are sensitive to "cold shock."
* If Location is **COLD** (e.g., Alaska), occupants wear heavier street clothes and tolerate cooler indoor temps better.
* **Radiant Asymmetry:** Zones near windows feel hotter when sunny due to solar gain.

### 2. THE 5 ZONE PERSONAS (Your Managers)
Adopt the mindset of the specific occupants in each zone to cast your vote:

* **Zone 1 (Core - General Office):**
* *Profile:* Standard Office (MET 1.1, CLO 0.7).
* *Mindset:* "I am the average user. I like 22-23C. I hate drafts."
* **Zone 2 (Perimeter - Executives):**
* *Profile:* Formal Suits (MET 1.0, CLO 1.0). **High Insulation.**
* *Mindset:* "I am wearing a three-piece suit. I overheat easily. Keep it crisp and cool (20-21C)."
* **Zone 3 (Lab - Active Work):**
* *Profile:* Standing/Walking (MET 1.4, CLO 0.6). **High Internal Heat.**
* *Mindset:* "I am moving around constantly. If it's above 21C, I start sweating. I need cooling."
* **Zone 4 (Call Center - Sedentary):**
* *Profile:* Light Summer Wear (MET 1.0, CLO 0.5). **Low Insulation.**
* *Mindset:* "I am sitting still in a t-shirt. I freeze instantly. I need it warm (23-24C)."
* **Zone 5 (Break Room - Transients):**
* *Profile:* Eating/Walking (MET 1.6, CLO 0.7). **Variable.**
* *Mindset:* "I'm just passing through or eating hot food. I tolerate cold well, but stuffy heat is gross."

### 3. SCORING SCALE
Vote on this integer scale based on how that *specific persona* would feel:
* **-3 (Cold):** shivering, requesting heat immediately.
* **-2 (Cool):** uncomfortable, distraction from work.
* **-1 (Slightly Cool):** acceptable but noticed.
* **0 (Neutral):** optimal, unnoticed.
* **+1 (Slightly Warm):** acceptable but noticed.
* **+2 (Warm):** uncomfortable, distraction from work.
* **+3 (Hot):** sweating, requesting cooling immediately.

### 4. OUTPUT RULES
1. **Analyze** the provided inputs (Location, Time, Weather, Indoor State).
2. **Simulate** the specific physics for each zone (e.g., Zone 2 is near a window on a sunny day -> add virtual heat load).
3. **Vote** strictly as a JSON object. If occupancy is 0, output 0.0.

### OUTPUT FORMAT
Return **ONLY** a valid JSON object. Do not use plus signs (+).
{
"zone_1": <float>,
"zone_2": <float>,
"zone_3": <float>,
"zone_4": <float>,
"zone_5": <float>
}
"""
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
# 2. THE INPUT TEMPLATE
def create_llm_input(env_map):
    """Render the per-step observation dict into the LLM user prompt.

    Missing keys fall back to mild defaults (22 C, 50% RH, 0 people,
    20 C outdoors) so a sparse env_map still yields a well-formed prompt.
    The zone key prefixes (core, perim1..perim4) match the hot_variables
    naming used by the inference side.
    """
    # Extract Global Context (with defaults if missing)
    location = env_map.get('location', 'Standard Climate')
    time_day = env_map.get('time_of_day', 'Daytime')
    outdoor_temp = env_map.get('outdoor_temp', 20.0)
    weather = env_map.get('weather_condition', 'Clear')

    return f"""
GLOBAL CONTEXT:
- Location: {location} (Affects acclimatization expectations)
- Time of Day: {time_day}
- Weather: {weather} (Sunlight intensity affects window zones)
- Outdoor Temp: {outdoor_temp:.1f} C

[ZONE 1 - CORE OFFICE]
- Indoor Air: {env_map.get('core_temp', 22.0):.1f} C, {env_map.get('core_rh', 50):.0f}% RH
- Occupancy: {env_map.get('core_occ_count', 0)} people
- Features: Interior zone, no windows.

[ZONE 2 - EXECUTIVES (Suits)]
- Indoor Air: {env_map.get('perim1_temp', 22.0):.1f} C, {env_map.get('perim1_rh', 50):.0f}% RH
- Occupancy: {env_map.get('perim1_occ_count', 0)} people
- Features: Perimeter zone. **Direct Window Access.** (Sensitive to solar gain).

[ZONE 3 - LAB (Active)]
- Indoor Air: {env_map.get('perim2_temp', 22.0):.1f} C, {env_map.get('perim2_rh', 50):.0f}% RH
- Occupancy: {env_map.get('perim2_occ_count', 0)} people
- Features: Perimeter zone. North facing (Less sun).

[ZONE 4 - CALL CENTER (Light Clothes)]
- Indoor Air: {env_map.get('perim3_temp', 22.0):.1f} C, {env_map.get('perim3_rh', 50):.0f}% RH
- Occupancy: {env_map.get('perim3_occ_count', 0)} people
- Features: Perimeter zone. East facing.

[ZONE 5 - BREAK ROOM]
- Indoor Air: {env_map.get('perim4_temp', 22.0):.1f} C, {env_map.get('perim4_rh', 50):.0f}% RH
- Occupancy: {env_map.get('perim4_occ_count', 0)} people
- Features: Perimeter zone. West facing (Afternoon sun risk).
"""
|
| 156 |
+
|
| 157 |
+
# 3. THE SENSOR CLASS

class DigitalHumanSensor:
    """Wraps the LLM as a virtual occupant-comfort ("soft") sensor."""

    def __init__(self, model_name=DEFAULT_MODEL):
        self.model_name = model_name

    def get_comfort_votes(self, obs_dict):
        """Query the LLM once and return TSV votes clamped to [-3, +3].

        Non-numeric votes degrade to neutral (0.0) instead of raising.
        """
        prompt = create_llm_input(obs_dict)

        print(f" >>> Querying {self.model_name} for comfort status...")
        reply = query_llm(SYSTEM_PROMPT, prompt, model=self.model_name)
        parsed_votes = parse_json_response(reply)

        bounded = {}
        for zone_name, raw_vote in parsed_votes.items():
            try:
                # Clamp between -3.0 and +3.0
                bounded[zone_name] = max(-3.0, min(3.0, float(raw_vote)))
            except (ValueError, TypeError):
                bounded[zone_name] = 0.0

        return bounded
|
| 180 |
+
|
| 181 |
+
# (Inference Server Simulation)
# Smoke test: one hand-built hot-afternoon Dubai snapshot through the sensor.
if __name__ == "__main__":
    mock_env = {
        # --- GLOBAL CONTEXT ---
        'location': 'Dubai, UAE',
        'time_of_day': '14:00 (Afternoon)',
        'weather_condition': 'Sunny',
        'outdoor_temp': 38.0,

        # --- ZONE 1 (core office) ---
        'core_temp': 23.0, 'core_rh': 45.0, 'core_occ_count': 10,

        # --- ZONE 2 (executives) ---
        'perim1_temp': 24.0, 'perim1_rh': 50.0, 'perim1_occ_count': 2,

        # --- ZONE 3 (lab) ---
        'perim2_temp': 22.0, 'perim2_rh': 50.0, 'perim2_occ_count': 5,

        # --- ZONE 4 (call center) ---
        'perim3_temp': 20.0, 'perim3_rh': 40.0, 'perim3_occ_count': 15,

        # --- ZONE 5 (break room) ---
        'perim4_temp': 22.5, 'perim4_rh': 50.0, 'perim4_occ_count': 0,  # Empty
    }

    sensor = DigitalHumanSensor(model_name="deepseek-v2")

    votes = sensor.get_comfort_votes(mock_env)

    print("\n[Digital Human Feedback]")
    print(json.dumps(votes, indent=2))
|
Inference_&_LLM/inference.py
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gymnasium as gym
|
| 2 |
+
import sinergym # noqa: F401 (registers envs)
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import numpy as np
|
| 5 |
+
import os
|
| 6 |
+
import sinergym
|
| 7 |
+
import json
|
| 8 |
+
import sys
|
| 9 |
+
from unihvac.find_files import (
|
| 10 |
+
detect_paths,
|
| 11 |
+
find_manifest,
|
| 12 |
+
find_building_and_weather_from_manifest,
|
| 13 |
+
)
|
| 14 |
+
from unihvac.tables import (
|
| 15 |
+
print_monthly_tables_extra,
|
| 16 |
+
print_monthly_tables_split,
|
| 17 |
+
)
|
| 18 |
+
from unihvac.rollout import run_rollout_to_df
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# ============================================
# FOR TABLE — widen pandas console output so monthly tables print unclipped.
pd.set_option("display.max_columns", None)
pd.set_option("display.width", 240)
pd.set_option("display.max_colwidth", 32)
pd.set_option("display.float_format", lambda x: f"{x:,.2f}")
# ============================================

# ==============================================================================
# USER CONFIGURATION
# ==============================================================================
TARGET_LOCATION = "Atlanta"  # Buffalo, Miami, Dubai, Fairbanks, HoChiMinh
TARGET_THERMAL = "default"  # default, high_performance, low_performance
TARGET_OCCUPANCY = "standard"  # standard, school, retail, etc.

# Baseline-like setpoints (also used as DT seed)
HEATING_SP = 21.0
COOLING_SP = 24.0

# Choose policy mode: "dt" or "rbc"
POLICY_TYPE = "dt"  # change to "rbc" to match baseline runner exactly
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# ==========================================
# PATH DISCOVERY (ROBUST)
# ==========================================
# Resolve repo-relative paths and the OfficeSmall manifest via project helpers.
paths = detect_paths(outputs_dirname="baseline_results")
manifest_path = find_manifest(paths, building="OfficeSmall", prefer_patched=True)
output_root = str(paths.outputs_root)
os.makedirs(output_root, exist_ok=True)
# One simulation step = 900 s (15 min) expressed in hours.
TIME_STEP_HOURS = 900.0 / 3600.0  # 0.25 h
|
| 52 |
+
|
| 53 |
+
# ==========================================
# ACTUATORS & VARIABLES (keep identical)
# ==========================================
# EnergyPlus actuator handles: (component type, control type, zone name).
# One heating + one cooling setpoint per zone (core + 4 perimeter zones).
hot_actuators = {
    "Htg_Core": ("Zone Temperature Control", "Heating Setpoint", "CORE_ZN"),
    "Clg_Core": ("Zone Temperature Control", "Cooling Setpoint", "CORE_ZN"),
    "Htg_P1": ("Zone Temperature Control", "Heating Setpoint", "PERIMETER_ZN_1"),
    "Clg_P1": ("Zone Temperature Control", "Cooling Setpoint", "PERIMETER_ZN_1"),
    "Htg_P2": ("Zone Temperature Control", "Heating Setpoint", "PERIMETER_ZN_2"),
    "Clg_P2": ("Zone Temperature Control", "Cooling Setpoint", "PERIMETER_ZN_2"),
    "Htg_P3": ("Zone Temperature Control", "Heating Setpoint", "PERIMETER_ZN_3"),
    "Clg_P3": ("Zone Temperature Control", "Cooling Setpoint", "PERIMETER_ZN_3"),
    "Htg_P4": ("Zone Temperature Control", "Heating Setpoint", "PERIMETER_ZN_4"),
    "Clg_P4": ("Zone Temperature Control", "Cooling Setpoint", "PERIMETER_ZN_4"),
}

# EnergyPlus output variables: (variable name, key/zone). Keys feed the
# rollout DataFrame columns and the LLM env_map (see create_llm_input).
hot_variables = {
    "outdoor_temp": ("Site Outdoor Air DryBulb Temperature", "Environment"),
    "core_temp": ("Zone Air Temperature", "Core_ZN"),
    "perim1_temp": ("Zone Air Temperature", "Perimeter_ZN_1"),
    "perim2_temp": ("Zone Air Temperature", "Perimeter_ZN_2"),
    "perim3_temp": ("Zone Air Temperature", "Perimeter_ZN_3"),
    "perim4_temp": ("Zone Air Temperature", "Perimeter_ZN_4"),
    "elec_power": ("Facility Total HVAC Electricity Demand Rate", "Whole Building"),
    "core_occ_count": ("Zone People Occupant Count", "CORE_ZN"),
    "perim1_occ_count": ("Zone People Occupant Count", "PERIMETER_ZN_1"),
    "perim2_occ_count": ("Zone People Occupant Count", "PERIMETER_ZN_2"),
    "perim3_occ_count": ("Zone People Occupant Count", "PERIMETER_ZN_3"),
    "perim4_occ_count": ("Zone People Occupant Count", "PERIMETER_ZN_4"),

    "outdoor_dewpoint": ("Site Outdoor Air Dewpoint Temperature", "Environment"),
    "outdoor_wetbulb": ("Site Outdoor Air Wetbulb Temperature", "Environment"),

    "core_rh": ("Zone Air Relative Humidity", "CORE_ZN"),
    "perim1_rh": ("Zone Air Relative Humidity", "PERIMETER_ZN_1"),
    "perim2_rh": ("Zone Air Relative Humidity", "PERIMETER_ZN_2"),
    "perim3_rh": ("Zone Air Relative Humidity", "PERIMETER_ZN_3"),
    "perim4_rh": ("Zone Air Relative Humidity", "PERIMETER_ZN_4"),

    # ASHRAE 55 simple-model "not comfortable" time, per zone.
    "core_ash55_notcomfortable_summer": ("Zone Thermal Comfort ASHRAE 55 Simple Model Summer Clothes Not Comfortable Time", "CORE_ZN"),
    "core_ash55_notcomfortable_winter": ("Zone Thermal Comfort ASHRAE 55 Simple Model Winter Clothes Not Comfortable Time", "CORE_ZN"),
    "core_ash55_notcomfortable_any": ("Zone Thermal Comfort ASHRAE 55 Simple Model Summer or Winter Clothes Not Comfortable Time", "CORE_ZN"),

    "p1_ash55_notcomfortable_any": ("Zone Thermal Comfort ASHRAE 55 Simple Model Summer or Winter Clothes Not Comfortable Time", "PERIMETER_ZN_1"),
    "p2_ash55_notcomfortable_any": ("Zone Thermal Comfort ASHRAE 55 Simple Model Summer or Winter Clothes Not Comfortable Time", "PERIMETER_ZN_2"),
    "p3_ash55_notcomfortable_any": ("Zone Thermal Comfort ASHRAE 55 Simple Model Summer or Winter Clothes Not Comfortable Time", "PERIMETER_ZN_3"),
    "p4_ash55_notcomfortable_any": ("Zone Thermal Comfort ASHRAE 55 Simple Model Summer or Winter Clothes Not Comfortable Time", "PERIMETER_ZN_4"),
}
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class BaselineReward:
    """Inert reward for baseline rollouts: always (0.0, empty info)."""

    def __init__(self, *args, **kwargs):
        # Accept and ignore whatever the rollout harness passes in.
        pass

    def __call__(self, obs_dict):
        reward, info = 0.0, {}
        return reward, info
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def run_eval_for_location(location, building_path, weather_path):
    """Run one full evaluation rollout for a location and print/save results.

    Builds a DT or RBC policy (per module-level POLICY_TYPE), rolls it out via
    run_rollout_to_df, prints sanity stats and monthly tables, and writes the
    timeseries CSV under <output_root>/<location>/. Returns the DataFrame.
    """
    print("\n" + "=" * 80)
    print(f"Running eval for location: {location}")
    print(f" Building: {building_path}")
    print(f" Weather: {weather_path}")
    print(f" Policy: {POLICY_TYPE}")
    print("=" * 80)

    out_dir = os.path.join(output_root, location)
    os.makedirs(out_dir, exist_ok=True)

    # Build policy (DT or RBC) — policy state stays outside policy_fn
    # NOTE(review): make_policy is not imported at the top of this file —
    # confirm which module provides it, otherwise both branches NameError.
    if POLICY_TYPE == "dt":
        RUN_DIR = "Trajectories_code/run_007"  # update
        policy = make_policy(
            "dt",
            ckpt_path=os.path.join(RUN_DIR, "ckpt_10.pt"),
            model_config_path=os.path.join(RUN_DIR, "model_config.json"),
            norm_stats_path="Trajectories_code/traj_results/norm_stats.npz",
            context_len=24,
            max_tokens_per_step=64,
        )
    else:
        policy = make_policy("rbc", heating_sp=HEATING_SP, cooling_sp=COOLING_SP)

    policy.reset()

    def policy_fn(obs, info, step):
        # One-time debug dump of the observation structure on the first step.
        if step == 0:
            print("OBS TYPE:", type(obs), "SHAPE:", getattr(obs, "shape", None))
            if isinstance(obs, dict):
                print("OBS KEYS SAMPLE:", list(obs.keys())[:10])
        action, _, _ = policy.act(obs, info, step)
        return action

    df = run_rollout_to_df(
        building_path=str(building_path),
        weather_path=str(weather_path),
        variables=hot_variables,
        actuators=hot_actuators,
        policy_fn=policy_fn,
        location=location,
        timestep_hours=TIME_STEP_HOURS,
        heating_sp=HEATING_SP,
        cooling_sp=COOLING_SP,
        reward=BaselineReward,
        max_steps=None,
        verbose=True,
    )

    # Quick sanity stats before the formatted tables.
    print("setpoint_htg min/max:", df["setpoint_htg"].min(), df["setpoint_htg"].max())
    print("setpoint_clg min/max:", df["setpoint_clg"].min(), df["setpoint_clg"].max())
    print("comfort_violation min/mean/max:", df["comfort_violation_degCh"].min(),
          df["comfort_violation_degCh"].mean(), df["comfort_violation_degCh"].max())

    print_monthly_tables_extra(df, location)
    print_monthly_tables_split(df, location, time_step_hours=TIME_STEP_HOURS)

    df.to_csv(os.path.join(out_dir, "eval_timeseries.csv"), index=False)

    return df
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
if __name__ == "__main__":
    # Resolve the (patched) building model + weather file for the configured
    # scenario, then run the single-location evaluation.
    bpath, wpath = find_building_and_weather_from_manifest(
        manifest_path,
        location=TARGET_LOCATION,
        occupancy=TARGET_OCCUPANCY,
        thermal=TARGET_THERMAL,
        require_patched=True,
    )

    print("USING BUILDING FILE:", bpath)
    run_eval_for_location(TARGET_LOCATION, str(bpath), str(wpath))
|
Inference_&_LLM/inference_server.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import uvicorn
|
| 2 |
+
from fastapi import FastAPI, HTTPException
|
| 3 |
+
from pydantic import BaseModel
|
| 4 |
+
from typing import List, Dict, Any, Optional
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
import os
|
| 8 |
+
import sys
|
| 9 |
+
|
| 10 |
+
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
| 11 |
+
|
| 12 |
+
try:
|
| 13 |
+
from unihvac.policy import DecisionTransformerPolicy5Zone
|
| 14 |
+
except ImportError:
|
| 15 |
+
from policy import DecisionTransformerPolicy5Zone
|
| 16 |
+
|
| 17 |
+
# Import LLM Sensor
|
| 18 |
+
try:
|
| 19 |
+
from LLM_part.digital_human_manager import DigitalHumanSensor
|
| 20 |
+
except ImportError:
|
| 21 |
+
print(" LLM features disabled.")
|
| 22 |
+
DigitalHumanSensor = None
|
| 23 |
+
|
| 24 |
+
app = FastAPI()

# --- 2. CONFIGURATION ---
BASE_PATH = "gen_hvac"
CKPT_PATH = os.path.join(BASE_PATH, "training-runs/run_001/last.pt")
MODEL_CONFIG = os.path.join(BASE_PATH, "training-runs/run_001/model_config.json")
NORM_STATS = "TrajectoryData_from_docker/norm_stats_v4_topk.npz"

# Targets handed to the DT policy as target_energy / target_comfort.
FIXED_ENERGY_TARGET = -40000.0
# NOTE(review): relaxed and strict comfort targets are currently identical,
# so SafetyCheck's discomfort tiering has no effect — confirm intended values.
COMFORT_RELAXED = -1000.0
COMFORT_STRICT = -1000.0
|
| 35 |
+
|
| 36 |
+
class SafetyCheck:
    """Governor turning LLM comfort votes into a smoothed comfort target."""

    def __init__(self):
        self.current_comfort_target = COMFORT_RELAXED
        self.ema_alpha = 0.3  # smoothing weight for each new goal
        self.power_limit = 12000.0  # watts; above this, clamp the goal harder

    def update(self, llm_votes: Dict[str, float], current_power_watts: float):
        """Return (new smoothed comfort target, human-readable status)."""
        magnitudes = [abs(v) for v in llm_votes.values()]
        max_discomfort = max(magnitudes) if magnitudes else 0.0

        # Tier the raw goal by the worst zone's discomfort magnitude.
        if max_discomfort >= 1.5:
            goal_target, status = COMFORT_STRICT, "CRITICAL COMPLAINT"
        elif max_discomfort >= 0.5:
            goal_target, status = (COMFORT_RELAXED + COMFORT_STRICT) / 2, "MILD DISCOMFORT"
        else:
            goal_target, status = COMFORT_RELAXED, "SATISFIED"

        if current_power_watts > self.power_limit:
            goal_target = min(goal_target, -25000.0)
            status += " [ENERGY LIMIT EXCEEDED]"

        # D. Prevent Hallucination Spikes — EMA toward the new goal.
        self.current_comfort_target = (1 - self.ema_alpha) * self.current_comfort_target + \
            (self.ema_alpha * goal_target)

        return self.current_comfort_target, status
|
| 64 |
+
|
| 65 |
+
# Module-level singletons; dt_policy and llm_sensor are populated in the
# startup handler, governor is replaced on each /reset.
dt_policy = None
llm_sensor = None
governor = SafetyCheck()
|
| 68 |
+
|
| 69 |
+
# Keys Mapping
# Order must match the flat obs vector POSTed to /predict — /predict zips this
# list against the array, and reads index 9 ('elec_power') directly as watts.
ENV_KEYS = [
    'month', 'day_of_month', 'hour',
    'outdoor_temp', 'core_temp', 'perim1_temp', 'perim2_temp', 'perim3_temp', 'perim4_temp',
    'elec_power',
    'core_occ_count', 'perim1_occ_count', 'perim2_occ_count', 'perim3_occ_count', 'perim4_occ_count',
    'outdoor_dewpoint', 'outdoor_wetbulb',
    'core_rh', 'perim1_rh', 'perim2_rh', 'perim3_rh', 'perim4_rh',
    'core_ash55_notcomfortable_summer', 'core_ash55_notcomfortable_winter', 'core_ash55_notcomfortable_any',
    'p1_ash55_notcomfortable_any', 'p2_ash55_notcomfortable_any', 'p3_ash55_notcomfortable_any', 'p4_ash55_notcomfortable_any',
    'total_electricity_HVAC'
]
|
| 81 |
+
|
| 82 |
+
@app.on_event("startup")
def load_model():
    """Load the DT policy and the optional LLM comfort sensor at startup.

    Either load may fail: errors are printed and the corresponding global
    stays None (endpoints then 503 / skip the LLM loop) rather than aborting
    server startup.
    """
    global dt_policy, llm_sensor

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # 1. Load DT Policy
    try:
        dt_policy = DecisionTransformerPolicy5Zone(
            ckpt_path=CKPT_PATH,
            model_config_path=MODEL_CONFIG,
            norm_stats_path=NORM_STATS,
            context_len=48,
            max_tokens_per_step=64,
            device=device,
            temperature=0.5,
            target_energy=FIXED_ENERGY_TARGET,
            target_comfort=COMFORT_RELAXED
        )
        print("DT Policy Loaded.")
    except Exception as e:
        print(f"DT Load Error: {e}")

    # 2. Load LLM (only if the import at module top succeeded)
    if DigitalHumanSensor:
        try:
            llm_sensor = DigitalHumanSensor(model_name="deepseek-v2")
            print("LLM Sensor Loaded.")
        except Exception as e:
            print(f"LLM Error: {e}")
|
| 112 |
+
|
| 113 |
+
class ObsPayload(BaseModel):
    """Request body for /predict: one flat observation per control step."""
    step: int  # step index; drives the every-4-steps LLM cadence in /predict
    obs: List[float]  # flat observation vector, ordered per ENV_KEYS
    info: Dict[str, Any] = {}  # pydantic copies field defaults, so shared {} is safe here
|
| 117 |
+
|
| 118 |
+
class ResetPayload(BaseModel):
    """Request body for /reset; its content is not read by the handler."""
    message: str = "reset"
|
| 120 |
+
|
| 121 |
+
@app.post("/reset")
|
| 122 |
+
def reset_policy(payload: ResetPayload):
|
| 123 |
+
if dt_policy:
|
| 124 |
+
dt_policy.reset()
|
| 125 |
+
|
| 126 |
+
global governor
|
| 127 |
+
governor = SafetyCheck()
|
| 128 |
+
dt_policy.target_energy = FIXED_ENERGY_TARGET
|
| 129 |
+
dt_policy.target_comfort = COMFORT_RELAXED
|
| 130 |
+
return {"status": "success"}
|
| 131 |
+
return {"status": "error"}
|
| 132 |
+
|
| 133 |
+
@app.post("/predict")
|
| 134 |
+
def get_action(payload: ObsPayload):
|
| 135 |
+
global dt_policy, llm_sensor, governor
|
| 136 |
+
|
| 137 |
+
if dt_policy is None:
|
| 138 |
+
raise HTTPException(status_code=503, detail="Model not loaded")
|
| 139 |
+
|
| 140 |
+
obs_arr = np.array(payload.obs, dtype=np.float32)
|
| 141 |
+
|
| 142 |
+
# 1. LLM Loop (Keep existing)
|
| 143 |
+
if llm_sensor and (payload.step % 4 == 0):
|
| 144 |
+
try:
|
| 145 |
+
env_map = dict(zip(ENV_KEYS, obs_arr))
|
| 146 |
+
votes = llm_sensor.get_comfort_votes(env_map)
|
| 147 |
+
new_target, status = governor.update(votes, obs_arr[9])
|
| 148 |
+
dt_policy.target_comfort = new_target
|
| 149 |
+
print(f"[Step {payload.step}] LLM: {votes} | Status: {status} | Target: {new_target:.0f}")
|
| 150 |
+
except Exception:
|
| 151 |
+
pass
|
| 152 |
+
action, _, _ = dt_policy.act(obs_arr, payload.info, payload.step)
|
| 153 |
+
|
| 154 |
+
return {"action": action.tolist()}
|
| 155 |
+
|
| 156 |
+
if __name__ == "__main__":
    # Serve the policy + LLM governor API on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
Inference_&_LLM/llm_client
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
|
| 3 |
+
OLLAMA_URL = ""

def query_llm(prompt: str, model: str = "") -> str:
    """Send a single non-streaming prompt to the Ollama endpoint.

    Failures are not raised: the error is folded into an "[Error] ..."
    string, so callers that care must check for that prefix.
    """
    request_body = {
        "model": model,
        "prompt": prompt,
        "stream": False
    }
    try:
        reply = requests.post(OLLAMA_URL, json=request_body, timeout=60)
        reply.raise_for_status()
        return reply.json().get("response", "").strip()
    except Exception as e:
        return f"[Error] {e}"
|