Upload 2 files
Browse files- check_data_structure.py +65 -0
- data_generation.py +171 -0
check_data_structure.py
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import pickle


def _inspect(name, value, levels):
    """Print type (and length while still nested) of value, value[0], ...

    ``levels`` is how many times we index into element 0.  Nested levels get
    ``type`` and ``len`` printed; the innermost element only gets its type,
    followed by a blank line — matching the original script's output exactly.
    """
    for level in range(levels + 1):
        label = name + "[0]" * level
        if level < levels:
            print(label + " ", type(value), "length: ", len(value))
            value = value[0]
        else:
            print(label + " ", type(value))
    print()


def main():
    """Load ``s_month.pkl`` and print a structural summary of the dataset."""
    file = "s_month.pkl"
    with open(file, "rb") as f:
        data = pickle.load(f)

    print("data ", type(data), "length: ", len(data))
    dict0 = data[0]
    print("data[0] ", type(dict0), "length: ", len(dict0))
    print("data[0].keys() ", dict0.keys())
    print()

    # Per-key drill-down: trajectory keys are nested two levels deep,
    # scalar-per-step keys only one.
    _inspect("observations", dict0['observations'], 2)
    _inspect("next_observations", dict0['next_observations'], 2)
    _inspect("actions", dict0['actions'], 2)
    _inspect("rewards", dict0['rewards'], 1)
    _inspect("terminals", dict0['terminals'], 1)

    print("========================= Data Size =============================")
    # Fix: replaced the manual max-tracking loop with max(); default=0 keeps
    # an empty dataset from raising ValueError.
    length = max((len(d["observations"]) for d in data), default=0)

    print("Amount Of Sequences: ", len(data))
    print("Longest Sequence: ", length)

    # Report file size in MB above 1e6 bytes, else in kB.
    file_size = os.stat(file).st_size
    if file_size > 1e+6:
        string_byte = "(" + str(round(file_size / 1e+6)) + " MB)"
    else:
        string_byte = "(" + str(round(file_size / 1e+3)) + " kB)"
    print(file, string_byte)

    print(data[0]["observations"][0][:3])


if __name__ == '__main__':
    main()
data_generation.py
ADDED
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from ast import Raise
|
2 |
+
from re import S
|
3 |
+
import re
|
4 |
+
import gym
|
5 |
+
|
6 |
+
import matplotlib.pyplot as plt
|
7 |
+
|
8 |
+
from citylearn.citylearn import CityLearnEnv
|
9 |
+
import numpy as np
|
10 |
+
import pandas as pd
|
11 |
+
import os
|
12 |
+
|
13 |
+
from collections import deque
|
14 |
+
import argparse
|
15 |
+
import random
|
16 |
+
# import logger
|
17 |
+
import logging
|
18 |
+
from sys import stdout
|
19 |
+
from copy import deepcopy
|
20 |
+
|
21 |
+
|
22 |
+
class Constants:
    """Static configuration shared by the data-generation scripts."""

    # Number of episodes to roll out.
    episodes = 3
    # CityLearn challenge schema path (AIcrowd container layout).
    schema_path = '/home/aicrowd/data/citylearn_challenge_2022_phase_1/schema.json'
    # Observation channels kept for forecasting.
    variables_to_forecast = [
        'solar_generation',
        'non_shiftable_load',
        'electricity_pricing',
        'carbon_intensity',
        'electricity_consumption_crude',
        'hour',
        'month',
    ]
    # Calendar features appended to every sample.
    additional_variable = ['hour', 'month']
|
29 |
+
|
30 |
+
|
31 |
+
# NOTE: module-level side effect — the CityLearn environment is built at
# import time, so importing this module requires the schema file to exist.
env = CityLearnEnv(schema=Constants.schema_path)
|
33 |
+
|
34 |
+
def action_space_to_dict(aspace):
    """Serialize a gym ``Box`` space into a plain dict (box spaces only)."""
    summary = dict(
        high=aspace.high,
        low=aspace.low,
        shape=aspace.shape,
        dtype=str(aspace.dtype),
    )
    return summary
|
41 |
+
|
42 |
+
def env_reset(env):
    """Reset *env* and bundle its spaces, building info and first observation.

    Returns a dict with keys ``action_space``, ``observation_space``,
    ``building_info`` and ``observation`` (the post-reset observations).
    """
    first_obs = env.reset()
    # Serialize every per-building space into a plain dict.
    serialized_actions = [action_space_to_dict(space) for space in env.action_space]
    serialized_observations = [action_space_to_dict(space) for space in env.observation_space]
    buildings = list(env.get_building_information().values())
    return {
        "action_space": serialized_actions,
        "observation_space": serialized_observations,
        "building_info": buildings,
        "observation": first_obs,
    }
|
55 |
+
|
56 |
+
## env wrapper for stable baselines
|
57 |
+
## env wrapper for stable baselines
class EnvCityGym(gym.Env):
    """
    Gym wrapper around a CityLearnEnv.

    Observations are forwarded as a plain list; actions are passed straight
    through to the underlying environment.
    """

    def __init__(self, env):
        self.env = env

        # get the number of buildings
        self.num_buildings = len(env.action_space)
        print("num_buildings: ", self.num_buildings)

        # Single scalar storage action in [-0.2, 0.2].
        self.action_space = gym.spaces.Box(low=np.array([-0.2]), high=np.array([0.2]), dtype=np.float32)

        # NOTE(review): the declared space is (hour, month) only, but reset()
        # and step() return the raw CityLearn observations — confirm no
        # caller relies on this declared space.
        self.observation_space = gym.spaces.MultiDiscrete(np.array([25, 13]))

    def reset(self):
        """Reset the wrapped env and return its observations as a list.

        Fix: the original reset the underlying env twice per call — once
        inside ``env_reset`` (whose return value was discarded) and once
        here.  A single reset is sufficient and halves the reset cost.
        """
        obs = self.env.reset()
        return list(obs)

    def step(self, action):
        """
        we apply the same action for all the buildings
        """
        obs, reward, done, info = self.env.step(action)
        return list(obs), reward, done, info

    def render(self, mode='human'):
        return self.env.render(mode)
|
92 |
+
|
93 |
+
|
94 |
+
|
95 |
+
|
96 |
+
def env_run_without_action(actions_all=None):
    """
    Run the environment and collect a per-building observation log.

    Parameters
    ----------
    actions_all : sequence or None
        Flat list of scalar actions laid out building-major
        (index ``i + 8759 * building``).  When ``None``, a zero action is
        applied to every building at every step.

    Returns
    -------
    pandas.DataFrame
        One row per (step, building): the building's raw observations plus
        ``reward`` and ``building_id`` columns.
    """
    # create env from citylearn
    env = CityLearnEnv(schema=Constants.schema_path)

    # get the number of buildings
    num_buildings = len(env.action_space)
    print("num_buildings: ", num_buildings)

    # create env wrapper
    env = EnvCityGym(env)

    infos = []

    # One full pass per building so every building's observations get logged.
    for id_building in range(num_buildings):
        obs = env.reset()

        # 8759 = one year of hourly steps minus the initial observation.
        for i in range(8759):
            # Snapshot this building's observations *before* stepping.
            info_tmp = env.env.buildings[id_building].observations.copy()

            if actions_all is not None:
                action = [[actions_all[i + 8759 * b]] for b in range(num_buildings)]
            else:
                # Fix: the zero action was hard-coded to 5 buildings
                # (np.zeros((5,))); use the actual building count.
                # Shape it like [[0], [0], ...] — one singleton per building.
                action = [[a] for a in np.zeros((num_buildings,))]

            obs, reward, done, info = env.step(action)

            info_tmp['reward'] = reward[id_building]
            info_tmp['building_id'] = id_building
            infos.append(info_tmp)

            if done:
                obs = env.reset()

    # Fix: the original also accumulated a dict-of-lists (``data_pd``) via a
    # bare ``except:`` and then never used it — the DataFrame is built
    # directly from ``infos``.
    return pd.DataFrame(infos)
|
159 |
+
|
160 |
+
if __name__ == "__main__":
|
161 |
+
|
162 |
+
# data generation
|
163 |
+
data = env_run_without_action()
|
164 |
+
|
165 |
+
# we only normalize month and hour
|
166 |
+
data['hour'] = data['hour']/24
|
167 |
+
data['month'] = data['month']/12
|
168 |
+
|
169 |
+
# save the data into the data_histo folder into parquet format
|
170 |
+
data.to_parquet("data_histo/data.parquet")
|
171 |
+
|