Mei000 committed
Commit d446abe
1 parent: f991575

Delete functions.py

Files changed (1): functions.py (+0 -213)
functions.py DELETED
@@ -1,213 +0,0 @@
import requests
import os
import joblib
import inspect
import pandas as pd
import datetime
import numpy as np
from sklearn.preprocessing import OrdinalEncoder
from dotenv import load_dotenv

load_dotenv(override=True)

def decode_features(df, feature_view):
    """Decode features in the input DataFrame by inverting the corresponding
    Hopsworks Feature Store transformation functions."""
    df_res = df.copy()

    td_transformation_functions = feature_view._batch_scoring_server._transformation_functions

    for feature_name in td_transformation_functions:
        if feature_name in df_res.columns:
            td_transformation_function = td_transformation_functions[feature_name]
            sig = inspect.signature(td_transformation_function.transformation_fn)
            # collect the default parameter values the transformation was fitted with
            param_dict = {param.name: param.default
                          for param in sig.parameters.values()
                          if param.default is not inspect._empty}
            if td_transformation_function.name == "min_max_scaler":
                df_res[feature_name] = df_res[feature_name].map(
                    lambda x: x * (param_dict["max_value"] - param_dict["min_value"]) + param_dict["min_value"])
            elif td_transformation_function.name == "standard_scaler":
                df_res[feature_name] = df_res[feature_name].map(
                    lambda x: x * param_dict["std_dev"] + param_dict["mean"])
            elif td_transformation_function.name == "label_encoder":
                # invert the value -> index mapping to recover the original labels
                index_to_value = {v: k for k, v in param_dict["value_to_index"].items()}
                df_res[feature_name] = df_res[feature_name].map(
                    lambda x: index_to_value[x])
    return df_res

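# Usage sketch (illustrative, not part of the original file): decode_features
# restores batch data to original units after a feature view has scaled or
# label-encoded it. The feature view name and version below are hypothetical.
#
#   import hopsworks
#   project = hopsworks.login()
#   fs = project.get_feature_store()
#   feature_view = fs.get_feature_view(name="air_quality_fv", version=1)
#   batch_data = feature_view.get_batch_data()
#   readable_df = decode_features(batch_data, feature_view)
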
def get_model(project, model_name, evaluation_metric, sort_metrics_by):
    """Retrieve the desired model from a local cache, or download it from the
    Hopsworks Model Registry. In the latter case, it is downloaded into the
    current directory."""
    TARGET_FILE = "model.pkl"
    # look for an already-downloaded model anywhere under the current directory
    list_of_files = [os.path.join(dirpath, filename)
                     for dirpath, _, filenames in os.walk('.')
                     for filename in filenames if filename == TARGET_FILE]

    if list_of_files:
        model_path = list_of_files[0]
        model = joblib.load(model_path)
    else:
        mr = project.get_model_registry()
        # get the best model version according to the given metric
        model = mr.get_best_model(model_name, evaluation_metric, sort_metrics_by)
        model_dir = model.download()
        model = joblib.load(model_dir + "/model.pkl")

    return model

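# Usage sketch (illustrative): fetch the registered model that scores best on
# a chosen metric. The model name and metric below are hypothetical.
#
#   import hopsworks
#   project = hopsworks.login()
#   model = get_model(project,
#                     model_name="aqi_model",
#                     evaluation_metric="rmse",
#                     sort_metrics_by="min")
#   predictions = model.predict(X)  # model is the unpickled estimator
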
def get_air_quality_data(station_name):
    """Fetch today's air-quality observation for a station from the AQICN API."""
    AIR_QUALITY_API_KEY = os.getenv('AIR_QUALITY_API_KEY')
    request_value = f'https://api.waqi.info/feed/{station_name}/?token={AIR_QUALITY_API_KEY}'
    answer = requests.get(request_value).json()["data"]
    forecast = answer['forecast']['daily']
    return [
        answer["time"]["s"][:10],          # date
        int(forecast['pm25'][0]['avg']),   # avg predicted pm25
        int(forecast['pm10'][0]['avg']),   # avg predicted pm10
        max(int(forecast['pm25'][0]['avg']),
            int(forecast['pm10'][0]['avg']))  # avg predicted aqi
    ]

def get_air_quality_df(data):
    """Wrap a single air-quality observation (as returned by
    get_air_quality_data) into a typed one-row DataFrame."""
    col_names = [
        'date',
        'pm25',
        'pm10',
        'aqi'
    ]

    new_data = pd.DataFrame(data).T
    new_data.columns = col_names
    new_data['pm25'] = pd.to_numeric(new_data['pm25'])
    new_data['pm10'] = pd.to_numeric(new_data['pm10'])
    new_data['aqi'] = pd.to_numeric(new_data['aqi'])

    print(new_data)
    return new_data

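# Usage sketch (illustrative): chain the two helpers to get a one-row frame
# for a station. The station name is hypothetical and the call requires
# AIR_QUALITY_API_KEY to be set in the environment.
#
#   raw = get_air_quality_data("beijing")
#   # e.g. ['2023-01-05', 42, 18, 42]
#   aq_df = get_air_quality_df(raw)  # columns: date, pm25, pm10, aqi
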
def get_weather_data_daily(city):
    """Fetch today's weather for a city from the Visual Crossing Timeline API."""
    WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
    answer = requests.get(
        f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/'
        f'{city}/today?unitGroup=metric&include=days&key={WEATHER_API_KEY}&contentType=json'
    ).json()
    data = answer['days'][0]
    return [
        answer['address'],   # the response has no 'city' key; 'address' holds the queried location
        data['datetime'],    # day entries use 'datetime', not 'date'
        data['tempmax'],
        data['tempmin'],
        data['temp'],
        data['feelslikemax'],
        data['feelslikemin'],
        data['feelslike'],
        data['dew'],
        data['humidity'],
        data['precip'],
        data['precipprob'],
        data['precipcover'],
        data['snow'],
        data['snowdepth'],
        data['windgust'],
        data['windspeed'],
        data['winddir'],
        data['pressure'],
        data['cloudcover'],
        data['visibility'],
        data['solarradiation'],
        data['solarenergy'],
        data['uvindex'],
        data['conditions']
    ]

def get_weather_data_weekly(city: str, start_date: datetime.date) -> pd.DataFrame:
    """Fetch the coming week's daily weather for a city from the Visual
    Crossing Timeline API. start_date is currently unused: the 'next7days'
    endpoint always returns the week starting today."""
    # read the key from the environment rather than hard-coding it in the URL
    WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
    next7days_weather = pd.read_csv(
        f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/'
        f'{city}/next7days?unitGroup=metric&include=days&key={WEATHER_API_KEY}&contentType=csv'
    )

    df_weather = pd.DataFrame(next7days_weather)
    df_weather.rename(columns={"datetime": "date",
                               "name": "city",
                               "sealevelpressure": "pressure"},
                      inplace=True)
    # drop unused columns
    df_weather = df_weather.drop(labels=['city', 'preciptype', 'sunrise', 'sunset', 'conditions',
                                         'description', 'icon', 'stations'], axis=1)
    df_weather.date = df_weather.date.apply(timestamp_2_time)

    return df_weather

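# Usage sketch (illustrative): pull the coming week of weather for a city.
# Requires WEATHER_API_KEY in the environment; the returned frame has one row
# per day, with 'date' already converted to millisecond timestamps.
#
#   weekly_df = get_weather_data_weekly("beijing", datetime.date.today())
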
def get_weather_df(data):
    """Wrap a single weather observation (as returned by
    get_weather_data_daily) into a typed one-row DataFrame."""
    col_names = [
        'name',
        'date',
        'tempmax',
        'tempmin',
        'temp',
        'feelslikemax',
        'feelslikemin',
        'feelslike',
        'dew',
        'humidity',
        'precip',
        'precipprob',
        'precipcover',
        'snow',
        'snowdepth',
        'windgust',
        'windspeed',
        'winddir',
        'pressure',
        'cloudcover',
        'visibility',
        'solarradiation',
        'solarenergy',
        'uvindex',
        'conditions'
    ]

    new_data = pd.DataFrame(data).T
    new_data.columns = col_names
    for col in col_names:
        if col not in ['name', 'date', 'conditions']:
            new_data[col] = pd.to_numeric(new_data[col])

    return new_data

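# Usage sketch (illustrative): combine the daily fetch with the DataFrame
# wrapper. Requires WEATHER_API_KEY in the environment; the city is
# hypothetical.
#
#   raw = get_weather_data_daily("beijing")
#   weather_df = get_weather_df(raw)  # one row, 25 typed columns
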
def data_encoder(X):
    """Drop non-feature columns and ordinally encode 'conditions'.
    Note: mutates X in place and fits a fresh encoder on each call."""
    X.drop(columns=['date', 'name'], inplace=True)
    X['conditions'] = OrdinalEncoder().fit_transform(X[['conditions']])
    return X

def transform(df):
    """Fill the gaps a fresh weather pull typically leaves."""
    # where windgust is missing, fall back to windspeed
    df.loc[df['windgust'].isna(), 'windgust'] = df['windspeed']
    # fillna(..., inplace=True) on a column selection relies on chained
    # assignment; assign the result back instead
    df['snow'] = df['snow'].fillna(0)
    df['snowdepth'] = df['snowdepth'].fillna(0)
    df['pressure'] = df['pressure'].fillna(df['pressure'].mean())
    return df

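# Usage sketch (illustrative): typical preprocessing order before inference,
# fill the gaps first, then drop/encode the non-numeric columns.
#
#   df = transform(weather_df)    # e.g. missing windgust -> windspeed
#   X = data_encoder(df.copy())   # note: data_encoder mutates its argument
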
def get_aplevel(temps: np.ndarray) -> list:
    """Map AQI values (shape [n, 1]) to US EPA air-pollution level names."""
    boundary_list = np.array([0, 50, 100, 150, 200, 300])
    # redf[i, j] is True where temps[i] exceeds boundary j; each row is a run
    # of Trues followed by Falses
    redf = np.logical_not(temps <= boundary_list)
    # shift each row left by one and pad with False; the single position where
    # a row differs from its shifted copy marks the bucket the value falls in
    shifted = np.concatenate((np.roll(redf, -1)[:, :-1],
                              np.full((temps.shape[0], 1), False)), axis=1)
    cat = np.nonzero(np.not_equal(redf, shifted))

    air_pollution_level = ['Good', 'Moderate', 'Unhealthy for Sensitive Groups',
                           'Unhealthy', 'Very Unhealthy', 'Hazardous']
    level = [air_pollution_level[el] for el in cat[1]]
    return level

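# Worked example: 35 falls in (0, 50] -> 'Good'; 120 falls in (100, 150] ->
# 'Unhealthy for Sensitive Groups'.
#
#   get_aplevel(np.array([[35], [120]]))
#   # ['Good', 'Unhealthy for Sensitive Groups']
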
def timestamp_2_time(x):
    """Convert a 'YYYY-MM-DD' string to a millisecond Unix timestamp
    (midnight, local time)."""
    dt_obj = datetime.datetime.strptime(str(x), '%Y-%m-%d')
    dt_obj = dt_obj.timestamp() * 1000
    return int(dt_obj)
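
# Worked example: the result depends on the local timezone, because strptime
# returns a naive datetime; run under UTC,
#
#   timestamp_2_time('2024-01-01')  # 1704067200000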