Mei000 committed on
Commit
7a3d406
1 Parent(s): d446abe

Upload functions.py

Files changed (1)
  1. functions.py +138 -0
functions.py ADDED
@@ -0,0 +1,138 @@
+import requests
+import os
+import joblib
+import pandas as pd
+import datetime
+import numpy as np
+import time
+from sklearn.preprocessing import OrdinalEncoder
+from dotenv import load_dotenv
+load_dotenv(override=True)
+
+
+def decode_features(df, feature_view):
+    """Decodes features in the input DataFrame using corresponding Hopsworks Feature Store transformation functions"""
+    df_res = df.copy()
+
+    import inspect
+
+
+    td_transformation_functions = feature_view._batch_scoring_server._transformation_functions
+
+    res = {}
+    for feature_name in td_transformation_functions:
+        if feature_name in df_res.columns:
+            td_transformation_function = td_transformation_functions[feature_name]
+            sig, foobar_locals = inspect.signature(td_transformation_function.transformation_fn), locals()
+            param_dict = dict([(param.name, param.default) for param in sig.parameters.values() if param.default != inspect._empty])
+            if td_transformation_function.name == "min_max_scaler":
+                df_res[feature_name] = df_res[feature_name].map(
+                    lambda x: x * (param_dict["max_value"] - param_dict["min_value"]) + param_dict["min_value"])
+
+            elif td_transformation_function.name == "standard_scaler":
+                df_res[feature_name] = df_res[feature_name].map(
+                    lambda x: x * param_dict['std_dev'] + param_dict["mean"])
+            elif td_transformation_function.name == "label_encoder":
+                dictionary = param_dict['value_to_index']
+                dictionary_ = {v: k for k, v in dictionary.items()}
+                df_res[feature_name] = df_res[feature_name].map(
+                    lambda x: dictionary_[x])
+    return df_res
+
+
+def get_model(project, model_name, evaluation_metric, sort_metrics_by):
+    """Retrieve the desired model from the local directory or download it from the Hopsworks Model Registry.
+    In the latter case, the model is downloaded to the current directory."""
+    TARGET_FILE = "model.pkl"
+    list_of_files = [os.path.join(dirpath, filename) for dirpath, _, filenames \
+                     in os.walk('.') for filename in filenames if filename == TARGET_FILE]
+
+    if list_of_files:
+        model_path = list_of_files[0]
+        model = joblib.load(model_path)
+    else:
+        if not os.path.exists(TARGET_FILE):
+            mr = project.get_model_registry()
+            # get the best model based on the custom metric
+            model = mr.get_best_model(model_name,
+                                      evaluation_metric,
+                                      sort_metrics_by)
+            model_dir = model.download()
+            model = joblib.load(model_dir + "/model.pkl")
+
+    return model
+
+
+def get_weather_data_weekly(city: str, start_date: datetime.date) -> pd.DataFrame:
+    #WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
+    ##end_date = f"{start_date + datetime.timedelta(days=6):%Y-%m-%d}"
+    next7days_weather = pd.read_csv('https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/Beijing/next7days?unitGroup=metric&include=days&key=5WNL2M94KKQ4R4F32LFV8DPE4&contentType=csv')
+    #answer = requests.get(f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{city}/{start_date}/{end_date}?unitGroup=metric&include=days&key={WEATHER_API_KEY}&contentType=json').json()
+
+
+    df_weather = pd.DataFrame(next7days_weather)
+    df_weather.rename(columns={"datetime": "date"},
+                      inplace=True)
+    df_weather.rename(columns={"name": "city"},
+                      inplace=True)
+    df_weather.rename(columns={"sealevelpressure": "pressure"},
+                      inplace=True)
+    df_weather = df_weather.drop(labels=['city','dew','precip','tempmax','pressure','tempmin','temp','feelslikemax','feelslikemin','feelslike','precipprob','precipcover','snow','snowdepth','cloudcover','severerisk','moonphase','preciptype','sunrise','sunset','conditions','description','icon','stations'], axis=1)  # drop unused columns
+
+    return df_weather
+
+def get_weather_df(data):
+    col_names = [
+        'name',
+        'date',
+        'tempmax',
+        'tempmin',
+        'temp',
+        'feelslikemax',
+        'feelslikemin',
+        'feelslike',
+        'dew',
+        'humidity',
+        'precip',
+        'precipprob',
+        'precipcover',
+        'snow',
+        'snowdepth',
+        'windgust',
+        'windspeed',
+        'winddir',
+        'pressure',
+        'cloudcover',
+        'visibility',
+        'solarradiation',
+        'solarenergy',
+        'uvindex',
+        'conditions'
+    ]
+
+    new_data = pd.DataFrame(
+        data
+    ).T
+    new_data.columns = col_names
+    for col in col_names:
+        if col not in ['name', 'date', 'conditions']:
+            new_data[col] = pd.to_numeric(new_data[col])
+
+    return new_data
+
+
+
+def get_aplevel(temps: np.ndarray) -> list:
+    boundary_list = np.array([0, 50, 100, 150, 200, 300])  # assert temps.shape == [x, 1]
+    redf = np.logical_not(temps <= boundary_list)  # temps.shape[0] x boundary_list.shape[0] ndarray
+    hift = np.concatenate((np.roll(redf, -1)[:, :-1], np.full((temps.shape[0], 1), False)), axis=1)
+    cat = np.nonzero(np.not_equal(redf, hift))
+
+    air_pollution_level = ['Good', 'Moderate', 'Unhealthy for sensitive Groups', 'Unhealthy', 'Very Unhealthy', 'Hazardous']
+    level = [air_pollution_level[el] for el in cat[1]]
+    return level
+
+def timestamp_2_time(x):
+    dt_obj = datetime.datetime.strptime(str(x), '%Y-%m-%d')
+    dt_obj = dt_obj.timestamp() * 1000
+    return int(dt_obj)
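
For context, here is a minimal sketch of how these helpers could be wired together in a batch inference script. The model name "air_quality_model", the "rmse"/"min" metric pair, and the assumption that the registered model is a scikit-learn-style estimator with a predict method are illustrative, not part of this commit.

import datetime
import numpy as np
import hopsworks
from functions import get_model, get_weather_data_weekly, get_aplevel

# Hypothetical wiring -- the model name and metric below are assumptions for illustration.
project = hopsworks.login()
model = get_model(project, "air_quality_model", "rmse", "min")

# Fetch the next seven days of weather and predict an AQI value per day.
weather_df = get_weather_data_weekly("Beijing", datetime.date.today())
preds = model.predict(weather_df.drop(columns=["date"]))

# Map numeric predictions to pollution-level labels (get_aplevel expects a column vector).
levels = get_aplevel(np.asarray(preds).reshape(-1, 1))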