import pandas as pd
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.compose import ColumnTransformer
import joblib


def feature_extraction(city):
    """Build a sliding-window feature table for one city and save it as CSV.

    For every date with at least 7 preceding records, one row is emitted
    containing the current day's high/low temperature and weather (the
    prediction targets) plus, for each of the 7 previous days, the high/low
    temperature, weather, wind direction and year-month.

    :param city: city name used to filter ``machine_data.csv``.
    :return: None. Writes ``../data/feature/changsha_feature.csv`` and prints
        a summary of the resulting frame.
    """
    data = pd.read_csv('../data/machine_data.csv')
    data = data[data['city'] == city].sort_values('date')
    columns = ['city', 'cur_high_tem', 'cur_low_tem', 'cur_weather',
               'day1_high_tem', 'day1_low_tem', 'day1_weather', 'day1_wind', 'day1_month',
               'day2_high_tem', 'day2_low_tem', 'day2_weather', 'day2_wind', 'day2_month',
               'day3_high_tem', 'day3_low_tem', 'day3_weather', 'day3_wind', 'day3_month',
               'day4_high_tem', 'day4_low_tem', 'day4_weather', 'day4_wind', 'day4_month',
               'day5_high_tem', 'day5_low_tem', 'day5_weather', 'day5_wind', 'day5_month',
               'day6_high_tem', 'day6_low_tem', 'day6_weather', 'day6_wind', 'day6_month',
               'day7_high_tem', 'day7_low_tem', 'day7_weather', 'day7_wind', 'day7_month',
               ]
    # Collect plain rows and build the DataFrame once at the end: the original
    # pd.concat-per-row pattern is quadratic in the number of rows.
    rows = []
    for i in range(7, len(data)):
        cur = data.iloc[i, :]
        item = [city, cur['hightest_tem'], cur['lowest_tem'], cur['weather']]
        # dayN columns hold the record N days before the current one.
        for j in range(1, 8):
            prev = data.iloc[i - j, :]
            item.extend([prev['hightest_tem'], prev['lowest_tem'],
                         prev['weather'], prev['wind_direction'],
                         # assumes 'date' is an ISO-like string 'YYYY-MM-...'
                         # so [:7] yields the year-month — TODO confirm
                         prev['date'][:7]])
        rows.append(item)
    res = pd.DataFrame(rows, columns=columns)
    # NOTE(review): output path is hard-coded to changsha regardless of the
    # `city` argument — kept for compatibility, but looks like a latent bug.
    res.to_csv('../data/feature/changsha_feature.csv', index=False)
    print(res.info())
    print(res.describe())


def get_max_data(data):
    """Prepare features and target for the daily-HIGH temperature model.

    Drops the low-temperature columns and other unused fields, coerces the
    day high temperatures to numeric, label-encodes the categorical day
    columns, and splits off ``cur_high_tem`` as the target.

    NOTE: mutates *data* in place (same as the original ``del``-based code).

    :param data: feature DataFrame produced by ``feature_extraction``.
    :return: tuple ``(x, y)`` — feature DataFrame and target Series.
    """
    drop_cols = [f'day{i}_low_tem' for i in range(1, 8)]
    drop_cols += ['cur_low_tem', 'cur_weather', 'city']
    data.drop(columns=drop_cols, inplace=True)
    # NOTE(review): the encoder is refit for every column and never saved, so
    # the integer codes cannot be reproduced at prediction time — confirm how
    # inference encodes its inputs.
    label_encoder = LabelEncoder()
    for i in range(1, 8):
        prefix = f'day{i}'
        data[prefix + '_high_tem'] = pd.to_numeric(data[prefix + '_high_tem'])
        for suffix in ('_weather', '_wind', '_month'):
            data[prefix + suffix] = label_encoder.fit_transform(data[prefix + suffix])
    y = data['cur_high_tem']
    del data['cur_high_tem']
    return data, y


def get_min_data(data):
    """Prepare features and target for the daily-LOW temperature model.

    Mirror of ``get_max_data``: drops the high-temperature columns and other
    unused fields, coerces the day low temperatures to numeric, label-encodes
    the categorical day columns, and splits off ``cur_low_tem`` as the target.

    NOTE: mutates *data* in place (same as the original ``del``-based code).

    :param data: feature DataFrame produced by ``feature_extraction``.
    :return: tuple ``(x, y)`` — feature DataFrame and target Series.
    """
    drop_cols = [f'day{i}_high_tem' for i in range(1, 8)]
    drop_cols += ['cur_high_tem', 'cur_weather', 'city']
    data.drop(columns=drop_cols, inplace=True)
    # NOTE(review): encoder refit per column and never persisted — see the
    # matching remark in get_max_data.
    label_encoder = LabelEncoder()
    for i in range(1, 8):
        prefix = f'day{i}'
        data[prefix + '_low_tem'] = pd.to_numeric(data[prefix + '_low_tem'])
        for suffix in ('_weather', '_wind', '_month'):
            data[prefix + suffix] = label_encoder.fit_transform(data[prefix + suffix])
    y = data['cur_low_tem']
    del data['cur_low_tem']
    return data, y


def _fit_eval_save(x, y, estimator, model_path):
    """Train *estimator* on an 80/20 split, print its R² score, save it."""
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=7)
    estimator.fit(x_train, y_train)
    y_predict = estimator.predict(x_test)
    score = r2_score(y_test, y_predict)
    print(score)
    joblib.dump(estimator, model_path)


def train_all_model(city):
    """Train and persist the high- and low-temperature regression models.

    Reads the feature CSV twice (the get_*_data helpers mutate the frame
    in place, so a fresh copy is needed for each model).

    NOTE(review): `city` is unused — the feature/model paths are hard-coded
    to changsha; kept as-is for compatibility, but verify intent.

    :param city: city name (currently ignored, see note above).
    :return: None. Prints each model's R² score and writes the .pkl files.
    """
    # High-temperature model.
    data = pd.read_csv('../data/feature/changsha_feature.csv')
    x, y = get_max_data(data)
    # NOTE(review): only the high model uses min_samples_leaf=5 — presumably
    # deliberate tuning, confirm the asymmetry is intended.
    _fit_eval_save(x, y, RandomForestRegressor(min_samples_leaf=5),
                   'model_ckpt/changsha_high_model.pkl')

    # Low-temperature model.
    data = pd.read_csv('../data/feature/changsha_feature.csv')
    x, y = get_min_data(data)
    _fit_eval_save(x, y, RandomForestRegressor(),
                   'model_ckpt/changsha_low_model.pkl')


if __name__ == '__main__':
    # Step 1 (run once to regenerate features from the raw scraped data):
    # feature_extraction('长沙')
    # Step 2: train and persist the high/low temperature models for Changsha.
    train_all_model('长沙')

# Sample row of the generated feature CSV — city, current high/low/weather,
# then 7 blocks of (high, low, weather, wind, year-month) for prior days:
# 长沙,3,0,雨,3,0,雨,北风,2011-01,3,0,雨,北风,2011-01,3,0,雨,北风,2011-01,3,0,雨,北风,2011-01,3,0,雨,北风,2011-01,3,0,雨,北风,2011-01,3,0,雨,北风,2011-01