# -*- coding: utf-8 -*-
import os
import pandas as pd
import matplotlib.pyplot as plt
import time
from datetime import datetime
import joblib as job
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error, mean_absolute_percentage_error
from rich.traceback import install
from warnings import filterwarnings
from utils.log import Logger
from utils.common import preprocessing, TIME_FORMAT, LOG_TIME_FORMAT

install()  # rich tracebacks: nicer formatting for uncaught exceptions
filterwarnings('ignore', module='sklearn')  # silence sklearn warnings (e.g. deprecation noise during grid search)
# Build a timestamped log file name and a logger writing under log/train
logfile_name = 'train-' + datetime.now().strftime(LOG_TIME_FORMAT)
logfile = Logger(root_path='../', log_name=logfile_name).get_logger(log_dir='log/train')
# Load and preprocess the raw training data (CSV with `time` and `power_load` columns — see utils.common.preprocessing)
power_load_data = preprocessing('../data/train.csv')


def data_analyze(data: pd.DataFrame) -> None:
    """
    ## data_analyze\n
    Exploratory analysis of power load data, analyzing the distribution of power load,
    analyzing the trend of average load per hour, analyzing the trend of average load
    per month, and analyzing whether there is a significant difference in average hourly
    load between weekdays and weekends.
    The combined figure is saved to `../images/power_load_analysis.png`.
    :param data: Power load data; assumes a string `time` column formatted like
        'YYYY-MM-DD HH:MM:SS' (hour at [11:13], month at [5:7]) and a numeric
        `power_load` column — TODO confirm against utils.common.preprocessing
    """
    # Work on a copy so the caller's DataFrame is never mutated
    analyze_data = data.copy(deep=True)
    fig = plt.figure(figsize=(20, 40))
    # 1) Distribution of power load values
    ax1 = fig.add_subplot(411)
    ax1.hist(analyze_data['power_load'], bins=100)
    ax1.set_title('Histogram of power load distribution')
    ax1.set_ylabel('load')
    # 2) Average load per hour of day (expected daily peak around 11:00-18:00)
    analyze_data['hour'] = analyze_data['time'].str[11:13]
    hour_load_avg = analyze_data.groupby('hour', as_index=False)['power_load'].mean()
    ax2 = fig.add_subplot(412)
    ax2.plot(hour_load_avg['hour'], hour_load_avg['power_load'])
    ax2.set_title('Average load per hour')
    ax2.set_xlabel('hour')
    ax2.set_ylabel('load')
    # 3) Average load per month (expected seasonal peak around June-September)
    analyze_data['month'] = analyze_data['time'].str[5:7]
    month_load_avg = analyze_data.groupby('month', as_index=False)['power_load'].mean()
    ax3 = fig.add_subplot(413)
    ax3.plot(month_load_avg['month'], month_load_avg['power_load'])
    ax3.set_title('Average load per month')
    ax3.set_xlabel('month')
    ax3.set_ylabel('load')
    # 4) Weekday vs weekend average load.
    # Vectorized datetime conversion instead of a per-row `apply` (same result, much faster).
    analyze_data['weekday'] = pd.to_datetime(analyze_data['time']).dt.weekday
    analyze_data['is_holiday'] = (analyze_data['weekday'] >= 5).astype(int)  # Sat/Sun -> 1
    weekday_load_avg = analyze_data[analyze_data['is_holiday'] == 0]['power_load'].mean()
    holiday_load_avg = analyze_data[analyze_data['is_holiday'] == 1]['power_load'].mean()
    ax4 = fig.add_subplot(414)
    ax4.bar(x=['weekday', 'weekend'], height=[weekday_load_avg, holiday_load_avg])
    ax4.set_title('Average hourly load on weekdays & weekends')
    ax4.set_ylabel('average load')
    plt.savefig('../images/power_load_analysis.png')
    # Close the figure explicitly so repeated calls don't accumulate open figures
    plt.close(fig)
    logfile.info('`power_load_analysis.png` saved!')


def feature_engineering(data: pd.DataFrame) -> tuple:
    """
    ## feature_engineering\n
    Feature engineering processing on power load data: extract hourly features,
    extract monthly features, extract recent historical loads under past-time
    windows, and extract historical loads at the same time yesterday.
    :param data: Power load data; assumes a string `time` column formatted like
        'YYYY-MM-DD HH:MM:SS' and a numeric `power_load` column — TODO confirm
    :return: Tuple of (feature-engineered DataFrame, list of feature column names)
    """
    start_time = time.time()
    logfile.info('Start feature engineering')
    # Work on a copy so the caller's DataFrame is never mutated
    feature_data = data.copy(deep=True)
    feature_data['hour'] = feature_data['time'].str[11:13]
    feature_data['month'] = feature_data['time'].str[5:7]
    # One-hot encode the hour and month categorical features
    feature_data_time = pd.get_dummies(feature_data[['hour', 'month']])
    # Merge the encoded time features back into feature_data
    feature_data = pd.concat([feature_data, feature_data_time], axis=1)
    # Lag features: load 1, 2 and 3 hours ago (NaN for the first rows)
    last_time_load = [feature_data['power_load'].shift(i) for i in range(1, 4)]
    shift_data = pd.concat(last_time_load, axis=1)
    shift_data.columns = ['前1小时负荷', '前2小时负荷', '前3小时负荷']
    feature_data = pd.concat([feature_data, shift_data], axis=1)
    # Same-time-yesterday timestamp, computed vectorized instead of a per-row
    # `apply(pd.to_datetime)` — identical strings, far faster on large frames
    feature_data['yesterday_time'] = (
        pd.to_datetime(feature_data['time']) - pd.Timedelta(days=1)
    ).dt.strftime(TIME_FORMAT)
    # Lookup table time -> load; `map` leaves NaN where yesterday's timestamp
    # is absent (equivalent to the previous `apply(... dict.get)` approach)
    time_load_dict = feature_data.set_index('time')['power_load'].to_dict()
    feature_data['yesterday_load'] = feature_data['yesterday_time'].map(time_load_dict)
    # Drop rows made incomplete by the lag/yesterday features
    feature_data.dropna(axis=0, inplace=True)
    feature_cols = list(feature_data_time.columns) + list(shift_data.columns) + ['yesterday_load']
    end_time = time.time()
    logfile.info('End feature engineering')
    logfile.info(f'Processing time: {end_time - start_time}s')
    return feature_data, feature_cols


def train(feature_data: pd.DataFrame, feature_col: list) -> None:
    """
    ## train\n
    Model training module: split the data, grid-search the best XGBoost
    hyper-parameters with cross-validation, evaluate the resulting model on
    the held-out test set, and save it to `../model/xgb_power_load.pth`.
    :param feature_data: Feature engineering processed data
    :param feature_col: Feature column names
    """
    # Train/test split (fixed random_state for reproducibility)
    x = feature_data[feature_col]
    y = feature_data['power_load']
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=22)
    # Grid search + cross-validation for the best hyper-parameter combination
    estimator = XGBRegressor()
    param_grid = {
        "n_estimators": range(50, 210, 50),
        "max_depth": range(1, 10, 2),
        "learning_rate": [0.01, 0.05, 0.1]
    }
    start_time = time.time()
    logfile.info('Start grid search')
    grid_search = GridSearchCV(estimator=estimator, param_grid=param_grid, cv=5)
    grid_search.fit(x_train, y_train)
    best_params = grid_search.best_params_
    logfile.info(f'Best parameters: {best_params}')
    # With the default `refit=True`, GridSearchCV has already re-trained
    # `best_estimator_` on the full training set — the previous explicit
    # `fit(x_train, y_train)` here was redundant and doubled training time.
    xgb_regressor = grid_search.best_estimator_
    # Evaluate on the held-out test set
    y_predict = xgb_regressor.predict(x_test)
    mse = mean_squared_error(y_test, y_predict)
    mae = mean_absolute_error(y_test, y_predict)
    mape = mean_absolute_percentage_error(y_test, y_predict)
    logfile.info(f'MSE: {mse}')
    logfile.info(f'MAE: {mae}')
    logfile.info(f'MAPE: {mape}')
    end_time = time.time()
    logfile.info('End grid search')
    logfile.info(f'Processing time: {end_time - start_time}s')
    # Persist the fitted model (joblib dump; `.pth` extension kept for compatibility)
    job.dump(xgb_regressor, '../model/xgb_power_load.pth')
    logfile.info('`xgb_power_load.pth` saved!')


if __name__ == '__main__':
    # Run the exploratory analysis only when its output image is missing
    try:
        if os.path.exists('../images/power_load_analysis.png'):
            logfile.warning('`power_load_analysis.png` already exists!')
        else:
            data_analyze(power_load_data)
    except Exception as e:
        logfile.error(e)
    # Run feature engineering + training only when no saved model exists
    try:
        if os.path.exists('../model/xgb_power_load.pth'):
            logfile.warning('`xgb_power_load.pth` already exists!')
        else:
            processed_data, feature_columns = feature_engineering(power_load_data)
            train(processed_data, feature_columns)
    except Exception as e:
        logfile.error(e)
