import pandas as pd
from feature_engineer import *

# Load the raw competition tables (paths relative to the working directory).
def _load_csv(name: str) -> pd.DataFrame:
    """Read one raw input table from the data/ directory."""
    return pd.read_csv(f'data/{name}.csv')

train_df = _load_csv('train')
client_df = _load_csv('client')
ele_price_df = _load_csv('electricity_prices')
gas_prices_df = _load_csv('gas_prices')
historical_weather_df = _load_csv('historical_weather')
forecast_weather_df = _load_csv('forecast_weather')
# location = pd.read_csv('data/predict-energy-behavior-of-prosumers/weather_station_to_county_mapping.csv')


if __name__ == '__main__':
    # Build the full feature set, then attach lagged revealed targets.
    N_day_lags = 15  # How many days of target lags to generate (at least 2)

    # PEP 8: instances are snake_case (the original PascalCase name read like a
    # class). The processor instance is callable and joins/derives features
    # from all auxiliary tables.
    feature_processor = FeatureProcessorClass()

    data = feature_processor(data=train_df.copy(),
                             client=client_df.copy(),
                             historical_weather=historical_weather_df.copy(),
                             forecast_weather=forecast_weather_df.copy(),
                             electricity=ele_price_df.copy(),
                             gas=gas_prices_df.copy(),
                             )

    # Add revealed-target lag columns going back up to N_day_lags days.
    df = create_revealed_targets_train(data.copy(),
                                       N_day_lags=N_day_lags)
    # Full generated dataset (debug export):
    # df[0:10000].to_excel('data/train_with_features.xlsx')

    #### Create single fold split ######
    # Drop rows with a missing target — they cannot be used for supervised training.
    target = 'target'
    df = df[df[target].notnull()].reset_index(drop=True)

    # First 600 data_block_ids go to training, the remainder to validation.
    train_block_id = list(range(0, 600))

    tr = df[df['data_block_id'].isin(train_block_id)]  # first 600 data_block_ids used for training
    val = df[~df['data_block_id'].isin(train_block_id)]  # rest data_block_ids used for validation

    # Identifier / non-predictive columns to exclude from the feature set.
    # Matching is by substring, so e.g. 'date' also removes '*_datetime' columns.
    no_features = ['date',
                   'latitude',
                   'longitude',
                   'data_block_id',
                   'row_id',
                   'hours_ahead',
                   'hour_h',
                   ]

    # any() avoids the original double-loop comprehension, which could append
    # the same column once per matching pattern (duplicates were harmless for
    # the `not in` test below, but the list was misleading).
    remove_columns = [col for col in df.columns
                      if any(no_feature in col for no_feature in no_features)]
    remove_columns.append(target)
    features = [col for col in df.columns if col not in remove_columns]
    # PrintColor(f'There are {len(features)} features: {features}')
    # PrintColor(f'There are {len(features)} features: {features}')

    # # Reload enefit environment (only in debug mode, otherwise the submission will fail)
    # if DEBUG:
    #     enefit.make_env.__called__ = False
    #     type(env)._state = type(type(env)._state).__dict__['INIT']
    #     iter_test = env.iter_test()

    DEBUG = False
    import xgboost as xgb
    # https://xgboost.readthedocs.io/en/stable/python/python_api.html
    # NOTE: boosting rounds are not "epochs" (a deep-learning notion). Each
    # round fits one new weak learner (a decision tree) whose weighted output
    # is added to the running ensemble prediction.
    clf = xgb.XGBRegressor(
        device='cpu',
        enable_categorical=True,  # handle pandas categorical columns natively
        objective='reg:absoluteerror',  # optimize MAE directly (matches the competition metric)
        n_estimators=2 if DEBUG else 1500,  # max number of trees (boosting rounds)
        early_stopping_rounds=100  # stop after 100 consecutive rounds with no improvement on the last eval set
    )

    # Per the XGBoost docs, early stopping monitors only the LAST eval_set
    # entry (the validation split); the training set is included solely so its
    # per-round metric curve is recorded and can be plotted below.
    clf.fit(X=tr[features],
            y=tr[target],
            eval_set=[(tr[features], tr[target]), (val[features], val[target])],
            verbose=True  # print per-round eval metrics
            )

    print(
        f'Early stopping on best iteration #{clf.best_iteration} with MAE error on validation set of {clf.best_score:.2f}')

    import matplotlib.pyplot as plt
    # Plot train/validation MAE learning curves, one point per boosting round.
    # (The original comment said "RMSE", but the recorded metric is MAE.)
    results = clf.evals_result()
    train_mae, val_mae = results["validation_0"]["mae"], results["validation_1"]["mae"]
    x_values = range(len(train_mae))
    fig, ax = plt.subplots(figsize=(8, 4))
    ax.plot(x_values, train_mae, label="Train MAE")
    ax.plot(x_values, val_mae, label="Validation MAE")
    ax.legend()
    plt.ylabel("MAE Loss")
    plt.title("XGBoost MAE Loss")
    plt.show()

    # Plot the TOP most important features, annotating each bar with its
    # importance expressed as a percentage.
    TOP = 20
    importance_data = pd.DataFrame({'name': clf.feature_names_in_,
                                    'importance': clf.feature_importances_})
    importance_data = importance_data.sort_values(by='importance', ascending=False)

    import seaborn as sns
    # (duplicate `import matplotlib.pyplot as plt` removed — already imported above)
    fig, ax = plt.subplots(figsize=(8, 4))
    sns.barplot(data=importance_data[:TOP],
                x='importance',
                y='name',
                ax=ax,  # draw on the explicit axes instead of the implicit current one
                )
    # ax.patches are created in the same order as the rows passed to barplot,
    # so zip pairs each bar with its importance; zip also replaces the manual
    # `count` index of the original loop.
    for patch, importance in zip(ax.patches, importance_data['importance']):
        ax.text(patch.get_width(),
                patch.get_y() + patch.get_height() / 2,
                f'{100 * importance:.1f}%')

    plt.title(f'The top {TOP} features sorted by importance')
    plt.show()



