import pandas as pd
import numpy as np
from sklearn.model_selection import KFold, cross_val_score, RandomizedSearchCV
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import datetime
import ModelsPlot as model_plot

# Load the macro panel and the predictor table; skipinitialspace strips
# stray blanks after commas so column names come through clean.
Macro_data = pd.read_csv('CHN_Macro_sample.csv', index_col=0, skipinitialspace=True)
Ys_table = pd.read_csv('CHN_Marco_predictors.csv', skipinitialspace=True)  # no index column

# Sanity-check the predictor table before relying on its columns.
print("Ys_table columns:", Ys_table.columns)
print("Ys_table head:", Ys_table.head())

# The join key must exist.
if 'Dates' not in Ys_table.columns:
    raise KeyError("'Dates' column not found in Ys_table")

used_Ytable = Ys_table[['Dates', 'CPIgrowth']]
merged = pd.merge(Macro_data, used_Ytable, on='Dates')

st_month = 201001
ed_month = 201912
# Columns excluded from the feature matrix: the key, the target itself,
# and every CPI-related column (they would leak the label).
Xtodrop = ['Dates', 'y'] + [c for c in merged.columns if 'CPI' in c or 'cpi' in c]

# Target: next month's CPI growth (shift(-1) aligns month t with y at t+1).
merged['y'] = merged['CPIgrowth'].shift(-1)
in_window = (merged['Dates'] >= st_month) & (merged['Dates'] <= ed_month)
whole_data = merged[in_window].reset_index(drop=True).fillna(0)

def Norm(in_df, no_Norm):
    """Min-max normalize every column of *in_df* except those in *no_Norm*.

    Parameters
    ----------
    in_df : pd.DataFrame
        Input frame; it is not modified (a copy is returned).
    no_Norm : collection of str
        Column names to leave untouched (identifiers, the target, etc.).

    Returns
    -------
    pd.DataFrame
        Copy of *in_df* with each remaining column rescaled to [0, 1].
        Constant columns are left unchanged to avoid division by zero.
    """
    op_df = in_df.copy()
    skip = set(no_Norm)  # O(1) membership tests inside the loop
    for col in op_df.columns:
        if col in skip:
            continue
        # Series.max()/.min() are NaN-aware and do not raise on an empty
        # column, unlike the builtin max()/min() used previously.
        col_max = op_df[col].max()
        col_min = op_df[col].min()
        if col_max == col_min:
            continue  # constant column: nothing to scale
        op_df[col] = (op_df[col] - col_min) / (col_max - col_min)
    return op_df

# Month list driving the rolling-window backtest.
all_month_list = list(whole_data['Dates'].values)
train_month_n = 24   # months used for fitting
test_month_n = 12    # months used for hyper-parameter tuning
oos_month_n = 1      # months predicted out-of-sample per step

rf_op = []    # per-month RandomForest results
gbdt_op = []  # per-month GBDT results

# Rolling scheme: for out-of-sample month i, train on the 24 months ending
# 12 months before i, and tune on the 12 months immediately before i.
for i in range(len(all_month_list)):
    if i < train_month_n + test_month_n + oos_month_n - 1:
        continue  # not enough history yet
    train_monthes = all_month_list[i - test_month_n - train_month_n:i - test_month_n]
    test_monthes = all_month_list[i - test_month_n:i]
    oos_month = all_month_list[i]
    print(f"Processing month: {oos_month}")

    # Series.isin replaces the row-wise apply(lambda ...) — identical
    # booleans, vectorized.
    train_data = whole_data[whole_data['Dates'].isin(train_monthes)]
    test_data = whole_data[whole_data['Dates'].isin(test_monthes)]
    oos_data = whole_data[whole_data['Dates'] == oos_month]

    # Normalize train/test/oos together so all three share one scale.
    to_Norm = pd.concat([train_data, test_data, oos_data])
    normed_data = Norm(to_Norm, Xtodrop)
    train_data_normed = normed_data[normed_data['Dates'].isin(train_monthes)]
    test_data_normed = normed_data[normed_data['Dates'].isin(test_monthes)]
    oos_data_normed = normed_data[normed_data['Dates'] == oos_month]

    X_train_normed = train_data_normed.drop(columns=Xtodrop)
    y_train_normed = train_data_normed['y']

    X_test_normed = test_data_normed.drop(columns=Xtodrop)
    y_test_normed = test_data_normed['y']

    X_oos_normed = oos_data_normed.drop(columns=Xtodrop)
    y_oos_normed = oos_data_normed['y']

    print(f"X_train_normed shape: {X_train_normed.shape}, y_train_normed shape: {y_train_normed.shape}")
    print(f"X_test_normed shape: {X_test_normed.shape}, y_test_normed shape: {y_test_normed.shape}")
    print(f"X_oos_normed shape: {X_oos_normed.shape}, y_oos_normed shape: {y_oos_normed.shape}")

    # Warn about NaNs in the targets (they would silently degrade the fit).
    if y_train_normed.isnull().any():
        print(f"y_train_normed has NaN values: {y_train_normed[y_train_normed.isnull()]}")
    if y_test_normed.isnull().any():
        print(f"y_test_normed has NaN values: {y_test_normed[y_test_normed.isnull()]}")

    y_pred, res1, feature_importances = model_plot.RandomForest_method(
        X_train_normed, y_train_normed, X_test_normed, y_test_normed,
        X_oos_normed, test_data, oos_data)

    # NOTE(review): the aggregation below reads rf_op[idx][0] as a tuning
    # DataFrame with 'Dates'/'n_estimators'/'yhat'/'y' columns, but
    # y_test_normed is the target Series — verify against
    # ModelsPlot.RandomForest_method's return values (gbdt_result is stored
    # as the raw returned tuple, so the two models are handled differently).
    rf_result = (y_test_normed, y_pred, feature_importances)
    gbdt_result = model_plot.GBDT_method(
        X_train_normed, y_train_normed, X_test_normed, y_test_normed,
        X_oos_normed, test_data, oos_data)

    rf_op.append(rf_result)
    gbdt_op.append(gbdt_result)

def _summarize_results(model_op, model_name):
    """Stack one model's rolling-window results, print MSFE/R2, and plot.

    The RandomForest and GBDT post-processing sections were byte-for-byte
    duplicates; this helper factors them into one code path.

    Parameters
    ----------
    model_op : list of tuple
        One (tuning_df, best_params, importances) entry per out-of-sample
        month, in the order the main loop produced them.
    model_name : str
        Label for the plot titles ('RandomForest' or 'GBDT').

    Returns
    -------
    tuple
        (stacked predictions indexed by (n_estimators, Dates),
         list of per-month best params,
         list of per-month importance frames).
    """
    all_pred = []
    all_best = []
    all_coef = []
    first_usable = train_month_n + test_month_n + oos_month_n - 1
    for i in range(first_usable, len(all_month_list)):
        result_idx = i - first_usable
        oos_month = all_month_list[i]

        temp_tuning, temp_best, temp_coef = model_op[result_idx]
        all_best.append(temp_best)

        # NOTE(review): 'AlphaValue' looks inherited from a Lasso variant of
        # this script; here it just stores the transposed frame's index.
        temp_coef = pd.DataFrame(temp_coef).T
        temp_coef['AlphaValue'] = temp_coef.index
        temp_coef['Dates'] = oos_month
        all_coef.append(temp_coef)
        all_pred.append(temp_tuning)

    all_pred = pd.concat(all_pred)
    # Dates are yyyymm integers; pin each month to day 28 so every month
    # maps to a valid calendar date.
    all_pred['Dates'] = [datetime.datetime(year=int(x // 100), month=int(x % 100), day=28)
                         for x in all_pred['Dates']]
    all_pred = all_pred.sort_values(['n_estimators', 'Dates'])
    all_pred = all_pred.set_index(['n_estimators', 'Dates'])

    for n in [2, 3, 4, 5, 6]:
        alpha_pred = all_pred.loc[n]
        sq_err = np.sum((alpha_pred['yhat'] - alpha_pred['y']) ** 2)
        msfe = sq_err / len(alpha_pred)
        print('MSFE: ', msfe)
        # Out-of-sample R^2 against the zero forecast (no demeaning).
        r2 = 1 - sq_err / np.sum(alpha_pred['y'] ** 2)
        print('R2: (n = %s)' % n, r2)

        alpha_pred[['y', 'yhat']].plot(figsize=(10, 7), title='%s (n_estimators = %s)' % (model_name, n))
        plt.show()

        abs(alpha_pred['y'] - alpha_pred['yhat']).plot(figsize=(10, 7), title='%s abs error (n_estimators = %s)' % (model_name, n))
        plt.show()

    return all_pred, all_best, all_coef


# deal with randomforest
all_pred, all_best, all_coef = _summarize_results(rf_op, 'RandomForest')

# deal with gbdt
all_pred, all_best, all_coef = _summarize_results(gbdt_op, 'GBDT')
