import pandas as pd
import numpy as np
from sklearn.model_selection import KFold, cross_val_score, RandomizedSearchCV
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
import datetime
import ModelsPlot as model_plot

# Load monthly US macro data; the first CSV column becomes the index
# (date strings such as '1/31/2010' — assumed, TODO confirm against the file).
monthly_data = pd.read_csv('USA_Marco.csv', index_col=0)

print(monthly_data.head(10))
# Keep only rows whose index looks like a real date string:
# `x == x` drops NaN entries (NaN != NaN) and the '/' test drops
# non-date header/footer rows.
monthly_data = monthly_data.loc[[x for x in monthly_data.index if x == x and '/' in x]]
monthly_data.index = [pd.to_datetime(x) for x in monthly_data.index]
# Integer YYYYMM key used for merging and window selection below.
monthly_data['Dates'] = [int(x.strftime('%Y%m')) for x in monthly_data.index]

merged = monthly_data
Ys_table = pd.read_csv('US_Marco_predictors.csv')
print(Ys_table.head())
# Only the CPI growth series is used as the prediction target source.
used_Ytable = Ys_table[['Dates', 'CPIAUCSLgrowth']]

# Inner join on the YYYYMM key attaches the CPI growth column to the macro data.
merged = pd.merge(merged, used_Ytable, on='Dates')

st_month = 201001
ed_month = 201912
Xtodrop = ['Dates', 'y']  # drop the target value and unusable indicators
# Also drop every CPI-related column so the features do not leak the target.
Xtodrop_add = [x for x in merged.columns if 'CPI' in x or 'cpi' in x]
Xtodrop = Xtodrop + Xtodrop_add

# Target: NEXT month's CPI growth (shift(-1) aligns month-t features with t+1 CPI).
merged['y'] = merged['CPIAUCSLgrowth'].shift(-1)
whole_data = merged[(merged['Dates'] >= st_month) & (merged['Dates'] <= ed_month)].reset_index(drop=True)
# NOTE(review): filling all NaNs with 0 also zero-fills the last month's
# shifted target — confirm this is intended rather than dropping that row.
whole_data = whole_data.fillna(0)

print("Whole data description after dropping columns and filling NaNs:")
print(whole_data.drop(columns=Xtodrop).describe().T)

def Norm(in_df, no_Norm):
    """Min-max normalize each column of *in_df* to the [0, 1] range.

    Parameters
    ----------
    in_df : pd.DataFrame
        Input data; never modified (a copy is returned).
    no_Norm : collection of str
        Column names to leave untouched (identifiers / target columns).

    Returns
    -------
    pd.DataFrame
        Copy of *in_df* with every other column rescaled as
        (x - min) / (max - min). Constant columns are left as-is to
        avoid division by zero.
    """
    op_df = in_df.copy()
    skip = set(no_Norm)  # O(1) membership tests instead of list scans
    for col in op_df.columns:
        if col in skip:
            continue
        # Series.min()/.max() instead of the builtins: vectorized and
        # NaN-skipping, where builtin min/max on a Series is neither.
        col_min = op_df[col].min()
        col_max = op_df[col].max()
        if col_max == col_min:
            # Constant column: normalization undefined; keep original values.
            continue
        op_df[col] = (op_df[col] - col_min) / (col_max - col_min)
    return op_df

all_month_list = list(whole_data['Dates'].values)
train_month_n = 24  # months used to fit each model
test_month_n = 12   # months used for hyper-parameter validation
oos_month_n = 1     # months predicted out-of-sample per step

rf_op = []
gbdt_op = []

# Rolling-window backtest: for each month i with enough history, fit on the
# 24 months preceding a 12-month test window, then predict month i.
min_history = train_month_n + test_month_n + oos_month_n - 1
for i in range(len(all_month_list)):
    if i < min_history:
        continue
    train_monthes = all_month_list[i - test_month_n - train_month_n:i - test_month_n]
    test_monthes = all_month_list[i - test_month_n:i]
    oos_month = all_month_list[i]
    print(f"Processing OOS month: {oos_month}")

    # .isin() replaces the original apply(lambda ...) membership tests.
    train_data = whole_data[whole_data['Dates'].isin(train_monthes)]
    test_data = whole_data[whole_data['Dates'].isin(test_monthes)]
    oos_data = whole_data[whole_data['Dates'] == oos_month]

    X_train = train_data.drop(columns=Xtodrop)
    y_train = train_data['y']
    X_test = test_data.drop(columns=Xtodrop)
    y_test = test_data['y']
    X_oos = oos_data.drop(columns=Xtodrop)
    y_oos = oos_data['y']

    # Normalize train/test/oos together so all three splits share one scale.
    # NOTE(review): these normalized matrices were computed but never used by
    # the original code either — the models below are fed the RAW features.
    # Kept (minus the original's redundant y re-assignments) until the
    # intended behavior is confirmed.
    normed_data = Norm(pd.concat([train_data, test_data, oos_data]), Xtodrop)
    X_train_normed = normed_data[normed_data['Dates'].isin(train_monthes)].drop(columns=Xtodrop)
    X_test_normed = normed_data[normed_data['Dates'].isin(test_monthes)].drop(columns=Xtodrop)
    X_oos_normed = normed_data[normed_data['Dates'] == oos_month].drop(columns=Xtodrop)

    print(f"Training data shape: {X_train.shape}, Test data shape: {X_test.shape}, OOS data shape: {X_oos.shape}")

    # Skip months with no usable test or out-of-sample rows.
    if len(y_test) == 0 or len(y_oos) == 0:
        print(f"Skipping OOS month: {oos_month} due to empty y_test or y_oos")
        continue

    # BUG FIX: the original also skipped whenever len(y_test) != len(y_oos).
    # With a 12-month test window and a 1-month OOS window those lengths can
    # never match, so EVERY iteration was skipped and rf_op/gbdt_op stayed
    # empty. That check is removed.

    rf_result = model_plot.RandomForest_method(X_train, y_train, X_test, y_test, X_oos, test_data, oos_data)
    gbdt_result = model_plot.GBDT_method(X_train, y_train, X_test, y_test, X_oos, test_data, oos_data)

    rf_op.append(rf_result)
    gbdt_op.append(gbdt_result)

# Aggregate and report the RandomForest results, if any were produced.
if not rf_op:
    print("No valid results for RandomForest. Skipping further processing.")
else:
    # deal with randomforest
    all_pred = []
    all_best = []
    all_coef = []
    # Pair each stored result with its out-of-sample month. zip() stops at
    # the shorter sequence, which reproduces the original index-bound checks.
    first_oos = train_month_n + test_month_n + oos_month_n - 1
    for oos_month, result in zip(all_month_list[first_oos:], rf_op):
        tuning_frame = result[0]
        best_params = result[1]
        importances = result[2]

        all_best.append(best_params)

        coef_frame = pd.DataFrame(importances).T
        coef_frame['AlphaValue'] = coef_frame.index
        coef_frame['Dates'] = oos_month
        all_coef.append(coef_frame)
        all_pred.append(tuning_frame)

    # Convert YYYYMM ints to month-end-ish timestamps (day fixed at 28) and
    # index by (n_estimators, Dates) for the per-setting slices below.
    all_pred = pd.concat(all_pred)
    all_pred['Dates'] = [datetime.datetime(year=int(d // 100), month=int(d % 100), day=28) for d in all_pred['Dates']]
    all_pred = all_pred.sort_values(['n_estimators', 'Dates']).set_index(['n_estimators', 'Dates'])

    for n in (2, 3, 4, 5, 6):
        n_pred = all_pred.loc[n]
        sq_err = (n_pred['yhat'] - n_pred['y']) ** 2
        msfe = np.sum(sq_err) / len(n_pred)
        print(f'MSFE (n = {n}): {msfe}')
        # Out-of-sample R^2 relative to a zero forecast.
        r2 = 1 - np.sum(sq_err) / np.sum(n_pred['y'] ** 2)
        print(f'R2 (n = {n}): {r2}')

        n_pred[['y', 'yhat']].plot(figsize=(10, 7), title=f'RandomForest (n_estimators = {n})')
        plt.show()

        abs(n_pred['y'] - n_pred['yhat']).plot(figsize=(10, 7), title=f'RandomForest abs error (n_estimators = {n})')
        plt.show()

# Aggregate and report the GBDT results, if any were produced.
if not gbdt_op:
    print("No valid results for GBDT. Skipping further processing.")
else:
    # deal with gbdt
    all_pred = []
    all_best = []
    all_coef = []
    # Pair each stored result with its out-of-sample month. zip() stops at
    # the shorter sequence, which reproduces the original index-bound checks.
    first_oos = train_month_n + test_month_n + oos_month_n - 1
    for oos_month, result in zip(all_month_list[first_oos:], gbdt_op):
        tuning_frame = result[0]
        best_params = result[1]
        importances = result[2]

        all_best.append(best_params)

        coef_frame = pd.DataFrame(importances).T
        coef_frame['AlphaValue'] = coef_frame.index
        coef_frame['Dates'] = oos_month
        all_coef.append(coef_frame)
        all_pred.append(tuning_frame)

    # Convert YYYYMM ints to month-end-ish timestamps (day fixed at 28) and
    # index by (n_estimators, Dates) for the per-setting slices below.
    all_pred = pd.concat(all_pred)
    all_pred['Dates'] = [datetime.datetime(year=int(d // 100), month=int(d % 100), day=28) for d in all_pred['Dates']]
    all_pred = all_pred.sort_values(['n_estimators', 'Dates']).set_index(['n_estimators', 'Dates'])

    for n in (2, 3, 4, 5, 6):
        n_pred = all_pred.loc[n]
        sq_err = (n_pred['yhat'] - n_pred['y']) ** 2
        msfe = np.sum(sq_err) / len(n_pred)
        print(f'MSFE (n = {n}): {msfe}')
        # Out-of-sample R^2 relative to a zero forecast.
        r2 = 1 - np.sum(sq_err) / np.sum(n_pred['y'] ** 2)
        print(f'R2 (n = {n}): {r2}')

        n_pred[['y', 'yhat']].plot(figsize=(10, 7), title=f'GBDT (n_estimators = {n})')
        plt.show()

        abs(n_pred['y'] - n_pred['yhat']).plot(figsize=(10, 7), title=f'GBDT abs error (n_estimators = {n})')
        plt.show()
