# %1 Import packages
from matplotlib import pyplot
from scipy.stats import zscore
import pandas as pd
import numpy as np
import warnings
import os
import sys
from contextlib import contextmanager
from sklearn import metrics
from sklearn.decomposition import PCA
from sklearn.impute import KNNImputer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV, LassoCV, ElasticNetCV
from sklearn.ensemble import GradientBoostingRegressor  # 用 scikit-learn 的 GradientBoostingRegressor 替代xgboost
from sklearn.metrics import r2_score
import torch
import torch.nn as nn
from pytorch_tabnet.tab_model import TabNetRegressor  # pip install pytorch-tabnet
# Silence all library warnings globally.
# NOTE(review): this also hides deprecation warnings — consider narrowing the filter.
warnings.filterwarnings("ignore")

# Select the compute device: CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)  # Check whether the GPU is being used

#%%2 Load data
# path = r"D:\硕士打工日志\5课程作业\量化投资课\量化作业2\EquityData"
# Equity panel data; the CSV's first column becomes the index.
meta_data = pd.read_csv('./19911.csv', index_col=0)
# Sort chronologically, then by stock code, and renumber rows.
meta_data = meta_data.sort_values(['time', 'code']).reset_index(drop=True)

#%% Data preprocessing: split into training and test sets
def preprocess_data(x, y):
    """Chronologically split x and y: the first 70% of rows become the
    training set and the remaining 30% the test set (no shuffling).
    """
    split_at = int(0.7 * len(x))
    train_x = x[:split_at]
    test_x = x[split_at:]
    train_y = y[:split_at]
    test_y = y[split_at:]
    return train_x, test_x, train_y, test_y

def return_calc(group):
    """Attach a 'next_ret' column to `group`: the next period's simple return
    of 'close_x'. Positions where the return is undefined (first and last row
    of the group) are filled with 0. Returns the mutated group.
    """
    next_period_ret = group['close_x'].pct_change().shift(-1)
    group['next_ret'] = next_period_ret.fillna(0).values
    return group

# Linear regression
def Linear_Reg(reg_x_var, reg_y_var, intercept_fitting=True):
    """Fit an ordinary least-squares regression on (reg_x_var, reg_y_var)
    and return the fitted model object.

    intercept_fitting toggles whether an intercept term is estimated.
    """
    regressor = LinearRegression(fit_intercept=intercept_fitting)
    regressor.fit(reg_x_var, reg_y_var)
    return regressor
# Machine-learning models
# 1 Ridge Regression
class RidgeCV_Reg:
    """Ridge regression whose penalty strength is selected from `alphas`
    by 5-fold cross-validation on negative MSE.
    """

    def __init__(self, alphas=(0.1, 1.0, 10.0)):
        self.model = RidgeCV(alphas=alphas, scoring='neg_mean_squared_error', cv=5)

    def fit(self, x, y):
        """Fit the underlying RidgeCV estimator on features x and target y."""
        self.model.fit(x, y)

    def predict(self, prd_x):
        """Return predictions for the feature matrix prd_x."""
        return self.model.predict(prd_x)


# 2 Lasso Regression
class LassoCV_Reg:
    """Lasso regression whose penalty strength is selected from `alphas`
    by 5-fold cross-validation.
    """

    def __init__(self, alphas=(0.1, 1.0, 10.0)):
        self.model = LassoCV(alphas=alphas, cv=5)

    def fit(self, x, y):
        """Fit the underlying LassoCV estimator on features x and target y."""
        self.model.fit(x, y)

    def predict(self, prd_x):
        """Return predictions for the feature matrix prd_x."""
        return self.model.predict(prd_x)

# 3 ElasticNet Regression
class ElasticNet_Reg:
    """Elastic-net regression: `l1_ratio` fixes the L1/L2 mix, and the
    penalty strength is selected from `alphas` by 5-fold cross-validation.
    """

    def __init__(self, l1_ratio=0.5, alphas=(0.1, 1.0, 10.0)):
        self.model = ElasticNetCV(l1_ratio=l1_ratio, alphas=alphas, cv=5)

    def fit(self, x, y):
        """Fit the underlying ElasticNetCV estimator on features x and target y."""
        self.model.fit(x, y)

    def predict(self, prd_x):
        """Return predictions for the feature matrix prd_x."""
        return self.model.predict(prd_x)

# 4 Gradient boosting (xgboost replacement)
# BUG FIX: the original body called `xgb.XGBRegressor`, but `xgb` is never
# imported anywhere in this file, so instantiation raised NameError. Per the
# import comment at the top of the file, scikit-learn's GradientBoostingRegressor
# is the intended substitute.
class XGBoost_Reg:
    """Gradient-boosted regression trees, wrapped in the fit/predict
    interface shared by the other model classes in this file.

    Implemented with sklearn's GradientBoostingRegressor (no xgboost dependency).
    """

    def __init__(self, **params):
        """
        Initialize the boosting model.
        :param params: keyword hyperparameters overriding the defaults below
        """
        # Defaults mirror the original XGBoost configuration. The xgboost-only
        # 'objective' key is dropped: GradientBoostingRegressor minimizes
        # squared error by default and does not accept that parameter.
        default_params = {'n_estimators': 100, 'max_depth': 3, 'learning_rate': 0.1}
        self.model = GradientBoostingRegressor(**{**default_params, **params})

    def fit(self, x, y):
        """
        Train the model.
        :param x: training features
        :param y: training targets
        """
        self.model.fit(x, y)

    def predict(self, prd_x):
        """
        Predict.
        :param prd_x: feature matrix to score
        :return: predicted values
        """
        return self.model.predict(prd_x)

# Alias for ensemble_predictions(), which refers to this name but had no
# definition anywhere in the file.
GradientBoosting_Reg = XGBoost_Reg

def train_model_ml(model_class, x, y, prd_x, **kwargs):
    """Train `model_class(**kwargs)` on the first 70% of (x, y), print the
    out-of-sample R² measured on the remaining 30%, and return a tuple of
    (predictions for prd_x, that R²).
    """
    train_x, test_x, train_y, test_y = preprocess_data(x, y)

    model = model_class(**kwargs)
    model.fit(train_x, train_y)

    R2 = metrics.r2_score(test_y, model.predict(test_x))
    print(f'R2: {R2}')

    return model.predict(prd_x), R2

#%% Neural-network models
# 1 BP (plain feed-forward network)
class BP(nn.Module):
    """Single-hidden-layer perceptron: Linear -> ReLU -> Linear."""

    def __init__(self, input_size, hidden_size=10, output_size=1, num_layers=1):
        super().__init__()
        # num_layers is accepted for signature parity with the other models; unused here.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)
        self.init_weights()

    def forward(self, x):
        return self.fc2(self.relu(self.fc1(x)))

    def init_weights(self):
        """Zero every bias and draw weights uniformly from [-0.1, 0.1]."""
        bound = 0.1
        for layer in (self.fc1, self.fc2):
            layer.bias.data.zero_()
            layer.weight.data.uniform_(-bound, bound)

# 2 CNN
class CNN(nn.Module):
    """1-D convolution over the feature axis followed by a linear read-out."""

    def __init__(self, input_size, hidden_size=10, output_size=1, num_layers=1):
        super().__init__()
        # num_layers is accepted for signature parity with the other models; unused here.
        # kernel_size=3 with padding=1 preserves the feature length.
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=hidden_size, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.fc1 = nn.Linear(hidden_size * input_size, output_size)
        self.init_weights()

    def forward(self, x):
        feats = self.conv1(x.unsqueeze(1))       # (batch, features) -> (batch, hidden, features)
        feats = self.relu(feats)
        flat = feats.reshape(feats.size(0), -1)  # flatten channel and feature axes
        return self.fc1(flat)

    def init_weights(self):
        """Zero every bias and draw weights uniformly from [-0.1, 0.1]."""
        bound = 0.1
        for layer in (self.conv1, self.fc1):
            layer.bias.data.zero_()
            layer.weight.data.uniform_(-bound, bound)

# 3 Wide & Deep network
class WideDeep(nn.Module):
    """Sum of a linear (wide) path and a one-hidden-layer MLP (deep) path."""

    def __init__(self, input_size, hidden_size=10, output_size=1, num_layers=1):
        super().__init__()
        # num_layers is accepted for signature parity with the other models; unused here.
        self.fc_wide = nn.Linear(input_size, output_size)
        self.fc1_deep = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2_deep = nn.Linear(hidden_size, output_size)
        self.init_weights()

    def forward(self, x):
        deep = self.fc2_deep(self.relu(self.fc1_deep(x)))
        return self.fc_wide(x) + deep

    def init_weights(self):
        """Zero every bias and draw weights uniformly from [-0.1, 0.1]."""
        bound = 0.1
        for layer in (self.fc_wide, self.fc1_deep, self.fc2_deep):
            layer.bias.data.zero_()
            layer.weight.data.uniform_(-bound, bound)

# 4 TabNet
class TabNet_Reg:
    """Thin wrapper giving TabNetRegressor the fit/predict interface used
    by the other model classes in this file.
    """

    def __init__(self, **params):
        self.model = TabNetRegressor(**params)

    def fit(self, x, y):
        """Train for up to 100 epochs with early stopping (patience 10).
        y is reshaped to a column vector because TabNet expects 2-D targets.
        """
        self.model.fit(X_train=x, y_train=y.reshape(-1, 1), max_epochs=100, patience=10,
                       batch_size=1024, virtual_batch_size=128)

    def predict(self, prd_x):
        """Return predictions flattened to a 1-D array."""
        return self.model.predict(prd_x).reshape(-1)

def train_model_nn(model_class, x, y, prd_x, hidden_size=10, output_size=1, num_layers=1, epochs=50,
                   **kwargs):  # illustrative defaults; tune in practice
    """Train a neural model on the first 70% of (x, y), print the test-set R²
    on the remaining 30%, and return (predictions for prd_x, that R²).

    model_class is either one of the nn.Module subclasses above (BP / CNN /
    WideDeep), trained full-batch with AdamW + MSE, or TabNet_Reg, which runs
    its own internal training loop. x, y, prd_x are pandas objects (`.values`
    is read below).

    BUG FIX: the TabNet branch previously passed torch tensors
    (`test_x.cpu()`, `prd_x.cpu()`) to predict() while fit() received numpy
    arrays; both now receive numpy arrays consistently. The duplicated
    `model_class == TabNet_Reg` checks are also merged into one branch.
    """
    input_size = x.shape[1]
    train_x, test_x, train_y, test_y = preprocess_data(x, y)

    train_x = torch.tensor(train_x.values, dtype=torch.float32).to(device)
    train_y = torch.tensor(train_y.values, dtype=torch.float32).unsqueeze(-1).to(device)
    test_x = torch.tensor(test_x.values, dtype=torch.float32).to(device)
    test_y = torch.tensor(test_y.values, dtype=torch.float32).unsqueeze(-1).to(device)
    prd_x = torch.tensor(prd_x.values, dtype=torch.float32).to(device)

    if model_class == TabNet_Reg:
        # NOTE(review): TabNetRegressor may infer input_dim during fit() rather
        # than accept it in the constructor — verify this kwarg is supported.
        model = model_class(input_dim=input_size, **kwargs)
        # TabNet manages its own training loop and expects numpy inputs.
        model.fit(train_x.cpu().numpy(), train_y.cpu().numpy().squeeze())
        test_result = model.predict(test_x.cpu().numpy())
        truth = test_y.cpu().numpy()
        R2 = metrics.r2_score(truth, test_result)
        print(f'R2: {R2}')

        prd_result = model.predict(prd_x.cpu().numpy())
    else:
        model = model_class(input_size=input_size, hidden_size=hidden_size, output_size=output_size,
                            num_layers=num_layers).to(device)
        criterion = nn.MSELoss()
        optimizer = torch.optim.AdamW(model.parameters(), lr=0.001)
        # Exponential decay: multiply the learning rate by 0.95 every epoch.
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.95)

        for epoch in range(1, epochs + 1):
            model.train()
            optimizer.zero_grad()
            output = model(train_x)  # full-batch training
            loss = criterion(output, train_y)
            loss.backward()
            optimizer.step()
            scheduler.step()

            if epoch % 10 == 0:
                print(f'Epoch {epoch}, Loss: {loss.item()}')

        model.eval()
        with torch.no_grad():
            test_result = model(test_x).cpu().numpy()
            truth = test_y.cpu().numpy()
            R2 = metrics.r2_score(truth, test_result)
            print(f'R2: {R2}')

            prd_result = model(prd_x).cpu().numpy().squeeze()

    return prd_result, R2


# # Example calls
# x = pd.DataFrame(np.random.randn(100, 6))
# y = pd.Series(np.random.randn(100))
# prd_x = pd.DataFrame(np.random.randn(10, 6))
# # Call the linear regression model
# result = Linear_Reg(x, y, False).predict(prd_x)
# # Call the machine-learning models
# result, r2 = train_model_ml(RidgeCV_Reg, x, y, prd_x, alphas=(0.1, 1.0, 10.0))
# result, r2 = train_model_ml(LassoCV_Reg, x, y, prd_x, alphas=(0.1, 1.0, 10.0))
# result, r2 = train_model_ml(ElasticNet_Reg, x, y, prd_x, l1_ratio=0.5, alphas=(0.1, 1.0, 10.0))
# result, r2 = train_model_ml(XGBoost_Reg, x, y, prd_x)
# # # Call the neural-network models
# result, r2 = train_model_nn(BP, x, y, prd_x)
# result, r2 = train_model_nn(CNN, x, y, prd_x)
# result, r2 = train_model_nn(WideDeep, x, y, prd_x)
# result, r2 = train_model_nn(TabNet_Reg, x, y, prd_x)


# Model ensembling
def ensemble_predictions(x, y, prd_x):
    """Train every model in this file on (x, y) and return the equal-weight
    average of their predictions for prd_x.

    BUG FIX: the original model list referenced `GradientBoosting_Reg`, a
    name defined nowhere in this file, so calling this function raised
    NameError. sklearn's GradientBoostingRegressor (imported at the top of
    the file) is used instead; it already satisfies the instantiate/fit/
    predict protocol that train_model_ml expects of a model class.
    """
    ml_models = [RidgeCV_Reg, LassoCV_Reg, ElasticNet_Reg, GradientBoostingRegressor]
    nn_models = [BP, CNN, WideDeep, TabNet_Reg]

    all_preds = []
    for model_class in ml_models:
        preds, _ = train_model_ml(model_class, x, y, prd_x)
        all_preds.append(preds)
    for model_class in nn_models:
        preds, _ = train_model_nn(model_class, x, y, prd_x)
        all_preds.append(preds)

    # Plain OLS without intercept, matching the original behavior.
    all_preds.append(Linear_Reg(x, y, False).predict(prd_x))

    # Equal-weight average across all models.
    return np.mean(np.array(all_preds), axis=0)

# Context manager that suppresses unwanted console output
@contextmanager
def suppress_stdout():
    """Temporarily redirect sys.stdout to the null device.

    stdout is restored (and the null device closed) even if the body raises.
    """
    saved_stdout = sys.stdout
    devnull = open(os.devnull, 'w')
    try:
        sys.stdout = devnull
        yield
    finally:
        sys.stdout = saved_stdout
        devnull.close()

# # Example call
# x = pd.DataFrame(np.random.randn(100, 6))
# y = pd.Series(np.random.randn(100))
# prd_x = pd.DataFrame(np.random.randn(10, 6))
# with suppress_stdout():
#     final_result = ensemble_predictions(x, y, prd_x)


# %4 Optimization module (main functionality)

# [The main script starts here!]
time_index = meta_data['time'].unique()  # ordered array of trading dates
regression_window = 20  # rolling 20-day estimation window
stock_selection_num = 15  # number of stocks selected each day

# Feature list: every column except the first two (presumably time and code)
# and the last one — TODO confirm against the CSV schema.
pricing_factor_names = meta_data[meta_data.columns[2 : len(meta_data.columns)-1]].columns # feature list

# Attach next-period returns per stock, then restore (time, code) ordering.
meta_data_1 = meta_data.groupby("code").apply(return_calc).reset_index(drop=True).\
                sort_values(['time', 'code']).reset_index(drop=True)
# Keep only the identifiers, the factor columns, and the prediction target.
meta_data_2 = meta_data_1[["time", "code"] + list(pricing_factor_names) + ["next_ret"]]
del meta_data_1  # free the intermediate frame

# Rolling backtest: compute the daily return of the selected portfolio.
cs_returns = []
for loop_var in range(regression_window, len(time_index)):
    # Estimation window [reg_start_date, reg_end_date]; prediction_date is the next day.
    reg_start_date = time_index[loop_var - regression_window]
    reg_end_date = time_index[loop_var - 1]
    prediction_date = time_index[loop_var]

    # temp_df: factor values inside the window plus the prediction day.
    # NOTE: `*` on boolean Series acts as element-wise AND (equivalent to `&`).
    temp_df = meta_data_2[(meta_data_2["time"] >= reg_start_date) * (meta_data_2["time"] <=
                                                                     prediction_date)].sort_values(['time', 'code']).reset_index(drop=True)
    # Map each factor to [0, 1] via percentile ranks; missing values get the midpoint 0.50.
    temp_df[list(pricing_factor_names)] = temp_df[list(pricing_factor_names)].rank(pct=True).fillna(0.50).values

    # Estimation sample (window days only) and the prediction-day feature matrix.
    reg_x = temp_df[(temp_df["time"] >= reg_start_date) * (temp_df["time"] <= reg_end_date)][list(pricing_factor_names)]
    reg_y = temp_df[(temp_df["time"] >= reg_start_date) * (temp_df["time"] <= reg_end_date)]["next_ret"]
    prd_x = temp_df[(temp_df["time"] == prediction_date)][list(pricing_factor_names)]

    # Predict next-period returns with TabNet (linear baseline kept for reference).
    # preds_ret = Linear_Reg(reg_x, reg_y, False).predict(prd_x)
    preds_ret, r2 = train_model_nn(TabNet_Reg, reg_x, reg_y, prd_x)

    # Positional indices of the stock_selection_num stocks with the largest predicted returns.
    stock_selection_index = np.argsort(preds_ret)[-stock_selection_num:]

    # Realized equal-weight return of the selected stocks on the prediction day.
    cs_returns.append(
        np.mean(
            temp_df[(temp_df["time"] == prediction_date)]["next_ret"].iloc[stock_selection_index]
        )
    )
    print(prediction_date)
    print("Net Equity is %4.2f." % np.sum(cs_returns))

# draw plot

# Equity curve: cumulative sum of the daily portfolio returns.
pyplot.plot(np.cumsum(cs_returns))
pyplot.show()