# %% 使用指南！
## 可以本地写完以后，然后在这里更新代码。直接点击编辑就可以，更新以后会推送到主页面。
## 在下面的提交信息处可以简单写一下更改了什么，然后点提交即可。



# %1 导入包
# from matplotlib import pyplot
# from scipy.stats import zscore
import pandas as pd
import numpy as np
import warnings
import os
import sys
from contextlib import contextmanager
import torch 
import torch.nn as nn
from sklearn import metrics
# from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV, LassoCV, ElasticNetCV
# from sklearn.metrics import r2_score
from pytorch_tabnet.tab_model import TabNetRegressor  # pip install pytorch-tabnet
import xgboost as xgb  # pip install xgboost
import matplotlib.pyplot as plt
from skopt import BayesSearchCV, space, plots, gp_minimize # pip install scikit-optimize
warnings.filterwarnings("ignore")

# Select the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)  # confirm whether the GPU is being used


# %2 Load data
path = r"D:\硕士打工日志\5课程作业\量化投资课\量化作业2\EquityData"
files = os.listdir(path)

# Filename suffixes identifying each data category
market_info_extension = "_Prices.csv"
index_info_extension = "_index_information.csv"
industry_info_extension = "_Industry.csv"
fundamental_info_extension = "_fundamentals.csv"
pricing_factors_info_extension = "_total_factors.csv"
valuation_info_extension = "valuation_information.csv"

# Select the files belonging to each category by suffix
# NOTE(review): the fundamental and valuation lists are built but never
# loaded/merged below — confirm whether that is intentional.
list_market_info = [elements for elements in files if market_info_extension in elements]
list_index_info = [elements for elements in files if index_info_extension in elements]
list_industry_info = [elements for elements in files if industry_info_extension in elements]
list_fundamental_info = [elements for elements in files if fundamental_info_extension in elements]
list_pricing_factors_info = [elements for elements in files if pricing_factors_info_extension in elements]
list_valuation_info = [elements for elements in files if valuation_info_extension in elements]

# Load each category, concatenate the per-file frames, drop the first
# (unnamed index) column, and sort by (time, code).
market_info_df = [pd.read_csv(path + os.sep + elements) for elements in list_market_info]
market_info_df = pd.concat(market_info_df).reset_index(
    drop=True).iloc[:, 1:].sort_values(['time', 'code']).reset_index(drop=True)

index_info_df = [pd.read_csv(path + os.sep + elements) for elements in list_index_info]
index_info_df = pd.concat(index_info_df).reset_index(
    drop=True).iloc[:, 1:].sort_values(['time', 'code']).reset_index(drop=True)

industry_info_df = [pd.read_csv(path + os.sep + elements) for elements in list_industry_info]
industry_info_df = pd.concat(industry_info_df).reset_index(
    drop=True).iloc[:, 1:].sort_values(['time', 'code']).reset_index(drop=True)

pricing_factors_info_df = [pd.read_csv(path + os.sep + elements) for elements in list_pricing_factors_info]
pricing_factors_info_df = pd.concat(pricing_factors_info_df).reset_index(
    drop=True).iloc[:, 1:].sort_values(['time', 'code']).reset_index(drop=True)
# Factor columns: skip time/code at the front and the last two columns
# (presumably price/return fields — TODO confirm against the CSV schema).
pricing_factor_names = pricing_factors_info_df.columns[2:-2]

# Merge all categories on (time, code); inner joins keep only rows present
# in every source.
meta_data = pd.merge(
    market_info_df,
    index_info_df,
    on=["time", "code"],
    how="inner"
)
meta_data = pd.merge(
    meta_data,
    industry_info_df,
    on=["time", "code"],
    how="inner"
)
meta_data = pd.merge(
    meta_data,
    pricing_factors_info_df,
    on=["time", "code"],
    how="inner"
)

# data = pd.read_csv('000300.XSHG_meta_data.csv')
meta_data = meta_data.sort_values(['time', 'code']).reset_index(drop=True)
# meta_data = data.sort_values(['time', 'code']).reset_index(drop=True)
# Keep only stocks outside every index (see the universe notes below).
meta_data = meta_data[meta_data['index'] == 'not in any index'].sort_values(['time', 'code']).reset_index(drop=True)
# Drop the duplicated close column introduced by the merge suffixing.
if 'close_y' in meta_data.columns:
    meta_data.drop(columns=['close_y'], inplace=True)


# Universe options for the 'index' filter above:
# 000852.XSHG -> CSI 1000 constituents
# 000300.XSHG -> CSI 300 constituents
# 000905.XSHG -> CSI 500 constituents
# 'not in any index' -> the full remaining universe

# %3 构建预测模型

# 数据预处理
def preprocess_data(x, y, train_ratio=0.7):
    """Chronologically split features and targets into train/test sets.

    The first ``train_ratio`` fraction of rows becomes the training set and
    the remainder the test set. No shuffling is performed, preserving time
    order (important for financial data to avoid look-ahead bias).

    Args:
        x: Feature data supporting integer slicing (DataFrame/array/list).
        y: Targets aligned row-for-row with ``x``.
        train_ratio: Fraction of samples used for training (default 0.7,
            matching the previous hard-coded split).

    Returns:
        Tuple ``(train_x, test_x, train_y, test_y)``.
    """
    train_size = int(len(x) * train_ratio)
    train_x, test_x = x[:train_size], x[train_size:]
    train_y, test_y = y[:train_size], y[train_size:]
    return train_x, test_x, train_y, test_y

def return_calc(group):
    """Attach next-period returns: next_ret[t] = close_x[t+1] / close_x[t] - 1.

    The final row (and any row without a following price) gets 0. Mutates and
    returns ``group`` for use with ``DataFrame.groupby(...).apply``.
    """
    period_ret = group['close_x'].pct_change()
    group['next_ret'] = period_ret.shift(-1).fillna(0).values
    return group

# 线性回归
def Linear_Reg(reg_x_var, reg_y_var, intercept_fitting=True):
    """Fit an OLS regression of ``reg_y_var`` on ``reg_x_var``.

    Args:
        reg_x_var: Feature matrix.
        reg_y_var: Target vector.
        intercept_fitting: Whether to estimate an intercept term.

    Returns:
        The fitted sklearn ``LinearRegression`` estimator.
    """
    regressor = LinearRegression(fit_intercept=intercept_fitting)
    regressor.fit(reg_x_var, reg_y_var)
    return regressor


# 机器学习模型
# 1 Ridge Regression
class RidgeCV_Reg:
    """Cross-validated ridge regression with a uniform fit/predict interface."""

    def __init__(self, alphas=(0.1, 1.0, 10.0)):
        # 5-fold CV picks the best alpha from the candidate grid by MSE.
        self.model = RidgeCV(alphas=alphas, scoring='neg_mean_squared_error', cv=5)

    def fit(self, x, y):
        """Fit the ridge model on training data."""
        self.model.fit(x, y)

    def predict(self, prd_x):
        """Return predictions for ``prd_x``."""
        return self.model.predict(prd_x)


# 2 Lasso Regression
class LassoCV_Reg:
    """Cross-validated lasso regression with a uniform fit/predict interface."""

    def __init__(self, alphas=(0.1, 1.0, 10.0)):
        # 5-fold CV selects the best alpha from the candidate grid.
        self.model = LassoCV(alphas=alphas, cv=5)

    def fit(self, x, y):
        """Fit the lasso model on training data."""
        self.model.fit(x, y)

    def predict(self, prd_x):
        """Return predictions for ``prd_x``."""
        return self.model.predict(prd_x)


# 3 ElasticNet Regression 
class ElasticNet_Reg:
    """Cross-validated elastic-net regression (mixed L1/L2 penalty)."""

    def __init__(self, l1_ratio=0.5, alphas=(0.1, 1.0, 10.0)):
        # 5-fold CV selects alpha; l1_ratio balances lasso vs ridge penalties.
        self.model = ElasticNetCV(l1_ratio=l1_ratio, alphas=alphas, cv=5)

    def fit(self, x, y):
        """Fit the elastic-net model on training data."""
        self.model.fit(x, y)

    def predict(self, prd_x):
        """Return predictions for ``prd_x``."""
        return self.model.predict(prd_x)
        

# 4 xgboost
class XGBoost_Reg:
    """Gradient-boosted tree regressor (XGBoost) with overridable defaults."""

    def __init__(self, **params):
        # Caller-supplied params take precedence over these baselines.
        settings = dict(n_estimators=100, max_depth=3, learning_rate=0.1,
                        objective='reg:squarederror')
        settings.update(params)
        self.model = xgb.XGBRegressor(**settings)

    def fit(self, x, y):
        """Fit the boosted-tree model on training data."""
        self.model.fit(x, y)

    def predict(self, prd_x):
        """Return predictions for ``prd_x``."""
        return self.model.predict(prd_x)


def train_model_ml(model_class, x, y, prd_x, top_n=6, **kwargs):
    """Train a fit/predict-style model and predict on a new cross-section.

    Splits ``(x, y)`` chronologically via ``preprocess_data``, fits the model
    on the training part, prints out-of-sample R^2 on the test part, then
    predicts for ``prd_x``.

    Args:
        model_class: Wrapper class exposing fit/predict (e.g. RidgeCV_Reg).
        x, y: In-sample features and targets.
        prd_x: Features of the prediction cross-section.
        top_n: How many of the most important features to report when the
            model is XGBoost (was previously hard-coded to 6).
        **kwargs: Forwarded to ``model_class``'s constructor.

    Returns:
        ``(prd_result, R2)`` for generic models; for ``XGBoost_Reg`` also a
        third element ``top_features`` with the column indices of the
        ``top_n`` most important features (ascending importance).
    """
    train_x, test_x, train_y, test_y = preprocess_data(x, y)
    model = model_class(**kwargs)
    model.fit(train_x, train_y)
    test_result = model.predict(test_x)
    R2 = metrics.r2_score(test_y, test_result)
    print(f'R2: {R2}')

    prd_result = model.predict(prd_x)

    # XGBoost additionally exposes feature importances; surface the strongest
    # factors so the ensemble can restrict the other models to them.
    # Identity comparison (`is`) is the correct check for a class object.
    if model_class is XGBoost_Reg:
        feature_importances = model.model.feature_importances_
        top_features = np.argsort(feature_importances)[-top_n:]
        return prd_result, R2, top_features

    return prd_result, R2


# 神经网络模型
# 1 BP
class BP(nn.Module):
    """Single-hidden-layer feed-forward (back-propagation) regressor."""

    def __init__(self, input_size, hidden_size=10, output_size=1, num_layers=1):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)
        self.init_weights()

    def forward(self, x):
        """Map a (batch, input_size) tensor to (batch, output_size)."""
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)

    def init_weights(self):
        """Zero all biases and draw weights uniformly from [-0.1, 0.1]."""
        bound = 0.1
        for layer in (self.fc1, self.fc2):
            layer.bias.data.zero_()
            layer.weight.data.uniform_(-bound, bound)
        

# 2 CNN
class CNN(nn.Module):
    """1-D convolutional regressor over the feature axis."""

    def __init__(self, input_size, hidden_size=10, output_size=1, num_layers=1):
        super().__init__()
        # Treat the feature vector as a length-`input_size` signal, 1 channel.
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=hidden_size, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.fc1 = nn.Linear(hidden_size * input_size, output_size)
        self.init_weights()

    def forward(self, x):
        """Map a (batch, input_size) tensor to (batch, output_size)."""
        feat = self.relu(self.conv1(x.unsqueeze(1)))  # (batch, hidden, input_size)
        flat = feat.reshape(feat.size(0), -1)          # flatten channels x positions
        return self.fc1(flat)

    def init_weights(self):
        """Zero all biases and draw weights uniformly from [-0.1, 0.1]."""
        bound = 0.1
        for layer in (self.conv1, self.fc1):
            layer.bias.data.zero_()
            layer.weight.data.uniform_(-bound, bound)


# 3 Wide & Deep 神经网络
class WideDeep(nn.Module):
    """Wide & Deep regressor: a linear (wide) path plus a one-hidden-layer
    MLP (deep) path, summed at the output."""

    def __init__(self, input_size, hidden_size=10, output_size=1, num_layers=1):
        super().__init__()
        self.fc_wide = nn.Linear(input_size, output_size)
        self.fc1_deep = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2_deep = nn.Linear(hidden_size, output_size)
        self.init_weights()

    def forward(self, x):
        """Return wide(x) + deep(x) for a (batch, input_size) tensor."""
        deep = self.fc2_deep(self.relu(self.fc1_deep(x)))
        return self.fc_wide(x) + deep

    def init_weights(self):
        """Zero all biases and draw weights uniformly from [-0.1, 0.1]."""
        bound = 0.1
        for layer in (self.fc_wide, self.fc1_deep, self.fc2_deep):
            layer.bias.data.zero_()
            layer.weight.data.uniform_(-bound, bound)

# 4 TabNet 
class TabNet_Reg:
    """TabNetRegressor wrapper exposing the common fit/predict interface."""

    def __init__(self, **params):
        self.model = TabNetRegressor(**params)

    def fit(self, x, y):
        """Train for up to 10 epochs; TabNet requires a 2-D target array."""
        target = y.reshape(-1, 1)
        self.model.fit(X_train=x, y_train=target, max_epochs=10, patience=10,
                       batch_size=1024, virtual_batch_size=128)

    def predict(self, prd_x):
        """Return a flat 1-D prediction array for ``prd_x``."""
        return self.model.predict(prd_x).reshape(-1)


def train_model_nn(model_class, x, y, prd_x, hidden_size=10, output_size=1, num_layers=1, epochs=100, **kwargs):
    """Train a neural-network model and predict on a new cross-section.

    Splits ``(x, y)`` chronologically, then either trains a torch ``nn.Module``
    (full-batch AdamW + MSE with a per-step LR decay) or delegates to the
    TabNet wrapper. Prints out-of-sample R^2 and returns predictions.

    Args:
        model_class: BP/CNN/WideDeep (nn.Module subclasses) or TabNet_Reg.
        x, y, prd_x: pandas objects (``.values`` is used to build tensors).
        hidden_size, output_size, num_layers: Network dimensions (torch models).
        epochs: Number of full-batch training iterations (torch models).
        **kwargs: Extra constructor arguments forwarded to TabNet_Reg.

    Returns:
        ``(prd_result, R2)``: prediction array for ``prd_x`` and test-set R^2.
    """
    input_size = x.shape[1]
    train_x, test_x, train_y, test_y = preprocess_data(x, y)

    train_x = torch.tensor(train_x.values, dtype=torch.float32).to(device)
    train_y = torch.tensor(train_y.values, dtype=torch.float32).unsqueeze(-1).to(device)
    test_x = torch.tensor(test_x.values, dtype=torch.float32).to(device)
    test_y = torch.tensor(test_y.values, dtype=torch.float32).unsqueeze(-1).to(device)
    prd_x = torch.tensor(prd_x.values, dtype=torch.float32).to(device)

    # Single if/else replaces the two separate `model_class != TabNet_Reg`
    # checks; identity comparison (`is`) is the right test for a class object.
    if model_class is TabNet_Reg:
        model = model_class(input_dim=input_size, **kwargs)
        # BUG FIX: TabNetRegressor operates on numpy arrays; passing torch
        # tensors (the old `test_x.cpu()` / `prd_x.cpu()`) fails inside its
        # dataset construction, so convert with `.numpy()` explicitly.
        model.fit(train_x.cpu().numpy(), train_y.cpu().numpy().squeeze())
        test_result = model.predict(test_x.cpu().numpy())
        truth = test_y.cpu().numpy()
        R2 = metrics.r2_score(truth, test_result)
        print(f'R2: {R2}')

        prd_result = model.predict(prd_x.cpu().numpy())
    else:
        model = model_class(input_size=input_size, hidden_size=hidden_size,
                            output_size=output_size, num_layers=num_layers).to(device)
        criterion = nn.MSELoss()
        optimizer = torch.optim.AdamW(model.parameters(), lr=0.001)
        # Multiply the LR by 0.95 after every epoch (step_size=1).
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.95)

        for epoch in range(1, epochs + 1):
            model.train()
            optimizer.zero_grad()
            output = model(train_x)
            loss = criterion(output, train_y)
            loss.backward()
            optimizer.step()
            scheduler.step()

            if epoch % 10 == 0:
                print(f'Epoch {epoch}, Loss: {loss.item()}')

        model.eval()
        with torch.no_grad():
            test_result = model(test_x).cpu().numpy()
            truth = test_y.cpu().numpy()
            R2 = metrics.r2_score(truth, test_result)
            print(f'R2: {R2}')

            prd_result = model(prd_x).cpu().numpy().squeeze()

    return prd_result, R2


# 模型集成
def ensemble_predictions(x, y, prd_x):
    
    all_preds = []
    
    xgb_preds, _, top_features = train_model_ml(XGBoost_Reg, x, y, prd_x)
    x_top = x.iloc[:, top_features]
    prd_x_top = prd_x.iloc[:, top_features]
    all_preds.append(xgb_preds)
    
    # models = [RidgeCV_Reg, LassoCV_Reg, ElasticNet_Reg, BP, CNN, WideDeep] 
    models = [RidgeCV_Reg, LassoCV_Reg, ElasticNet_Reg, BP, CNN, WideDeep, TabNet_Reg] # 默认全用
    for model_class in models:
        if model_class in [RidgeCV_Reg, LassoCV_Reg, ElasticNet_Reg]:
            preds, _ = train_model_ml(model_class, x_top, y, prd_x_top)
        else:
            preds, _ = train_model_nn(model_class, x_top, y, prd_x_top)
        all_preds.append(preds)
    
    pred_rg = Linear_Reg(x, y, False).predict(prd_x)
    all_preds.append(pred_rg)
    
    all_preds = np.array(all_preds)
    final_preds = np.mean(all_preds, axis=0)
    
    return final_preds



# 设置一个函数，用于抑制无用输出
@contextmanager
def suppress_stdout():
    """Temporarily redirect stdout to os.devnull, restoring it on exit.

    Used to silence the per-model training chatter inside the backtest loop.
    """
    saved = sys.stdout
    with open(os.devnull, 'w') as sink:
        sys.stdout = sink
        try:
            yield
        finally:
            sys.stdout = saved


# 示例调用
# x = pd.DataFrame(np.random.randn(100, 6))
# y = pd.Series(np.random.randn(100))
# prd_x = pd.DataFrame(np.random.randn(10, 6))
# with suppress_stdout():
#     final_result = ensemble_predictions(x, y, prd_x)

 
# %4 Optimization module (main functionality)
time_index = meta_data['time'].unique()
regression_window = 20  # trailing fitting window of 20 trading days
stock_selection_num = 15  # number of stocks held each period

# Compute per-stock next-period returns, then rebuild the factor column list
# (everything except identifiers, price and the return/label columns).
meta_data_1 = meta_data.groupby("code").apply(return_calc).reset_index(
    drop=True).sort_values(['time', 'code']).reset_index(drop=True)
pricing_factor_names = [idx for idx in meta_data_1.columns if idx not in ['time', 'code', 'close_x', 'next_ret', 'index']]
meta_data_2 = meta_data_1[["time", "code"] + list(pricing_factor_names) + ["next_ret"]]

# loop_var=23
cs_returns = []
for loop_var in range(regression_window, len(time_index)):
    reg_start_date = time_index[loop_var - regression_window]
    reg_end_date = time_index[loop_var - 1]
    prediction_date = time_index[loop_var]

    # Trailing-window slice; `*` between boolean Series acts as element-wise AND.
    temp_df = meta_data_2[(meta_data_2["time"] >= reg_start_date) * (meta_data_2["time"] <=
                                                                     prediction_date)].sort_values(['time', 'code']).reset_index(drop=True)
    # Percentile-rank the factors within the window; NaNs map to the median.
    temp_df[list(pricing_factor_names)] = temp_df[list(pricing_factor_names)].rank(pct=True).fillna(0.50).values

    # Fit on [reg_start_date, reg_end_date], predict on prediction_date.
    reg_x = temp_df[(temp_df["time"] >= reg_start_date) * (temp_df["time"] <= reg_end_date)][list(pricing_factor_names)]
    reg_y = temp_df[(temp_df["time"] >= reg_start_date) * (temp_df["time"] <= reg_end_date)]["next_ret"]
    prd_x = temp_df[(temp_df["time"] == prediction_date)][list(pricing_factor_names)]

    # preds_ret = Linear_Reg(reg_x, reg_y, False).predict(prd_x)
    # preds_ret = ensemble_predictions(reg_x, reg_y, prd_x)
    with suppress_stdout():
        preds_ret = ensemble_predictions(reg_x, reg_y, prd_x)

    # Hold the stocks with the highest predicted next-period returns.
    stock_selection_index = np.argsort(preds_ret)[-stock_selection_num:]

    cs_returns.append(
        np.mean(
            temp_df[(temp_df["time"] == prediction_date)]["next_ret"].iloc[stock_selection_index]
        )
    )
    print(prediction_date)
    print("Net Equity is %4.2f." % np.sum(cs_returns))


# Plot the cumulative-return (net equity) curve
plt.figure(figsize=(10, 6))
plt.plot(time_index[regression_window:], np.cumsum(cs_returns), label='Cumulative Returns')
plt.xlabel('Prediction Date')
plt.ylabel('Cumulative Returns')
plt.title('Cumulative Returns Over Time')
plt.xticks(rotation=45, fontsize=8)
plt.grid(True)
plt.show()

# 使用贝叶斯优化

def multi_model_quant(regression_window, stock_selection_num):
    """Run the rolling ensemble backtest and return per-period returns.

    For each date after the first ``regression_window`` days: rank-normalize
    the factors over the trailing window, fit the model ensemble on the
    window, predict next-period returns for the current cross-section, and
    hold the ``stock_selection_num`` stocks with the highest prediction.

    Args:
        regression_window: Number of trailing trading days used for fitting.
        stock_selection_num: Number of top-ranked stocks held each period.

    Returns:
        List of equal-weighted next-period portfolio returns, one per
        prediction date.
    """
    cs_returns = []
    for loop_var in range(regression_window, len(time_index)):
        reg_start_date = time_index[loop_var - regression_window]
        reg_end_date = time_index[loop_var - 1]
        prediction_date = time_index[loop_var]

        # Trailing-window slice; `*` on boolean Series is an element-wise AND.
        temp_df = meta_data_2[(meta_data_2["time"] >= reg_start_date) * (meta_data_2["time"] <=
                                                                         prediction_date)].sort_values(
            ['time', 'code']).reset_index(drop=True)
        # Percentile-rank the factors within the window; NaNs map to the median.
        temp_df[list(pricing_factor_names)] = temp_df[list(pricing_factor_names)].rank(pct=True).fillna(0.50).values

        reg_x = temp_df[(temp_df["time"] >= reg_start_date) * (temp_df["time"] <= reg_end_date)][list(pricing_factor_names)]
        reg_y = temp_df[(temp_df["time"] >= reg_start_date) * (temp_df["time"] <= reg_end_date)]["next_ret"]
        prd_x = temp_df[(temp_df["time"] == prediction_date)][list(pricing_factor_names)]

        # BUG FIX: ensemble_predictions returns a single prediction array, not
        # a (preds, top_features) pair — the old 2-way unpack raised a
        # ValueError on every iteration (and the follow-up print relied on it).
        with suppress_stdout():
            preds_ret = ensemble_predictions(reg_x, reg_y, prd_x)

        stock_selection_index = np.argsort(preds_ret)[-stock_selection_num:]

        cs_returns.append(
            np.mean(
                temp_df[(temp_df["time"] == prediction_date)]["next_ret"].iloc[stock_selection_index]
            )
        )
        print(prediction_date)
        print("Net Equity is %4.2f." % np.sum(cs_returns))
    return cs_returns

def objective(search_space):
    """skopt objective: negated cumulative return of the backtest.

    ``search_space`` is the parameter vector proposed by the optimizer,
    ordered as [regression_window, stock_selection_num]. ``gp_minimize``
    minimizes, so the cumulative return is negated to maximize it.
    """
    window, n_stocks = search_space[0], search_space[1]
    cumulative = np.sum(multi_model_quant(window, n_stocks))
    return -1 * cumulative

# BUG FIX: `search_space` was never defined before being passed to
# gp_minimize (NameError). Define the two integer dimensions that `objective`
# unpacks, in the same order it reads them.
search_space = [
    space.Integer(10, 40, name="regression_window"),    # trailing fitting window (days)
    space.Integer(5, 30, name="stock_selection_num"),   # portfolio size
]
# Bayesian optimization: Expected Improvement acquisition, 10 evaluations.
result = gp_minimize(func=objective, dimensions=search_space, acq_func="EI", n_calls=10)
regression_window, stock_selection_num = result.x
# Re-run the backtest once with the optimized parameters.
final_cs_ret = multi_model_quant(regression_window, stock_selection_num)

# Plot the cumulative-return (net equity) curve of the optimized strategy.
plt.figure(figsize=(10, 6))
plt.plot(time_index[regression_window:], np.cumsum(final_cs_ret), label='Cumulative Returns')
plt.xlabel('Prediction Date')
plt.ylabel('Cumulative Returns')
plt.title('Cumulative Returns Over Time')
plt.xticks(rotation=45, fontsize=8)
plt.grid(True)
plt.legend()  # the label above was previously set but never displayed
plt.show()