import os
import pickle
from typing import Callable, Protocol, Type, Union

import lightgbm as lgb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm

from custom_dataset import FeatureDataSource
from utils import get_code_market_info


class FitModel(Protocol):
    """Structural interface for models trained directly on a FeatureDataSource."""

    def fit(self, ds: FeatureDataSource) -> None:
        """Train the model on the data source."""
        pass

    def predict(self, ds: FeatureDataSource) -> np.ndarray:
        """Return predictions for every item in the data source."""
        pass


class MLModel(Protocol):
    """
    Structural interface for plain array-in / array-out estimators
    (implemented by e.g. GBDT; consumed by MLModelWrapped).
    """

    def fit(self, X: np.ndarray, Y: np.ndarray) -> None:
        """Train on feature matrix ``X`` and target vector ``Y``."""
        pass

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Return predictions for feature matrix ``X``."""
        pass

    def save_model(self) -> None:
        """Persist the model to storage."""
        pass


def get_all_future_codes():
    """
    Return the de-duplicated ``code_market`` values for instruments in the
    supported futures markets whose ``InstrumentID`` ends with "00" and does
    not contain "JQ".
    """
    markets = ["IF", "SF", "DF", "ZF", "GF", "INE"]
    code_info = get_code_market_info()
    ends_with_00 = code_info.apply(
        lambda row: row["InstrumentID"].endswith("00")
        and "JQ" not in row["InstrumentID"],
        axis=1,
    )
    selected = code_info[code_info["market"].isin(markets) & ends_with_00]
    return selected["code_market"].drop_duplicates().tolist()


class DatasetWrapped(Dataset):
    """Adapt a FeatureDataSource to the torch ``Dataset`` interface by delegation."""

    def __init__(self, ds: FeatureDataSource):
        self.ds = ds

    def __len__(self):
        # Delegate length to the underlying data source.
        return len(self.ds)

    def __getitem__(self, item):
        # Delegate item access to the underlying data source.
        return self.ds[item]


class MLPModel(nn.Module):
    """
    Feed-forward (MLP) regression model exposing the FitModel-style
    interface (``fit`` / ``predict`` over a FeatureDataSource).

    The network and optimizer are built lazily on the first ``fit`` call,
    because the input feature size is only known once a dataset is seen.
    """

    def __init__(self):
        super().__init__()
        # Lazy-init flag: the network is constructed on first fit().
        self.is_init = False

    def init(self, ds: FeatureDataSource):
        """
        Build the network and optimizer from the feature size of ``ds``.

        TODO: the hyper-parameters below should be exposed through the
        constructor instead of being hard-coded here.
        """
        X0, _ = ds[0]
        fsize = X0.shape[-1]
        self.hidden_size = 128
        layers = [nn.Linear(fsize, self.hidden_size), nn.Sigmoid(), nn.Dropout(p=0.05)]
        for _ in range(2):
            layers.extend(
                [
                    nn.Linear(self.hidden_size, self.hidden_size),
                    nn.Sigmoid(),
                    nn.Dropout(p=0.3),
                ]
            )
        layers.append(nn.Linear(self.hidden_size, 1))
        self.model = nn.Sequential(*layers)

        self.max_iter = 10
        self.early_stop = 100  # patience, in batches, before early stopping
        # Fall back to CPU when CUDA is unavailable instead of crashing
        # (the original hard-coded use_cuda=True).
        self.use_cuda = torch.cuda.is_available()
        self.device = "cuda" if self.use_cuda else "cpu"
        self.model = self.model.to(self.device)
        self.opti = optim.Adam(self.parameters(), lr=2e-4)
        # BUGFIX: mark initialization done so fit() does not rebuild the
        # network and optimizer (discarding learned weights) on every call.
        self.is_init = True

    def forward(self, x):
        return self.model(x)

    def fit(self, ds: FeatureDataSource):
        """Train on ``ds`` with per-batch MSE loss and a batch-level early stop."""
        if not self.is_init:
            self.init(ds)

        self.train()
        min_loss = 1e9
        not_update = 0  # batches since the best loss last improved

        data_loader = DataLoader(ds, batch_size=1, shuffle=True, num_workers=0)
        for epoch in range(self.max_iter):
            for Xk, Yk in tqdm(data_loader, desc="fit batch data:"):
                Xk = Xk.squeeze(0).to(dtype=torch.float32, device=self.device)
                # Flatten each sample's features into a single vector.
                Xk = Xk.reshape([Xk.shape[0], -1])
                Yk = Yk.squeeze(0).to(dtype=torch.float32, device=self.device)
                pred_Yk = self.forward(Xk)
                loss = F.mse_loss(pred_Yk.squeeze(-1), Yk)
                self.opti.zero_grad()
                loss.backward()
                self.opti.step()
                batch_loss = loss.item()
                print(f"Epoch: {epoch}, Loss: {batch_loss}")
                if batch_loss < min_loss:
                    min_loss = batch_loss
                    not_update = 0
                elif epoch > 1:  # train at least two full epochs before early stopping applies
                    not_update += 1
                if not_update >= self.early_stop:
                    break
            if not_update >= self.early_stop:
                break

    def predict(self, ds: FeatureDataSource):
        """
        Run inference batch by batch over ``ds``.

        :param ds: data source yielding (X, Y) batches
        :return: (predictions, list of (timestamp, code) pairs per row)
        """
        self.eval()
        all_batch_result = []
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            for i in range(len(ds)):
                Xk, _ = ds[i]
                Xk = torch.tensor(Xk, dtype=torch.float32, device=self.device)
                Xk = Xk.reshape([Xk.shape[0], -1])
                pred = self.forward(Xk)
                all_batch_result.append(pred.cpu().numpy())
        pred_Y = np.concatenate(all_batch_result, axis=0)
        time_and_code_list = ds.get_all_item_time_stock()
        return pred_Y, time_and_code_list

    def save_model(self, model_path: str):
        """Pickle the state_dict to ``model_path`` (parent dirs are created)."""
        os.makedirs(os.path.dirname(model_path), exist_ok=True)
        with open(model_path, "wb") as f:
            pickle.dump(self.state_dict(), f)

    def load_model(self, model_path: str):
        """
        Restore parameters previously written by ``save_model``.

        NOTE: pickle.load must only be used on trusted model files.
        """
        with open(model_path, "rb") as f:
            states = pickle.load(f)
            self.load_state_dict(states)


class GRUModel(MLPModel):
    """
    Recurrent (GRU) counterpart of MLPModel. ``save_model``/``load_model``
    are inherited unchanged; ``fit`` and ``predict`` differ because MLPModel
    consumes cross-sectional features while GRUModel consumes rolling
    time-series features.
    """

    def init(self, ds: FeatureDataSource):
        """
        Build the GRU and output layer from the feature size of ``ds``.

        TODO: the hyper-parameters below should be exposed through the
        constructor instead of being hard-coded here.
        """
        X0, _ = ds[0]
        fsize = X0.shape[-1]
        self.num_layers = 2
        self.hidden_size = 64
        self.gru = nn.GRU(
            fsize,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            batch_first=True,
            dropout=0.3,
        )
        self.fc = nn.Linear(self.hidden_size, 1)
        self.max_iter = 10
        self.early_stop = 100  # patience, in batches, before early stopping
        self.seq_len = 30
        # Fall back to CPU when CUDA is unavailable instead of crashing.
        self.use_cuda = torch.cuda.is_available()
        self.device = "cuda" if self.use_cuda else "cpu"
        self.gru = self.gru.to(self.device)
        self.fc = self.fc.to(self.device)
        self.opti = optim.Adam(self.parameters(), lr=2e-4)
        # BUGFIX: mark initialization done so fit() does not rebuild the
        # network and optimizer (discarding learned weights) on every call.
        self.is_init = True

    def forward(self, x):
        # x is fed with batch_first=True; h0 is the zero initial hidden state.
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        out, _ = self.gru(x, h0)
        out = out[:, -1, :]  # keep only the last time step's output
        return self.fc(out)  # map the hidden state to the scalar output

    def fit(self, ds: FeatureDataSource):
        """Train on ``ds`` with per-batch MSE loss and a batch-level early stop."""
        if not self.is_init:
            self.init(ds)

        self.train()
        min_loss = 1e9
        not_update = 0  # batches since the best loss last improved

        data_loader = DataLoader(ds, batch_size=1, shuffle=True, num_workers=0)
        for epoch in range(self.max_iter):
            for Xk, Yk in tqdm(data_loader, desc="fit batch data:"):
                Xk = Xk.squeeze(0).to(dtype=torch.float32, device=self.device)
                Yk = Yk.squeeze(0).to(dtype=torch.float32, device=self.device)
                pred_Yk = self.forward(Xk)
                loss = F.mse_loss(pred_Yk.squeeze(-1), Yk)
                self.opti.zero_grad()
                loss.backward()
                self.opti.step()
                batch_loss = loss.item()
                print(f"Epoch: {epoch}, Loss: {batch_loss}")
                if batch_loss < min_loss:
                    min_loss = batch_loss
                    not_update = 0
                elif epoch > 1:  # train at least two full epochs before early stopping applies
                    not_update += 1
                if not_update >= self.early_stop:
                    break
            if not_update >= self.early_stop:
                break

    def predict(self, ds: FeatureDataSource):
        """
        Run inference batch by batch over ``ds``.

        :param ds: data source yielding (X, Y) batches
        :return: (predictions, list of (timestamp, code) pairs per row)
        """
        self.eval()
        all_batch_result = []
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            for i in range(len(ds)):
                Xk, _ = ds[i]
                Xk = torch.tensor(Xk, dtype=torch.float32, device=self.device)
                pred = self.forward(Xk)
                all_batch_result.append(pred.cpu().numpy())
        pred_Y = np.concatenate(all_batch_result, axis=0)
        time_and_code_list = ds.get_all_item_time_stock()
        return pred_Y, time_and_code_list


class AGRUModel(GRUModel):
    """
    GRU + self-attention variant of GRUModel. Training (``fit``), prediction
    and the save/load helpers are inherited; only the network layout and
    ``forward`` differ.
    """

    def init(self, ds: FeatureDataSource):
        """
        Build the GRU, attention head and output layer from the feature size.

        TODO: the hyper-parameters below should be exposed through the
        constructor instead of being hard-coded here.
        """
        X0, _ = ds[0]
        fsize = X0.shape[-1]
        self.num_layers = 2
        self.hidden_size = 64
        self.gru = nn.GRU(
            fsize,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            batch_first=True,
            dropout=0.1,
        )
        # embed_dim must match the GRU hidden size (was a magic 64 literal).
        self.att = nn.MultiheadAttention(
            embed_dim=self.hidden_size, num_heads=1, batch_first=True, dropout=0.3
        )
        self.max_iter = 10
        self.early_stop = 100  # patience, in batches, before early stopping
        # NOTE(review): fc's input size depends on seq_len, so forward()
        # assumes sequences of exactly 30 steps -- confirm against the dataset.
        self.seq_len = 30
        # Fall back to CPU when CUDA is unavailable instead of crashing.
        self.use_cuda = torch.cuda.is_available()
        self.device = "cuda" if self.use_cuda else "cpu"
        self.fc = nn.Linear(self.hidden_size + self.seq_len, 1)
        self.gru = self.gru.to(self.device)
        self.att = self.att.to(self.device)
        self.fc = self.fc.to(self.device)
        self.opti = optim.Adam(self.parameters(), lr=2e-4)
        # BUGFIX: mark initialization done so the inherited fit() does not
        # rebuild the network and optimizer (discarding learned weights)
        # on every call.
        self.is_init = True

    def forward(self, x):
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        gru_out, _ = self.gru(x, h0)
        # Self-attention over the full sequence of GRU outputs.
        attn_output, attn_weights = self.att(gru_out, gru_out, gru_out)
        # NOTE(review): the attention *weights* of the last time step (not
        # the attention output) are concatenated as extra features -- confirm
        # this is intentional rather than a slip for attn_output[:, -1, :].
        fc_in = torch.cat([gru_out[:, -1, :], attn_weights[:, -1, :]], dim=-1)
        return self.fc(fc_in)  # map concatenated features to the scalar output


class GBDT:
    """
    Wrap a LightGBM booster behind the MLModel-style interface
    (``fit(X, Y)`` / ``predict(X)``).
    """

    def __init__(self):
        # Lazy-init flag: hyper-parameters are set on first fit().
        self.is_init = False

    def init(self):
        """
        Set the LightGBM training parameters.

        TODO: these should be exposed through the constructor instead of
        being hard-coded here.
        """
        self.params = {
            "boosting_type": "gbdt",  # gradient-boosted decision trees
            "objective": "regression",  # regression task
            "metric": "mse",  # mean squared error as the evaluation metric
            "num_leaves": 512,  # maximum leaves per tree
            "max_depth": 64,
            "learning_rate": 0.01,
            # NOTE(review): "colsample_bytree" is a LightGBM alias of
            # "feature_fraction"; setting both is redundant and only one
            # value takes effect -- confirm which one is intended.
            "feature_fraction": 0.9,  # sample 90% of features per iteration
            "colsample_bytree": 0.7,
            # NOTE(review): "subsample" (bagging_fraction) has no effect
            # unless "bagging_freq" is also set -- confirm intent.
            "subsample": 0.7,
            "early_stopping_rounds": 50,
            "min_child_samples": 512,
        }
        self.max_iter = 1000  # number of boosting rounds

    def fit(self, X: np.ndarray, Y: np.ndarray):
        """
        Train the booster. Early stopping is evaluated on the training set
        itself (the only valid_sets entry).
        """
        if not self.is_init:
            self.init()
            self.is_init = True
        lgb_train = lgb.Dataset(X, Y)
        self.lgb_train = lgb_train
        # Use self.max_iter so the round count is configured in one place
        # (previously duplicated as a hard-coded 1000).
        self.model = lgb.train(
            self.params, lgb_train, num_boost_round=self.max_iter, valid_sets=[lgb_train]
        )

    def predict(self, X: np.ndarray):
        """Return predictions from the trained booster; fit() must run first."""
        y_pred = self.model.predict(X)
        return y_pred


class MLModelWrapped:
    def __init__(self, model_func: Union[Callable[[], MLModel], Type[MLModel]]):
        """
        Wrap an MLModel-compatible estimator so it can be trained on and
        predict from a custom FeatureDataSource.

        :param model_func: either a class implementing MLModel, or a
            zero-argument factory returning an instance that implements
            MLModel.
        """
        self.model = model_func()

    def fit(self, ds: FeatureDataSource) -> None:
        """
        Train the wrapped model on the full dataset.

        :param ds: custom data source, flattened to (X, Y) arrays
        """
        X, Y, _ = ds.get_all_items()
        self.model.fit(X, Y)

    def predict(self, ds: FeatureDataSource):
        """
        Predict on the full dataset.

        :param ds: custom data source
        :return: predicted labels, and the (timestamp, code) pairs they
            correspond to
        """
        X, _, time_and_code_list = ds.get_all_items()
        pred_Y = self.model.predict(X)
        return pred_Y, time_and_code_list

    def save_model(self, model_path: str):
        """
        Pickle the wrapped model to ``model_path`` (parent dirs are created).
        Saving may fail for unpicklable models; failures are logged, not raised.

        :param model_path: model file path (was mis-annotated as the builtin
            ``dir`` function)
        """
        os.makedirs(os.path.dirname(model_path), exist_ok=True)
        try:
            with open(model_path, "wb") as f:
                pickle.dump(self.model, f)
        except Exception as e:
            # Deliberate best-effort: report and continue rather than crash.
            print(f"save model failed, {self.model}, {e}")


def get_gbdt_model():
    """Build a FeatureDataSource-ready GBDT estimator via MLModelWrapped."""
    wrapped = MLModelWrapped(GBDT)
    return wrapped