# -*- coding:UTF-8 -*-
import sys
import time
import gc
from copy import deepcopy
from typing import List, Tuple

import numpy as np
import torch
from torch import nn, Tensor
from torch.utils.data import Dataset, DataLoader
import os

from core.index_base import IndexBase
from domain.transaction_data.repository import transaction_data_repository
from domain.transaction_data.common.column_name import StockTradeDataColumnName
from config.config import config
from index.obv import OBVType
from infrastructure.util.logger import get_logger, LogLevel
from infrastructure.util.model_evaluation import MultiClassEvaluation
from infrastructure.util.dateutils import DateFormat, get_today_dateStr
from models.dataset.stock_multi_labels_dataset import StockThreeLabelsDataset, MultiStockDataset

# Allow duplicate OpenMP runtimes to coexist (common MKL/libiomp clash when
# numpy and torch each bundle their own copy, especially on Windows).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Anomaly detection pinpoints the op that produced NaN/Inf gradients; per the
# PyTorch docs it slows training, so presumably enabled for debugging — TODO confirm.
torch.autograd.set_detect_anomaly(True)

# Module-level logger; populated by init() below.
logger = None
# Base directory for saved models: <configured models path>/<this file name without extension>.
model_path = config.get('models', 'lstm_index_model_path') + os.sep + str(os.path.basename(__file__)).rsplit(".", 1)[0]
if not os.path.exists(model_path):
    os.makedirs(model_path)


def init(sub_path):
    """Initialize the module-level logger and extend ``model_path`` with a run sub-directory.

    :param sub_path: name of the sub-directory for this run; falls back to a
        YmdHMS timestamp when falsy.
    """
    global logger, model_path
    # Fall back to a timestamp so every run gets its own directory.
    sub_path = sub_path if sub_path else get_today_dateStr(dataFormat=DateFormat.YmdHMS)
    # NOTE(review): model_path is mutated in place, so calling init() twice keeps
    # nesting sub-directories — confirm callers invoke it at most once per process.
    model_path = model_path + os.sep + sub_path
    # exist_ok=True replaces the original exists()/makedirs() pair, which raced
    # if the directory appeared between the check and the create.
    os.makedirs(model_path, exist_ok=True)
    logger = get_logger(log_name="train_log", log_path=model_path, log_level=LogLevel.INFO)


# Define LSTM Neural Networks
class LstmModel(nn.Module):
    """LSTM classifier: stacked LSTM -> Tanh -> Linear head -> Softmax over labels.

    Parameters:
        - input_size: number of features per time step
        - hidden_size: number of hidden units in each LSTM layer
        - output_size: number of output labels
        - num_layers: layers of LSTM to stack
        - batch_first: True for (batch, seq, feature) inputs, False for (seq, batch, feature)
        - device: device the inputs and the recurrent state are moved to
        - dropout: inter-layer LSTM dropout (has no effect when num_layers == 1)
        - seq_len: nominal sequence length (accepted for config symmetry; unused here)
        - other_args: ignored; accepted so configs with extra keys do not raise
    """

    def __init__(self, input_size, hidden_size=1, output_size=1, num_layers=1, batch_first=False, device="cpu", dropout: float = 0, seq_len: int = 30, **other_args):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.device = device
        self.batch_first = batch_first
        # Utilize the LSTM implementation in torch.nn.
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=batch_first, dropout=dropout)
        # Classification head; the final Linear layer deliberately has no activation
        # (the Softmax is applied separately in forward()).
        self.layer = nn.Sequential(
            nn.Tanh(),
            nn.Linear(hidden_size, output_size)
        )
        # Recurrent state carried across forward() calls (lazily initialised).
        self.hidden, self.c = None, None
        self.softmax = nn.Softmax(dim=-1)

    def forward1(self, _x: torch.Tensor):
        """Step the LSTM one time step at a time; return the final step's prediction.

        Kept for reference/debugging — forward() is the production path.
        Assumes _x is (batch, seq, feature) — TODO confirm with callers.

        :param _x: input batch
        :return: prediction for the last time step, shape (batch, output_size)
        """
        hidden = self.init_hidden(batch_size=_x.shape[0]).to(self.device)
        c = self.init_hidden(batch_size=_x.shape[0]).to(self.device)
        pre = None
        for i in range(_x.shape[1]):  # iterate over the sequence dimension
            # Gather time step i of every batch element into shape (1, batch, feature).
            step = [_x[j][i].tolist() for j in range(_x.shape[0])]
            x_b = torch.Tensor([step]).to(device=self.device)
            x, (hidden, c) = self.lstm(x_b, (hidden, c))
            # Bug fix: the original called self.linear, which was never defined
            # (it was commented out in __init__); use the shared head instead.
            pre = self.softmax(self.layer(x))
        del hidden, c
        # Only keep the last vector of the time-sequence dimension.
        return pre[:, -1, :]

    def forward(self, _x: torch.Tensor):
        """Run a whole sequence; the last time step's output is the sequence's output.

        :param _x: (batch, seq, feature) when batch_first, else (seq, batch, feature)
        :return: shape (batch_size, output_size), still attached to the autograd graph
        """
        batch_size = _x.shape[0] if self.batch_first else _x.shape[1]
        # Lazily (re-)initialise the recurrent state; also re-initialise when the
        # batch size changes, which would otherwise make the LSTM call fail.
        if self.hidden is None or self.hidden.shape[1] != batch_size:
            self.hidden = self.init_hidden(batch_size=batch_size).to(self.device)
        if self.c is None or self.c.shape[1] != batch_size:
            self.c = self.init_hidden(batch_size=batch_size).to(self.device)

        x, (hidden, c) = self.lstm(_x.to(self.device), (self.hidden, self.c))
        # Truncated BPTT: carry the state values into the next batch but detach them,
        # so the autograd graph does not grow across batches.
        self.hidden, self.c = hidden.detach(), c.detach()
        pre = self.softmax(self.layer(x))
        # Bug fix: the original rebuilt the result via torch.Tensor([... .tolist() ...]),
        # which detached it from the graph, so gradients never reached the parameters
        # (hence the loss.requires_grad_(True) hack downstream). Slice the last time
        # step instead, keeping the graph intact.
        return pre[:, -1, :] if self.batch_first else pre[-1]

    def init_hidden(self, batch_size=1, requires_grad=True):
        """Return a zero state of shape (num_layers, batch_size, hidden_size)."""
        return torch.zeros(self.num_layers, batch_size, self.hidden_size, requires_grad=requires_grad)


class LstmIndex:
    """Trains and applies an LstmModel over stock trade data with multi-class labels."""

    def __init__(self, column_list=None):
        """
        :param column_list: two-element list [feature column names, label column names].
            The default is resolved inside the body to avoid the shared-mutable-default
            pitfall of the original signature.
        """
        if column_list is None:
            column_list = [[StockTradeDataColumnName.OPEN, StockTradeDataColumnName.CLOSE, StockTradeDataColumnName.HIGH, StockTradeDataColumnName.VOL],
                           ["label_sell", "label_keep", "label_buy"]]
        self.model = None
        self.column_list = column_list
        self.data_size = 30
        # The '{}' slot is filled with "" for the load path and with a timestamp on save.
        self.model_file = model_path + os.sep + 'model{}.pt'
        if os.path.exists(self.model_file.format("")):
            self.model = torch.load(self.model_file.format(""))

    def compute(self, data):
        """Apply the loaded model to one input tensor.

        Bug fix: LstmModel.forward takes the input tensor only (it manages its own
        hidden state) and returns a single tensor — the original passed a hidden
        state and unpacked a tuple, which raised at runtime.

        :param data: input tensor shaped as the model's batch_first setting expects
        :return: model output, or None when no saved model was found at construction
        """
        if self.model is None:
            return None
        return self.model(data)

    def train_one_stock(self, params):
        """Train on params.ts_codes, evaluate on the held-out split, and save the model.

        :param params: a ModelConfig instance
        """
        if not logger:
            init(self.__class__.__name__ + "." + sys._getframe().f_code.co_name + get_today_dateStr(dataFormat=DateFormat.YmdHMS))
        device = torch.device("cuda:0" if params.try_gpu and torch.cuda.is_available() else "cpu")

        # Multi-stock dataset (a single-stock StockDataset variant also exists).
        dataset = MultiStockDataset(ts_codes=params.ts_codes,
                                    seq_len=params.seq_len,
                                    index_functions=params.index_functions,
                                    duplicate=params.dataitem_duplicate,
                                    cache_paths=params.cache_paths)
        model = LstmModel(dataset.features_len, hidden_size=params.hidden_size,
                          output_size=len(dataset.labels),
                          num_layers=params.num_layers,
                          batch_first=params.batch_first,
                          device=device,
                          seq_len=params.seq_len)
        model = model.to(device)
        loss_function = nn.MSELoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=params.learning_rate, weight_decay=0)

        train_eval = MultiClassEvaluation(dataset.labels, detailed_str=True)  # training metrics
        test_eval = MultiClassEvaluation(dataset.labels, detailed_str=True)  # test metrics

        # Split: the test set keeps the Right remainder, training keeps the Left part.
        dataset_test = deepcopy(dataset).drop(drop_ratio=params.train_data_ratio, remain_type="R")
        dataset.drop(drop_ratio=params.train_data_ratio, remain_type="L")

        dataloader = DataLoader(dataset,
                                batch_size=params.batch_size,
                                shuffle=True,     # shuffle training samples
                                num_workers=0,    # single-process loading
                                drop_last=True)   # drop the last incomplete batch
        for epoch in range(params.epoch):
            logger.info(f"epoch[{epoch}/{params.epoch}]...........start...........")
            model.train()
            for index, (x_tensor, y_tensor) in enumerate(dataloader):
                logger.info(f"epoch:{epoch}/{params.epoch}:batch_index:{index}........start..............")
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
                output = model(x_tensor)
                output = output.squeeze(0)
                # Each item's label sequence repeats the same vector; take the first.
                # TODO(review): confirm against MultiStockDataset's label layout.
                target_ = torch.Tensor([b[0].tolist() for b in y_tensor]).to(device)
                logger.info("结果数据：{}\n标签数据：{}".format(str(output.tolist()), str(target_.tolist())))
                loss = loss_function(output, target_)
                train_eval.add(real=target_, output=output)  # feed the metric accumulator
                # PyTorch accumulates gradients, so clear them before each step.
                optimizer.zero_grad()

                # Free unreferenced CUDA memory; works around
                # "RuntimeError: CUDA out of memory. Tried to allocate 2.00 MiB".
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
                # Workaround kept from the original: a no-op when loss already requires
                # grad, and it avoids "element 0 of tensors does not require grad"
                # when the model output is detached from the graph.
                loss.requires_grad_(True)
                loss.backward(retain_graph=True)  # backprop to compute parameter gradients
                optimizer.step()  # one gradient-descent parameter update
                logger.info('Stock epoch[{}/{}] batch:{}, Loss: {:.5f}'.format(epoch, params.epoch, index, loss.item()))
                logger.info("\n" + str(train_eval))  # log training metrics
                if loss.item() < 1e-4:
                    # NOTE(review): only logs — presumably early stopping was
                    # intended here; confirm before adding a break.
                    logger.info("The loss value is reached")
            time.sleep(0.003)
            logger.info(f"epoch[{epoch}/{params.epoch}]...........finish...........")

        logger.info("test starting ......")
        model = model.eval()  # switch to evaluation mode
        dataloader = DataLoader(dataset_test,
                                batch_size=params.batch_size,
                                shuffle=False,    # keep the held-out order
                                num_workers=0,    # single-process loading
                                drop_last=True)   # drop the last incomplete batch
        for index, (x_tensor, y_tensor) in enumerate(dataloader):
            output = model(x_tensor)
            target_ = torch.Tensor([b[0].tolist() for b in y_tensor]).to(device)
            output = output.squeeze(0)
            logger.info("测试结果数据：{}\n标签数据：{}".format(str(output.tolist()), str(target_.tolist())))
            test_eval.add(real=target_, output=output)  # feed the metric accumulator
            logger.info(str(test_eval))  # log test metrics
        torch.save(model, self.model_file.format(get_today_dateStr(DateFormat.YmdHMS)))  # persist the trained model


class ModelConfig:
    """Hyper-parameter bundle passed to LstmIndex.train_one_stock.

    Bug fix: ``index_functions`` previously used a shared mutable default argument
    (``[]``), so all instances built without it shared (and could corrupt) one list;
    a ``None`` sentinel now yields a fresh list per instance.
    """

    def __init__(self, ts_codes: List[str],
                 hidden_size: int,
                 num_layers: int,
                 batch_size: int,
                 epoch: int,
                 seq_len: int,
                 learning_rate=3e-5,
                 try_gpu: bool = False,
                 train_data_ratio: float = 0.8,
                 batch_first=False,
                 dataitem_duplicate=True,
                 index_functions: "List[IndexBase]" = None,
                 dropout: float = 0,
                 cache_paths: Tuple[str] = None):
        self.ts_codes: List[str] = ts_codes
        self.hidden_size: int = hidden_size
        self.num_layers: int = num_layers
        self.batch_size: int = batch_size
        self.epoch: int = epoch
        # Whether to try CUDA (annotation fixed from int to bool). Original note:
        # kept False because the dev machine's 2 GB GPU kept running out of memory.
        self.try_gpu: bool = try_gpu
        # Sequence length, i.e. the number of time steps per sample — in NLP terms,
        # the length of each sentence.
        self.seq_len: int = seq_len
        # A sequence is built from several rows; e.g. for rows 1..9 with seq=3:
        #   True  -> overlapping windows: seq1=[1, 2, 3], seq2=[2, 3, 4]
        #   False -> disjoint windows:    seq1=[1, 2, 3], seq2=[4, 5, 6]
        self.dataitem_duplicate: bool = dataitem_duplicate
        # Learning rate for the optimizer (the original placed this note after
        # batch_first by mistake).
        self.learning_rate: float = learning_rate
        self.batch_first = batch_first
        # Fraction of the dataset used for training; the rest is held out for testing.
        self.train_data_ratio: float = train_data_ratio
        # Each instance gets its own list (see class docstring).
        self.index_functions: "List[IndexBase]" = [] if index_functions is None else index_functions
        self.dropout: float = dropout
        # Optional (data_cache, target_cache) file paths used by the dataset.
        self.cache_paths: Tuple[str] = cache_paths


from index import *
def test():
    """Entry point: train the multi-label LSTM index on a fixed universe of
    Shenzhen-listed tickers with a hand-picked set of technical indicators.

    NOTE(review): relies on ``from index import *`` above for BOLL/DC/KDJ/... and on
    hard-coded local cache files under a Windows user path — confirm both before
    running on another machine.
    """
    ts_codes = ["000063.SZ", "000065.SZ", "000066.SZ", "000068.SZ", "000069.SZ", "000070.SZ", "000078.SZ", "000088.SZ", "000089.SZ", "000090.SZ", "000096.SZ", "000099.SZ", "000100.SZ", "000150.SZ", "000151.SZ", "000153.SZ", "000155.SZ", "000156.SZ", "000157.SZ", "000158.SZ", "000159.SZ", "000166.SZ", "000301.SZ", "000333.SZ", "000338.SZ", "000400.SZ", "000401.SZ", "000402.SZ", "000403.SZ", "000404.SZ", "000407.SZ", "000408.SZ", "000409.SZ", "000410.SZ", "000411.SZ", "000413.SZ", "000415.SZ", "000416.SZ", "000417.SZ", "000419.SZ", "000420.SZ", "000421.SZ", "000422.SZ", "000423.SZ", "000425.SZ", "000426.SZ", "000428.SZ", "000429.SZ", "000430.SZ", "000488.SZ", "000498.SZ", "000501.SZ", "000502.SZ", "000503.SZ", "000504.SZ", "000505.SZ", "000506.SZ", "000507.SZ", "000509.SZ", "000510.SZ", "000513.SZ", "000514.SZ", "000516.SZ", "000517.SZ", "000518.SZ", "000519.SZ", "000520.SZ", "000521.SZ", "000523.SZ", "000524.SZ", "000525.SZ", "000526.SZ", "000528.SZ", "000529.SZ", "000530.SZ", "000531.SZ", "000532.SZ", "000533.SZ", "000534.SZ", "000536.SZ", "000537.SZ", "000538.SZ", "000539.SZ", "000540.SZ", "000541.SZ", "000543.SZ", "000544.SZ", "000545.SZ", "000546.SZ", "000547.SZ", "000548.SZ", "000550.SZ", "000551.SZ", "000552.SZ", "000553.SZ", "000554.SZ", "000555.SZ", "000557.SZ", "000558.SZ", "000559.SZ", "000560.SZ", "000561.SZ", "000563.SZ", "000564.SZ", "000565.SZ", "000566.SZ", "000567.SZ", "000568.SZ", "000570.SZ", "000571.SZ", "000572.SZ", "000573.SZ", "000576.SZ", "000581.SZ", "000582.SZ", "000584.SZ", "000585.SZ", "000586.SZ", "000587.SZ", "000589.SZ", "000590.SZ", "000591.SZ", "000592.SZ", "000593.SZ", "000595.SZ", "000596.SZ", "000597.SZ", "000598.SZ", "000599.SZ", "000600.SZ", "000601.SZ", "000603.SZ", "000605.SZ", "000606.SZ", "000607.SZ", "000608.SZ", "000609.SZ", "000610.SZ", "000611.SZ", "000612.SZ", "000613.SZ", "000615.SZ", "000616.SZ", "000617.SZ", "000619.SZ", "000620.SZ", "000622.SZ", "000623.SZ", "000625.SZ", "000626.SZ", "000627.SZ", "000628.SZ", 
"000629.SZ", "000630.SZ", "000631.SZ", "000632.SZ", "000633.SZ", "000635.SZ", "000636.SZ", "000637.SZ", "000638.SZ", "000639.SZ", "000650.SZ", "000651.SZ", "000652.SZ", "000655.SZ", "000656.SZ", "000657.SZ", "000659.SZ", "000661.SZ", "000663.SZ", "000665.SZ", "000666.SZ", "000667.SZ", "000668.SZ", "000669.SZ", "000671.SZ", "000672.SZ", "000673.SZ", "000676.SZ", "000677.SZ", "000678.SZ", "000679.SZ", "000680.SZ", "000681.SZ", "000682.SZ", "000683.SZ", "000685.SZ", "000686.SZ", "000687.SZ", "000688.SZ", "000690.SZ", "000691.SZ", "000692.SZ", "000695.SZ", "000697.SZ", "000698.SZ", "000700.SZ", "000701.SZ", "000702.SZ", "000703.SZ", "000705.SZ", "000707.SZ", "000708.SZ", "000709.SZ", "000710.SZ", "000711.SZ", "000712.SZ", "000713.SZ", "000715.SZ", "000716.SZ", "000717.SZ", "000718.SZ", "000719.SZ", "000720.SZ", "000721.SZ", "000722.SZ", "000723.SZ", "000725.SZ", "000726.SZ", "000727.SZ", "000728.SZ", "000729.SZ", "000731.SZ", "000732.SZ", "000733.SZ", "000735.SZ", "000736.SZ", "000737.SZ", "000738.SZ", "000739.SZ", "000750.SZ", "000751.SZ", "000752.SZ", "000753.SZ", "000755.SZ", "000756.SZ"]
    params = ModelConfig(
        ts_codes=ts_codes,
        hidden_size=32,
        num_layers=3,
        batch_size=128,
        epoch=16,
        seq_len=30,
        learning_rate=3e-5,
        try_gpu=False,
        dataitem_duplicate=True,
        batch_first=True,
        train_data_ratio = 0.8,
        # (indicator class, constructor kwargs) pairs — presumably instantiated
        # inside MultiStockDataset to derive extra features; verify against the
        # dataset implementation.
        index_functions= [
            (BOLL, dict(period=20, times=2, key='close')),
            # BreakPoint(),
            (DC, dict(period = 20)),
            # EWMA(),
            (KDJ, dict(period=20, alpha=2/3, key='close')),
            (SubtractionMomentum, dict(period=5, key=StockTradeDataColumnName.CLOSE)),
            (ROC, dict(period=5, key=StockTradeDataColumnName.CLOSE)),
            (OBV, dict(period=9, type=OBVType.accumulation, key='close')),
            (SmaRSI, dict(period=6, key=StockTradeDataColumnName.CLOSE)),
            (SMA, dict(period = 5, key = 'close'))
        ],
        dropout=0.2,
        # Pre-computed feature/target caches to skip re-building the dataset.
        cache_paths=(r"C:\Users\zhang\projects\zh\quantitative_investment\models\lstm\cache_path\22_data.npy",
                     r"C:\Users\zhang\projects\zh\quantitative_investment\models\lstm\cache_path\3_target.npy")
    )
    lstm = LstmIndex(column_list=[[StockTradeDataColumnName.CLOSE], ["label_sell", "label_keep", "label_buy"]])
    init("multi_label")
    lstm.train_one_stock(params)

if __name__ == '__main__':
    test()
