#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Create by zhang
# Create on 2022/7/8 15:40
import os
import random
from typing import List, Dict, Tuple
import numpy as np
import pandas as pd
from rsa import randnum
from torch.utils.data.dataset import Dataset

from core.index_base import IndexBase
from domain.transaction_data.repository import transaction_data_repository
from infrastructure.optimize.normalize import normalize_z_score
from models.common.enums.transaction_label import TransactionLabelEnum
from domain.transaction_data.common.column_name import StockTradeDataColumnName


class StockThreeLabelsDataset(Dataset):
    """Per-stock dataset yielding (feature sequence, one-hot label) pairs.

    Each item is a ``seq_len`` x ``features_len`` float32 block of (optionally
    z-score normalized) trade data together with a one-hot (sell, keep, buy)
    label taken from the last row of the block.  Classes are balanced by
    randomly down-sampling every label to the size of the rarest one.
    """

    def __init__(self, ts_code: str, seq_len: int = 30, dividing_boundary: Tuple[int] = (-5, 5),
                 index_functions: List[tuple] = None, duplicate: bool = True, to_normalize: bool = True):
        """
        @param ts_code: stock code
        @param seq_len: number of raw data rows contained in one data block
        @param dividing_boundary: (sell_threshold, buy_threshold) applied to the
            cumulative change rate when generating labels
        @param index_functions: optional list of (indicator_class, kwargs) tuples;
            each indicator is computed on the raw frame and its added columns are
            appended to the feature list.  Default is None (replaces the former
            mutable ``[]`` default; behavior is identical).
        @param duplicate: whether a raw data row may be reused across data blocks
        @param to_normalize: z-score normalize the feature columns.  NOTE: this
            flag was previously accepted but ignored; it is now honored (the
            default True matches the old unconditional behavior).
        """
        super(StockThreeLabelsDataset, self).__init__()
        self.ts_code: str = ts_code
        self.seq_len = seq_len  # length of one sequence (comparable to a sentence in NLP)
        self.duplicate = duplicate
        self.features: List[str] = [StockTradeDataColumnName.OPEN, StockTradeDataColumnName.CLOSE,
                                    StockTradeDataColumnName.HIGH, StockTradeDataColumnName.LOW,
                                    StockTradeDataColumnName.VOL]
        self.labels: List[str] = [TransactionLabelEnum.sell.name, TransactionLabelEnum.keep.name,
                                  TransactionLabelEnum.buy.name]
        self.df = transaction_data_repository.get_stock_transaction_data(ts_code)
        if index_functions:
            for item in index_functions:
                indicator = item[0](**item[1])  # instantiate the indicator with its kwargs
                indicator.compute(self.df)
                self.features.extend(indicator.added_keys)
        self.features_len: int = len(self.features)
        self.df.replace(to_replace=np.nan, value=0, inplace=True)
        # Indicator computation needs the datetime index; the label/sequence
        # steps below need plain positional indices.
        self.df = self.df.reset_index()
        self.data: List[list] = list()
        self.target: List[list] = list()
        if self.df is not None and not self.df.empty:
            self.__generate_label(dividing_boundary=dividing_boundary)
            if to_normalize:
                normalize_z_score(self.df, keys=self.features)
            self.__splice_data_label()
            self.data = np.array(self.data, dtype=np.float32)
            self.__to_seq()
            self.__balance()
        else:
            self.data = np.empty([0, 1])
            self.target = np.empty([0, 1])

    def __generate_label(self, key: str = None, dividing_boundary=(-5, 5), future_data_size=(30, 15),
                         drop_none_label_benchmark=True):
        """Attach one-hot (sell/keep/buy) label columns to ``self.df``.

        1. Within the future window (``future_data_size``) locate the indices of
           the highest and the lowest value of ``key``.
        2. In a rising channel use the high point, in a falling channel the low
           point; otherwise use whichever of the two appears first.
        3. Compute the cumulative change rate from the current row to that point.
        4. Assign the label from the rate vs. ``dividing_boundary``.
        :param key: benchmark column; None/empty falls back to the close price
        :param dividing_boundary: (sell_threshold, buy_threshold)
        :param future_data_size: (look-ahead span, number of tail rows left unlabeled)
        :param drop_none_label_benchmark: drop rows that received no label at all
        """
        key = key if key is not None and len(key) > 0 else StockTradeDataColumnName.CLOSE
        for col in TransactionLabelEnum.get_names():
            self.df[col] = 0
        for i in range(self.df.shape[0] - future_data_size[1]):
            target_index = i + future_data_size[0] if self.df.shape[0] - i - 1 > future_data_size[0] else self.df.shape[0]
            max_idx = self.df.loc[i + 1:target_index, key].idxmax()
            min_idx = self.df.loc[i + 1:target_index, key].idxmin()
            idx: int = max_idx if max_idx < min_idx else min_idx  # whichever extreme comes first
            if self.df.loc[i, key] > self.df.loc[max_idx, key]:  # falling channel
                idx = min_idx
            elif self.df.loc[i, key] < self.df.loc[min_idx, key]:  # rising channel
                idx = max_idx

            # Cumulative deviation of the window from the current value, relative to it.
            rate = (self.df.loc[i + 1:idx, key].sum() - (idx - i) * self.df.loc[i, key]) / self.df.loc[i, key]
            if rate < dividing_boundary[0]:
                self.df.loc[i, TransactionLabelEnum.sell.name] = 1
            elif rate > dividing_boundary[1]:
                self.df.loc[i, TransactionLabelEnum.buy.name] = 1
            else:
                self.df.loc[i, TransactionLabelEnum.keep.name] = 1

        label_sell_count = self.df.loc[:, TransactionLabelEnum.sell.name].sum()
        label_keep_count = self.df.loc[:, TransactionLabelEnum.keep.name].sum()
        label_buy_count = self.df.loc[:, TransactionLabelEnum.buy.name].sum()
        print("Stock[{}] label count:{}(label_sell) {}(label_keep) {}(label_buy)".format(self.ts_code, label_sell_count,
                                                                                         label_keep_count,
                                                                                         label_buy_count))
        if drop_none_label_benchmark:
            self.df.drop(self.df.loc[(self.df.loc[:, TransactionLabelEnum.sell.name] == 0) & (self.df.loc[:, TransactionLabelEnum.keep.name] == 0) & (self.df.loc[:, TransactionLabelEnum.buy.name] == 0)].index, inplace=True)

    def __splice_data_label(self):
        """Flatten the frame into ``self.data`` rows of [features..., labels...]."""
        self.data = []

        def collect(row):
            self.data.append(list(row[self.features + self.labels].values))

        self.df.apply(collect, axis=1)

    def __to_seq(self):
        """Turn the flat rows into sequence blocks, one block per sample.

        With ``duplicate`` the window slides by one row (overlapping blocks),
        otherwise the rows are chunked into disjoint blocks.  The target of a
        block is the label of its last row.
        @return: None (rewrites ``self.data``/``self.target`` in place)
        """
        data_len = len(self.data)
        if self.duplicate:
            data_len = data_len - self.seq_len + 1
        else:
            data_len = int(data_len / self.seq_len)
        data = []
        target = []
        for i in range(data_len):
            if self.duplicate:
                index_end = i + self.seq_len
                data.append(self.data[i:index_end, :len(self.features)].tolist())
                target.append(self.data[i:index_end, len(self.features):].tolist())
            else:
                idx = i * self.seq_len
                seq = list()
                label = list()
                if idx <= len(self.data):
                    for j in range(idx, idx + self.seq_len):
                        seq.append(list(self.data[j][:len(self.features)]))
                        label.append(list(self.data[j][len(self.features):]))
                data.append(seq)
                target.append(label)
        # Keep only the last row's label of every block as its target.
        reduce_target = [[seq[-1]] for seq in target]
        self.data = np.array(data, dtype=np.float32)
        self.target = np.array(reduce_target, dtype=np.float32)

    def __balance(self):
        """Balance class sizes by randomly down-sampling to the rarest label.

        @return: None (rebuilds ``self.data``/``self.target``; both become empty
            when any class has zero samples)
        """
        count_dict: Dict[str, int] = dict()
        label_data_dict: Dict[str, list] = dict()  # samples grouped by label name
        label_dict: Dict[str, list] = dict()       # one-hot target per label name
        for i, key in enumerate(self.labels):
            one_hot = [0, 0, 0]
            one_hot[i] = 1
            label_data_dict[key] = list()
            label_dict[key] = [one_hot]
            count_dict[key] = 0
        for i in range(len(self.target)):
            label = self.labels[np.argmax(self.target[i][0])]
            count_dict[label] += 1
            label_data_dict[label].append(self.data[i].tolist())

        # Size of the rarest class; may be 0.
        num = min(count_dict.values())
        if num > 0:
            sampled = list()
            for key in self.labels:
                sampled = sampled + [(d, label_dict[key]) for d in random.sample(label_data_dict[key], num)]

            # Shuffle so same-label samples are not adjacent.
            random.shuffle(sampled)
            # Split samples and their labels apart again.
            data, target = [], []
            for item in sampled:
                data.append(item[0])
                target.append(item[1])
            self.data = np.array(data, dtype=np.float32)
            self.target = np.array(target, dtype=np.float32)
        else:
            self.data = np.empty([0, 1])
            self.target = np.empty([0, 1])

    def drop(self, drop_ratio=0.8, remain_type="L"):
        """Discard part of the dataset in place.

        NOTE(review): despite its name, ``drop_ratio`` marks the split point, so
        with ``remain_type="L"`` that fraction is KEPT (original behavior retained).

        @param drop_ratio: fraction of the dataset marking the split point
        @param remain_type: "L" keeps the left (earlier) part, anything else the right
        @return: self, or None when the dataset is too small to split
        """
        if len(self) > 1:
            split = int(len(self) * drop_ratio)
            split = split if split < len(self) else len(self) - 1
            if remain_type and remain_type.upper() == "L":
                self.data = self.data[:split]
                self.target = self.target[:split]  # keep targets aligned with data (bug fix)
            else:
                self.data = self.data[split:]
                self.target = self.target[split:]  # previously targets were left unsliced -> wrong pairing
            return self
        return None

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index], self.target[index]


import multiprocessing


def dataset_worker(args_queue, dataset_queue):
    """Multiprocessing dataset producer: build one dataset per queued arg dict.

    Runs until ``args_queue`` is drained, then returns normally (the previous
    version exited by re-raising the ``queue.Empty`` from ``get_nowait``, which
    was then silently swallowed by the ignored AsyncResult).  Genuine errors
    raised while building a dataset still propagate as before.

    :param args_queue: queue of dicts with keys ts_code/seq_len/index_functions/duplicate
    :param dataset_queue: queue receiving the built StockThreeLabelsDataset objects
    :return: None
    """
    import queue  # local import: only the Empty sentinel is needed here
    while True:
        try:
            args = args_queue.get_nowait()
        except queue.Empty:
            return  # no more work — normal termination
        dataset = StockThreeLabelsDataset(ts_code=args["ts_code"], seq_len=args["seq_len"],
                                          index_functions=args["index_functions"],
                                          duplicate=args["duplicate"])
        dataset_queue.put_nowait(dataset)


class MultiStockDataset(Dataset):
    """Concatenation of per-stock ``StockThreeLabelsDataset``s built in parallel.

    Datasets are produced by a process pool; the merged float32 arrays are
    cached to ``.npy`` files and reloaded on the next construction when possible.
    """

    def __init__(self, ts_codes: List[str], seq_len: int = 30, index_functions: List[tuple] = None,
                 duplicate: bool = True, cache_paths: Tuple[str] = None):
        """
        @param ts_codes: list of stock codes (must be non-empty)
        @param seq_len: number of raw data rows contained in one data block
        @param index_functions: optional list of (indicator_class, kwargs) tuples
            forwarded to each StockThreeLabelsDataset.  Default is None (replaces
            the former mutable ``[]`` default; behavior is identical).
        @param duplicate: whether a raw data row may be reused across data blocks
        @param cache_paths: np.array cache files; cache_paths[0] is the data file
            (saved as StockThreeLabelsDataset_data_[len(self.features)].npy),
            cache_paths[1] the target file
            (saved as StockThreeLabelsDataset_target_[len(self.labels)].npy)
        """
        super(MultiStockDataset, self).__init__()
        assert ts_codes is not None and len(ts_codes) > 0
        self.labels: List[str] = [TransactionLabelEnum.sell.name, TransactionLabelEnum.keep.name,
                                  TransactionLabelEnum.buy.name]
        self.ts_codes: List[str] = ts_codes
        self.exception_ts_codes: List[str] = list()
        self.seq_len = seq_len  # length of one sequence (comparable to a sentence in NLP)
        self.duplicate = duplicate
        self.dataset_dict: Dict[str, StockThreeLabelsDataset] = dict()
        try:  # try to load the cached arrays first
            # The saver below names the file ..._data_<features_len>.npy, so the
            # feature count is the LAST "_" field before the extension.  (The old
            # code read field [1], i.e. the literal "data", so int() always
            # raised and the cache was never actually used.)
            self.features_len: int = int(os.path.split(cache_paths[0])[1].split("_")[-1].split(".")[0])
            self.data: np.ndarray = np.load(cache_paths[0])
            self.target: np.ndarray = np.load(cache_paths[1])
        except Exception:  # cache missing/invalid: build the data, then save it
            # (narrowed from a bare except so KeyboardInterrupt/SystemExit pass through)
            pool = multiprocessing.Pool(multiprocessing.cpu_count())
            manager = multiprocessing.Manager()
            args_queue = manager.Queue(len(self.ts_codes))
            dataset_queue = manager.Queue(len(self.ts_codes))
            for ts_code in self.ts_codes:
                args_queue.put_nowait({"ts_code": ts_code, "seq_len": seq_len,
                                       "index_functions": index_functions, "duplicate": duplicate})
            for _ in range(multiprocessing.cpu_count()):
                pool.apply_async(func=dataset_worker, args=(args_queue, dataset_queue,))
            pool.close()
            pool.join()
            while not dataset_queue.empty():
                dataset = dataset_queue.get_nowait()
                if dataset is not None and len(dataset) > 0:
                    self.dataset_dict[dataset.ts_code] = dataset
                else:
                    self.exception_ts_codes.append(dataset.ts_code)
            # NOTE(review): assumes at least one stock produced data; raises IndexError otherwise.
            self.features: List[str] = list(self.dataset_dict.values())[0].features
            self.features_len: int = len(self.features)
            data_temp: List[list] = list()
            target_temp: List[list] = list()
            for ts_code in self.ts_codes:
                # Only merge codes that actually produced a dataset (a code may be
                # absent from dataset_dict without being in exception_ts_codes if
                # its worker died; the old membership test raised KeyError then).
                if ts_code not in self.dataset_dict:
                    continue
                data_temp = data_temp + self.dataset_dict[ts_code].data.tolist()
                target_temp = target_temp + self.dataset_dict[ts_code].target.tolist()
            self.data: np.ndarray = np.array(data_temp, dtype=np.float32)
            self.target: np.ndarray = np.array(target_temp, dtype=np.float32)
            # Persist to the cache directory (only when cache_paths[0] already exists).
            if cache_paths is not None and len(cache_paths) > 0 and os.path.exists(cache_paths[0]):
                cache_dir = cache_paths[0] if os.path.isdir(cache_paths[0]) else os.path.dirname(cache_paths[0])
                data_path = os.path.join(cache_dir, f"StockThreeLabelsDataset_data_{self.features_len}.npy")
                target_path = os.path.join(cache_dir, f"StockThreeLabelsDataset_target_{self.target.shape[2]}.npy")
                if os.path.exists(data_path):
                    os.remove(data_path)
                if os.path.exists(target_path):
                    os.remove(target_path)
                np.save(data_path, self.data)
                np.save(target_path, self.target)

    def drop(self, drop_ratio=0.8, remain_type="L"):
        """Discard part of the dataset in place.

        NOTE(review): despite its name, ``drop_ratio`` marks the split point, so
        with ``remain_type="L"`` that fraction is KEPT (original behavior retained).

        @param drop_ratio: fraction of the dataset marking the split point
        @param remain_type: "L" keeps the left (earlier) part, anything else the right
        @return: self, or None when the dataset is too small to split
        """
        if len(self) > 1:
            split = int(len(self) * drop_ratio)
            split = split if split < len(self) else len(self) - 1
            if remain_type and remain_type.upper() == "L":
                self.data = self.data[:split]
                self.target = self.target[:split]  # keep targets aligned with data (bug fix)
            else:
                self.data = self.data[split:]
                self.target = self.target[split:]  # previously targets were left unsliced -> wrong pairing
            return self
        return None

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index], self.target[index]