#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Create by zhang
# Create on 2022/7/8 15:40
import os
import queue
import random
from typing import Dict, List, Tuple

import numpy as np
import pandas as pd
from rsa import randnum
from torch.utils.data.dataset import Dataset

from core.index_base import IndexBase
from domain.transaction_data.common.column_name import StockTradeDataColumnName
from domain.transaction_data.repository import transaction_data_repository
from index.change_in_value import ChangeInValueIndex, CollectType
from infrastructure.optimize.normalize import normalize_z_score
from models.common.enums.transaction_label import TransactionLabelEnum


class StockOneLabelDataset(Dataset):
    """Single-stock dataset yielding (seq_len x num_features) blocks, each with
    one binary label derived from the future movement of ``label_key``.

    Pipeline: load raw trade data -> compute optional indicator columns ->
    generate the label column -> (optionally) z-score normalize features ->
    window into sequences -> balance the two classes by down-sampling.
    """

    def __init__(self, ts_code: str, seq_len: int = 30, label: TransactionLabelEnum = TransactionLabelEnum.sell,
                 label_key: str = StockTradeDataColumnName.CLOSE, dividing_boundary: float = 0.1,
                 future_data_size: Tuple[int, int] = (1, 4),
                 collect_type: CollectType = CollectType.UP, drop_none_label_benchmark: bool = True,
                 index_functions: List[tuple] = None,
                 duplicate: bool = True, to_normalize: bool = True,
                 remove_continued_label: bool = True, **args):
        """
        @param ts_code: stock code
        @param seq_len: number of raw rows contained in one data block (sequence length)
        @param label: label enum; ``label.name`` is used as the label column name
        @param label_key: column the label benchmark is extracted from
        @param dividing_boundary: threshold that separates the two classes
        @param collect_type: UP collects above ``dividing_boundary``, DOWN collects below it
        @param drop_none_label_benchmark: whether rows without a label value are dropped
        @param future_data_size: (start, end) offsets of the future window the label is computed from
        @param index_functions: (index_class, kwargs) pairs that add indicator columns dynamically
        @param duplicate: whether one raw row may be reused in several (overlapping) blocks
        @param to_normalize: whether the feature columns are z-score normalized
        @param remove_continued_label: whether consecutive repeated labels are removed during labeling
        """
        super(StockOneLabelDataset, self).__init__()
        self.ts_code: str = ts_code
        self.seq_len: int = seq_len  # length of one sequence (analogous to a sentence)
        self.label_key: str = label_key
        self.dividing_boundary: float = dividing_boundary
        self.future_data_size: Tuple[int, int] = future_data_size
        self.collect_type = collect_type
        self.drop_none_label_benchmark: bool = drop_none_label_benchmark
        self.duplicate: bool = duplicate
        # BUG FIX: this parameter was accepted but previously hard-coded to
        # True inside __generate_label; it is now honored.
        self.remove_continued_label: bool = remove_continued_label

        self.features: List[str] = [StockTradeDataColumnName.OPEN, StockTradeDataColumnName.CLOSE,
                                    StockTradeDataColumnName.HIGH, StockTradeDataColumnName.LOW,
                                    StockTradeDataColumnName.VOL]
        self.label = label
        self.labels: List[str] = [label.name]
        self.df = transaction_data_repository.get_stock_transaction_data(ts_code)

        # BUG FIX: the mutable default argument ([]) was replaced with None.
        if index_functions:
            for index_cls, index_kwargs in index_functions:
                index = index_cls(**index_kwargs)
                index.compute(self.df)
                self.features.extend(index.added_keys)

        self.features_len: int = len(self.features)
        self.df.replace(to_replace=np.nan, value=0, inplace=True)
        # Indicator computation needs the time index; the windowing below
        # needs a plain integer index.
        self.df = self.df.reset_index()
        self.data: List[list] = list()
        self.target: List[list] = list()
        if self.df is not None and not self.df.empty:
            self.__generate_label()
            if to_normalize:  # BUG FIX: the flag was previously ignored
                normalize_z_score(self.df, keys=self.features)
            self.__splice_data_label()
            self.data = np.array(self.data, dtype=np.float32)
            self.__to_seq()
            self.__balance()
        else:
            self.data = np.empty([0, 1])
            self.target = np.empty([0, 1])

    def __generate_label(self):
        """Create the label column on ``self.df``.

        1. Find the index of the high/low point of ``label_key`` within the
           next ``future_data_size`` rows.
        2. ``collect_type`` UP uses the high point, DOWN uses the low point.
        3. Compute the cumulative change of that point vs. the current row.
        4. Set the label from the change vs. ``dividing_boundary``.
        """
        ChangeInValueIndex(adding_key=self.labels[0],
                           period=self.future_data_size,
                           dividing_boundary=self.dividing_boundary,
                           collect_type=self.collect_type,
                           key=self.label_key,
                           remove_continued_label=self.remove_continued_label).compute(self.df)
        if self.drop_none_label_benchmark:
            # BUG FIX: `== np.nan` is always False (NaN never compares equal),
            # so no rows were ever dropped; use isna() to find unlabeled rows.
            self.df.drop(self.df.loc[self.df.loc[:, self.labels[0]].isna()].index, inplace=True)

    def __splice_data_label(self):
        """Flatten the frame into one [features..., label] list per row."""
        # Equivalent to the row-wise apply-and-append, but vectorized.
        self.data = self.df[self.features + self.labels].values.tolist()

    def __to_seq(self):
        """Window the flat rows into (num_seq, seq_len, num_features) data and
        (num_seq, 1, 1) targets; the target of a block is the label of its
        last row.
        """
        n_feats = len(self.features)
        if self.duplicate:
            # overlapping windows, stride 1
            num_seq = len(self.data) - self.seq_len + 1
        else:
            # disjoint windows, stride seq_len
            num_seq = len(self.data) // self.seq_len
        data, target = [], []
        for i in range(num_seq):
            start = i if self.duplicate else i * self.seq_len
            block = self.data[start:start + self.seq_len]
            data.append(block[:, :n_feats].tolist())
            # only the label of the block's last row is kept
            target.append([block[-1, n_feats:].tolist()])
        self.data = np.array(data, dtype=np.float32)
        self.target = np.array(target, dtype=np.float32)

    def __balance(self):
        """Down-sample so both classes contribute the same number of blocks.

        When either class is empty (num == 0) the dataset becomes empty too.
        """
        positive = self.labels[0]
        count_dict = {positive: 0, "other": 0}
        # per-class sample store
        label_data_dict: Dict[str, list] = {positive: [], "other": []}
        # target value emitted for each class
        label_dict: Dict[str, list] = {positive: [[1]], "other": [[0]]}

        for i in range(len(self.target)):
            key = positive if self.target[i][0][0] == 1 else "other"
            count_dict[key] += 1
            label_data_dict[key].append(self.data[i].tolist())

        # size of the smaller class; may be 0
        num = min(count_dict.values())
        if num > 0:
            paired = list()
            for key in label_data_dict.keys():
                paired += [(d, label_dict[key]) for d in random.sample(label_data_dict[key], num)]
            # shuffle so samples with equal labels are not clustered together
            random.shuffle(paired)
            data, target = [], []
            for sample, lab in paired:
                data.append(sample)
                target.append(lab)
            self.data = np.array(data, dtype=np.float32)
            self.target = np.array(target, dtype=np.float32)
        else:
            self.data = np.empty([0, 1])
            self.target = np.empty([0, 1])

    def drop(self, drop_ratio=0.8, remain_type="L"):
        """Keep only one side of the dataset, split at ``drop_ratio``.

        @param drop_ratio: split point as a fraction of the current length
        @param remain_type: "L" keeps the left part [:split], anything else keeps [split:]
        @return: self, or None when there is at most one sample
        """
        if len(self) > 1:
            split = int(len(self) * drop_ratio)
            split = split if split < len(self) else len(self) - 1
            # BUG FIX: target must be sliced together with data, otherwise
            # __getitem__ pairs samples with the wrong labels afterwards.
            if remain_type and remain_type.upper() == "L":
                self.data = self.data[:split]
                self.target = self.target[:split]
            else:
                self.data = self.data[split:]
                self.target = self.target[split:]
            return self
        else:
            return None

    def __len__(self):
        """Number of (data block, target) samples."""
        return len(self.data)

    def __getitem__(self, index):
        """Return the (data block, target) pair at ``index``."""
        return self.data[index], self.target[index]


import multiprocessing


def dataset_worker(args_queue, dataset_queue):
    """Multiprocess dataset producer: build StockOneLabelDataset instances
    from kwargs dicts until the argument queue is drained.

    :param args_queue: queue of kwargs dicts for StockOneLabelDataset
    :param dataset_queue: queue the finished datasets are pushed onto
    :return: None
    """
    while True:
        try:
            args = args_queue.get_nowait()
        except queue.Empty:
            # BUG FIX: the original caught the queue.Empty that ends the loop
            # and re-raised it (`raise e`), so every worker terminated with an
            # exception instead of finishing normally. Exit cleanly instead.
            return
        dataset_queue.put_nowait(StockOneLabelDataset(**args))


class MultiStockDataset(Dataset):
    """Dataset that concatenates StockOneLabelDataset samples for several
    stocks, building the per-stock datasets in parallel worker processes and
    caching the merged arrays as .npy files.
    """

    def __init__(self, ts_codes: List[str], seq_len: int = 30, label: TransactionLabelEnum = TransactionLabelEnum.sell,
                 label_key: str = StockTradeDataColumnName.CLOSE, dividing_boundary: float = 0.1,
                 future_data_size: Tuple[int, int] = (1, 4),
                 collect_type: CollectType = CollectType.UP, drop_none_label_benchmark: bool = True,
                 index_functions: List[tuple] = None,
                 duplicate: bool = True, to_normalize: bool = True, cache_paths: Tuple[str] = (),
                 remove_continued_label: bool = True, **args):
        """
        @param ts_codes: list of stock codes
        @param seq_len: number of raw rows contained in one data block
        @param label: label enum; ``label.name`` is used as the label column name
        @param label_key: column the label benchmark is extracted from
        @param dividing_boundary: threshold that separates the two classes
        @param collect_type: UP collects above ``dividing_boundary``, DOWN collects below it
        @param drop_none_label_benchmark: whether rows without a label value are dropped
        @param future_data_size: (start, end) offsets of the future window the label is computed from
        @param index_functions: (index_class, kwargs) pairs that add indicator columns dynamically
        @param duplicate: whether one raw row may be reused in several (overlapping) blocks
        @param cache_paths: np.array cache files; cache_paths[0] is the data file
               (name format ``..._<features_len>.npy``), cache_paths[1] the target file
        @param remove_continued_label: whether consecutive repeated labels are removed during labeling
        """
        super(MultiStockDataset, self).__init__()
        assert ts_codes is not None and len(ts_codes) > 0
        self.labels: List[str] = [label.name]
        self.ts_codes: List[str] = ts_codes
        self.exception_ts_codes: List[str] = list()
        self.seq_len: int = seq_len  # length of one sequence (analogous to a sentence)
        self.duplicate: bool = duplicate
        self.dataset_dict: Dict[str, StockOneLabelDataset] = dict()
        try:  # try the cache first; the feature count is encoded in the file name
            self.features_len: int = int(os.path.split(cache_paths[0])[1].split(".")[0].split("_")[-1])
            self.data: np.ndarray = np.load(cache_paths[0])
            self.target: np.ndarray = np.load(cache_paths[1])
        # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # catch only the expected cache-miss failures (no paths given, missing
        # file, unparsable name/content).
        except (IndexError, OSError, ValueError):
            # BUG FIX: mutable default [] for index_functions replaced by None
            # (StockOneLabelDataset treats None like an empty list).
            self.__build_parallel(seq_len=seq_len, label=label, label_key=label_key,
                                  dividing_boundary=dividing_boundary,
                                  future_data_size=future_data_size,
                                  collect_type=collect_type,
                                  drop_none_label_benchmark=drop_none_label_benchmark,
                                  index_functions=index_functions,
                                  duplicate=duplicate,
                                  to_normalize=to_normalize,
                                  remove_continued_label=remove_continued_label)
            self.__merge_datasets()
            self.__save_cache(cache_paths)

    def __build_parallel(self, **dataset_args):
        """Build one StockOneLabelDataset per ts_code in a process pool and
        collect the results into self.dataset_dict / self.exception_ts_codes."""
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        manager = multiprocessing.Manager()
        args_queue = manager.Queue(len(self.ts_codes))
        dataset_queue = manager.Queue(len(self.ts_codes))
        for ts_code in self.ts_codes:
            worker_args = dict(dataset_args)
            worker_args["ts_code"] = ts_code
            args_queue.put_nowait(worker_args)
        # one worker per CPU; they drain args_queue cooperatively
        for _ in range(multiprocessing.cpu_count()):
            pool.apply_async(func=dataset_worker, args=(args_queue, dataset_queue,))
        pool.close()
        pool.join()
        while not dataset_queue.empty():
            dataset = dataset_queue.get_nowait()
            if dataset is None:
                # BUG FIX: the original dereferenced dataset.ts_code on None
                continue
            if len(dataset) > 0:
                self.dataset_dict[dataset.ts_code] = dataset
            else:
                self.exception_ts_codes.append(dataset.ts_code)

    def __merge_datasets(self):
        """Concatenate per-stock data/target arrays in ts_codes order."""
        if not self.dataset_dict:
            # BUG FIX: the original crashed with IndexError when every stock
            # failed; fall back to an empty dataset instead.
            self.features: List[str] = []
            self.features_len: int = 0
            self.data: np.ndarray = np.empty([0, 1], dtype=np.float32)
            self.target: np.ndarray = np.empty([0, 1, 1], dtype=np.float32)
            return
        self.features = next(iter(self.dataset_dict.values())).features
        self.features_len = len(self.features)
        data_temp: List[list] = list()
        target_temp: List[list] = list()
        for ts_code in self.ts_codes:
            dataset = self.dataset_dict.get(ts_code)
            if dataset is None:
                # BUG FIX: a ts_code whose worker crashed (so it is in neither
                # dataset_dict nor exception_ts_codes) raised KeyError before.
                continue
            data_temp.extend(dataset.data.tolist())
            target_temp.extend(dataset.target.tolist())
        self.data = np.array(data_temp, dtype=np.float32)
        self.target = np.array(target_temp, dtype=np.float32)

    def __save_cache(self, cache_paths: Tuple[str]):
        """Persist data/target as .npy files in the directory derived from
        cache_paths[0]; skipped when no cache path is given or there is no data."""
        if cache_paths is None or len(cache_paths) == 0 or len(self.data) == 0:
            return
        cache_dir = cache_paths[0]
        # BUG FIX: strip the file name regardless of whether the path exists;
        # the original only stripped it for non-existing paths and then joined
        # the new file names onto an existing .npy FILE path.
        if cache_dir.lower().endswith(".npy"):
            cache_dir = os.path.split(cache_dir)[0]
        if cache_dir and not os.path.exists(cache_dir):
            os.makedirs(cache_dir, mode=0o755, exist_ok=True)
        data_path = os.path.join(cache_dir, f"StockOneLabelDataset_data_{self.features_len}.npy")
        target_path = os.path.join(cache_dir, f"StockOneLabelDataset_target_{self.target.shape[2]}.npy")
        # np.save truncates/overwrites existing files, no explicit remove needed
        np.save(data_path, self.data)
        np.save(target_path, self.target)

    def drop(self, drop_ratio=0.8, remain_type="L"):
        """Keep only one side of the dataset, split at ``drop_ratio``.

        @param drop_ratio: split point as a fraction of the current length
        @param remain_type: "L" keeps the left part [:split], anything else keeps [split:]
        @return: self, or None when there is at most one sample
        """
        if len(self) > 1:
            split = int(len(self) * drop_ratio)
            split = split if split < len(self) else len(self) - 1
            # BUG FIX: target must be sliced together with data, otherwise
            # __getitem__ pairs samples with the wrong labels afterwards.
            if remain_type and remain_type.upper() == "L":
                self.data = self.data[:split]
                self.target = self.target[:split]
            else:
                self.data = self.data[split:]
                self.target = self.target[split:]
            return self
        else:
            return None

    def __len__(self):
        """Number of (data block, target) samples."""
        return len(self.data)

    def __getitem__(self, index):
        """Return the (data block, target) pair at ``index``."""
        return self.data[index], self.target[index]