import numpy as np
import pandas as pd
from dataset.Utils import Utils
from datetime import datetime
from tqdm import tqdm

# Columns of the raw minute CSV that this dataset builder consumes, in the
# exact order the downstream windowing code indexes them.
EFFECTIVE_HEADERS = np.array([
    "date",
    "close",
    "volume",
    "money",
    "bollUp",
    "bollMiddle",
    "bollDown",
    "macdDea",
    "macdDif",
    "macdHis",
    "open_interest",
    "symbol",
])

"""
## Introduction

包含 macd，boll指标,只包含close的价格
"""

"""
## 
"""


class MIndCsvDatasetAndIndOneHotV2:
    """Minute-bar dataset builder using MACD and BOLL indicators (close only).

    Slices each symbol's minute bars into sliding windows of ``min_lookup``
    rows and labels each window 1/0 depending on whether the close price
    ``predict_offset`` bars ahead moves more than ``up_down_scale`` up/down.
    Windows without a significant move are dropped.
    """

    def __init__(self, day_csv=None,
                 min_csv=None,
                 min_lookup=60,
                 up_down_scale=3,
                 predict_offset=3):
        # BUG FIX: day_csv was accepted but never stored; keep it so callers
        # passing it are not silently ignored (read_min still only uses min_csv).
        self.day_csv = day_csv
        # Path to the minute-bar CSV file.
        self.min_csv = min_csv
        # Number of minute bars in each input window.
        self.min_lookup = min_lookup
        # Look-ahead (in bars) used to compute the up/down label.
        self.predict_offset = predict_offset
        # Absolute close-price move required to label a sample up (1) / down (0).
        self.up_down_scale = up_down_scale

    def __normalization(self, array):
        """Scale one (lookup, 9) window column-wise; return None on bad data.

        Column layout (EFFECTIVE_HEADERS[1:10]):
        0 close, 1 volume, 2 money, 3 bollUp, 4 bollMiddle, 5 bollDown,
        6 macdDea, 7 macdDif, 8 macdHis.
        """
        # Price-like columns share one scale derived from the BOLL band range.
        # np.ptp(...) instead of the ndarray.ptp() method: the method was
        # removed in NumPy 2.0; the result is identical.
        ptp = np.ptp(array[:, 3:6]) * 1.5
        volume_ptp = np.ptp(array[:, 1:2])
        # NOTE(review): column 2 is "money" in EFFECTIVE_HEADERS but is named
        # "position" here — confirm which column is actually intended.
        position_ptp = np.ptp(array[:, 2:3])
        # MACD values are small; a fixed divisor is sufficient.
        macd = 10
        if ptp == 0 or volume_ptp == 0 or position_ptp == 0:
            # Degenerate window (a constant column) — signal invalid data.
            return None
        denominator = [ptp, volume_ptp, position_ptp, ptp, ptp, ptp, macd, macd, macd]
        # Center price-like columns on the window's first bollMiddle value;
        # volume/money start from their own first values, MACD from zero.
        bm = array[0][4]
        start = [bm, array[0][1], array[0][2], bm, bm, bm, 0, 0, 0]
        return (array - start) / denominator

    # Samples without a clear move return None and are dropped by the caller,
    # otherwise the labels would be too noisy.
    def __cal_up_down(self, code_ds, current_i, target_close):
        """Return 1 (up), 0 (down), or None when the move is within the band."""
        current_close = code_ds.iloc[current_i, 1:2].values[0]
        if target_close > current_close + self.up_down_scale:
            return 1
        elif target_close < current_close - self.up_down_scale:
            return 0
        # Move within ±up_down_scale: no label.
        return None

    def read_min(self, normalization=False, ds_cnt=None, filter_no_change=False, start_code=None):
        """Parse the minute CSV into (windows, labels) numpy arrays.

        Args:
            normalization: apply ``__normalization`` to each window.
            ds_cnt: stop collecting once this many samples are gathered.
            filter_no_change: drop samples whose close is unchanged at the
                prediction horizon.
            start_code: skip symbols sorting before this code.

        Returns:
            Tuple of float32 arrays ``(min_dataset, target_dataset)`` with
            shapes (N, min_lookup, 9) and (N, 1).
        """
        min_df = pd.read_csv(self.min_csv)
        min_df = min_df.loc[:, EFFECTIVE_HEADERS]
        min_df["day"] = min_df["date"].map(lambda t: Utils.format_time_str_2_day(t))
        min_dataset = []
        target_dataset = []
        min_df = pd.DataFrame(min_df)
        code_min_df = min_df.groupby("symbol")
        # Only bars strictly after the session open are used.
        start_time = datetime.strptime('9:30', '%H:%M').time()
        pbar = tqdm(total=len(code_min_df), desc='解析分钟序列')
        invalid_day_map = {}
        for every_code_df in code_min_df:
            pbar.update(1)
            if start_code is not None and every_code_df[0] < start_code:
                continue
            # All rows belonging to one symbol.
            code_df = every_code_df[1]
            day_min_df_len = len(code_df)

            for i in range(day_min_df_len):
                # Current bar as a flat numpy row; column 12 is the added "day".
                current_k = np.array(code_df.iloc[i:i + 1, :])[0]
                day_str = current_k[12]

                time = Utils.parse_time(current_k[0]).time()
                if self.min_lookup < i < (day_min_df_len - self.predict_offset) and time > start_time:
                    # BUG FIX: the window used to be assigned to `min_df`,
                    # clobbering the outer whole-file DataFrame.
                    window_df = code_df.iloc[i - self.min_lookup + 1:i + 1, :]
                    min_ds = np.array(window_df.iloc[:, 1:10])
                    current_close = current_k[1]
                    target_close = code_df.iloc[i + self.predict_offset, 1:2].values[0]
                    if filter_no_change and current_close == target_close:
                        continue
                    up_down = self.__cal_up_down(code_df, i, target_close)
                    if up_down is None:
                        continue
                    if normalization:
                        min_ds = self.__normalization(min_ds)
                    if min_ds is None:
                        # Record the day containing degenerate data.
                        invalid_day_map[day_str] = 1
                    else:
                        min_dataset.append(min_ds)
                        target_dataset.append([up_down])
                        if ds_cnt is not None and len(min_dataset) >= ds_cnt:
                            break
            if len(invalid_day_map) > 0:
                print(f'\n存在无效数据:{invalid_day_map.keys()}')
                invalid_day_map.clear()
            # BUG FIX: the inner break only exited the per-symbol loop, so the
            # ds_cnt cap was ignored for remaining symbols; stop here too.
            if ds_cnt is not None and len(min_dataset) >= ds_cnt:
                break
        min_dataset = np.array(min_dataset, dtype=np.float32)
        target_dataset = np.array(target_dataset, dtype=np.float32)
        pbar.close()
        print(f'Min dataset shape:{min_dataset.shape}')
        up_down_cnt = target_dataset.sum(axis=0)
        print(f'Target dataset shape:{target_dataset.shape} , '
              f'\nUp Cnt :{up_down_cnt}'
              f'\nDown cnt :{len(target_dataset) - up_down_cnt}')
        return min_dataset, target_dataset
