from os.path import join, exists

import numpy as np
import pandas as pd
from abc import ABCMeta
from tqdm import tqdm
from typing import Dict, List
import json
import datetime
import random

from common import data_dir, company_data_dir, train_data_dir

# Seconds in one calendar day; used to convert between day counts and
# POSIX timestamps when building the train/test split.
seconds_per_day = 24 * 3600
# Small constant guarding normalization divisions against a zero std.
epsilon = 1e-4


def get_str_datetime(date_str: str) -> datetime.datetime:
    """Parse the leading 'YYYYMMDD' digits of *date_str* into a midnight datetime.

    Any characters after the first eight are ignored.
    """
    year, month, day = int(date_str[:4]), int(date_str[4:6]), int(date_str[6:8])
    return datetime.datetime(year, month, day)


class SingletonData(metaclass=ABCMeta):
    """Abstract base for lazily-built, class-level singleton data caches.

    Subclasses override ``get``/``clear`` and share the column-name
    constants below describing the raw per-day stock table.
    """

    # Columns whose values must be non-negative for a row to count as valid.
    __positive_data_cols = [
        '昨日收盘价', '开盘价', '收盘价', '最低价', '最高价', '成交量', '成交金额',
        '当日总股本', '当日流通股本', '当日自由流通股本'
    ]

    # Continuous (float) feature columns used as model input.
    __float_feature_cols = [
        '昨日收盘价', '开盘价', '收盘价', '最低价', '最高价', '成交量', '成交金额',
        '当日总股本', '当日流通股本', '当日自由流通股本', '复权因子'
    ]

    # Discrete (categorical) feature columns.
    __discrete_feature_cols = ['停牌', '涨跌停标志']

    @classmethod
    def get(cls):
        """Return the singleton payload; subclasses must override.

        Bug fix: ``raise NotImplemented`` raised a TypeError (NotImplemented
        is a sentinel value, not an exception class); raise the proper
        NotImplementedError instead.
        """
        raise NotImplementedError

    @classmethod
    def positive_data_cols(cls) -> List[str]:
        """Columns required to be >= 0 in valid rows."""
        return cls.__positive_data_cols

    @classmethod
    def float_feature_cols(cls) -> List[str]:
        """Continuous feature column names."""
        return cls.__float_feature_cols

    @classmethod
    def discrete_feature_cols(cls) -> List[str]:
        """Discrete feature column names."""
        return cls.__discrete_feature_cols

    @classmethod
    def clear(cls):
        """Drop the cached singleton; subclasses must override."""
        raise NotImplementedError


class OriginalData(SingletonData):
    """Singleton access to the raw 2020 A-share table as one DataFrame."""

    __data_instance = None

    @classmethod
    def get(cls) -> pd.DataFrame:
        """Return the cached raw DataFrame, loading it on first use."""
        if cls.__data_instance is None:
            cls.__init_data()
        return cls.__data_instance

    @classmethod
    def __init_data(cls):
        """Load the CSV cache, converting from the xlsx source when absent."""
        print('reading csv data')
        csv_path = join(data_dir, '2020AStock.csv')
        if not exists(csv_path):
            # One-time conversion: the xlsx source is slow to parse, so a
            # CSV copy is written next to it and reused afterwards.
            print('csv data not found, transforming xlsx data to csv data')
            pd.read_excel(join(data_dir, '2020A股.xlsx')).to_csv(
                csv_path, encoding='utf-8')
        print('read csv data')
        cls.__data_instance = pd.read_csv(csv_path, index_col=0)

    @classmethod
    def clear(cls):
        """Drop the cached DataFrame."""
        cls.__data_instance = None


class CompanyDayData(SingletonData):
    """Singleton mapping of company name -> that company's daily DataFrame.

    Each company's rows are extracted from the valid data once and cached
    as an individual CSV under ``company_data_dir``.
    """

    __data_instance = None

    @classmethod
    def get(cls) -> Dict[str, pd.DataFrame]:
        """Return the cached {company name: DataFrame} dict, building lazily."""
        if cls.__data_instance is None:
            cls.__init_data()
        return cls.__data_instance

    @classmethod
    def __read_company_names(cls) -> List[str]:
        """Return all distinct company names, cached in a JSON file."""
        path = join(data_dir, 'company-names.json')
        if not exists(path):
            print('company names cache file not found, read total companies')
            data = OriginalData.get().loc[:, '公司名称'].drop_duplicates().values
            name_list: list = np.array(data).tolist()
            with open(path, 'w', encoding='utf8') as file:
                json.dump({'companies': name_list}, file, ensure_ascii=False)
        print('read total company names')
        with open(path, 'r', encoding='utf8') as file:
            return json.load(file)['companies']

    @classmethod
    def __init_data(cls):
        """Load (or create) one cached CSV per company.

        Bug fix: the cache path previously ignored the computed ``filename``,
        so every company read and wrote the very same CSV file. The sanitized
        company name ('*' is not a safe filename character, it is replaced
        with '星__') is now part of the path.
        """
        print('reading company data')
        cls.__data_instance = {}

        company_names = cls.__read_company_names()
        print('reading data by company names')
        for each in tqdm(company_names):
            filename = each.replace('*', '星__')
            csv_path = join(company_data_dir, f'{filename}.csv')
            if not exists(csv_path):
                data = ValidDayData.get()
                data = data.loc[data['公司名称'] == each]
                data.to_csv(csv_path)
            data = pd.read_csv(csv_path, index_col=0)
            cls.__data_instance[each] = data

    @classmethod
    def clear(cls):
        """Drop the cached dict."""
        cls.__data_instance = None


class ValidDayData(SingletonData):
    """Singleton view of the raw data restricted to valid rows.

    A row is valid when every price/volume column listed in
    ``positive_data_cols`` is non-negative.
    """

    __data_instance = None

    @classmethod
    def get(cls) -> pd.DataFrame:
        """Return the filtered DataFrame, computing it on first use."""
        if cls.__data_instance is None:
            cls.__init_data()
        return cls.__data_instance

    @classmethod
    def __init_data(cls):
        """Drop rows where any required column is negative."""
        frame = OriginalData.get()
        print('filter valid data')
        for column in tqdm(cls.positive_data_cols()):
            frame = frame.loc[frame[column] >= 0]

        cls.__data_instance = frame

    @classmethod
    def clear(cls):
        """Drop the cached DataFrame."""
        cls.__data_instance = None


class CompanyMetaData(SingletonData):
    # Singleton cache of per-company metadata, persisted as
    # data_dir/company-meta-data.json.
    __data_instance = None

    @classmethod
    def get(cls) -> dict:
        """Return {company name: metadata dict}, building the JSON lazily."""
        if cls.__data_instance is None:
            cls.__init_data()
        return cls.__data_instance

    @classmethod
    def __init_data(cls):
        """Compute (or load) per-company metadata.

        For each company with at least one valid row this records: a dense
        integer ``id``, ``list_time``/``enter_time`` (whole days before
        2021-01-01), the ``st`` flag, the WIND code suffix (the part after
        the dot, i.e. the exchange), and mean/std/min/max per float feature
        column for later per-company normalization.
        """
        # Fixed reference date against which listing/entry ages are measured.
        compare_date = datetime.datetime(year=2021, month=1, day=1)
        path = join(data_dir, 'company-meta-data.json')

        if not exists(path):
            result = {}
            data = CompanyDayData.get()

            ff_cols = cls.float_feature_cols()

            # Sorted iteration keeps the assigned ids deterministic.
            names = sorted([name for name in data.keys()])
            for name in names:
                company_data = data[name]

                # Companies without any valid rows are skipped entirely.
                if len(company_data) <= 0:
                    continue

                # Static attributes are read from the first row only;
                # assumes they are constant per company — TODO confirm.
                wind_code = str(company_data.loc[:, 'WIND代码'].values[0])
                wind_code_suffix = wind_code.split('.')[1]
                st = str(company_data.loc[:, 'ST'].values[0])
                list_date = str(company_data.loc[:, '上市日期'].values[0])
                enter_date = str(company_data.loc[:, '进入中信指数日期'].values[0])
                list_time = cls.read_time(compare_date, list_date)
                enter_time = cls.read_time(compare_date, enter_date)

                ff_stat = {}

                # Per-company column statistics.
                for col in ff_cols:
                    col_data = np.array(company_data.loc[:, col])
                    ff_stat[col] = {
                        'mean': float(np.mean(col_data)),
                        'std': float(np.std(col_data)),
                        'min': float(np.min(col_data)),
                        'max': float(np.max(col_data)),
                    }
                    # The valid-data filter should guarantee this.
                    assert ff_stat[col]['min'] >= 0, f'{name}, {col}'

                result[name] = {
                    'id': len(result),
                    'list_time': list_time,
                    'enter_time': enter_time,
                    'st': st,
                    'wind_code_suffix': wind_code_suffix,
                    'ff_stat': ff_stat
                }
            with open(path, 'w', encoding='utf8') as file:
                json.dump(result, file, ensure_ascii=False)
        with open(path, 'r', encoding='utf8') as file:
            cls.__data_instance = json.load(file)

    @staticmethod
    def read_time(compare_date, date_str):
        """Return whole days from *date_str* ('YYYYMMDD') up to *compare_date*."""
        date = get_str_datetime(date_str)
        return int((compare_date - date).total_seconds() / seconds_per_day)

    @classmethod
    def clear(cls):
        """Drop the cached metadata."""
        cls.__data_instance = None


class StatisticsData(SingletonData):
    # Singleton cache of global dataset statistics, persisted as
    # data_dir/statistics.json.
    __data_instance = None

    @classmethod
    def get(cls) -> dict:
        """Return the statistics dict, computing and caching it on first use."""
        if cls.__data_instance is None:
            cls.__init_data()
        return cls.__data_instance

    @classmethod
    def __init_data(cls):
        """Compute (or load) global statistics.

        Produces min/max/mean/std for continuous columns, value->index
        mappings for discrete columns, and the same two kinds of statistics
        for the company-level metadata (listing/entry ages, ST flag,
        exchange suffix).
        """
        path = join(data_dir, 'statistics.json')
        if not exists(path):
            result = {
                '连续特征': {},
                '离散特征映射': {}
            }
            valid_data = ValidDayData.get()
            # Continuous numeric features: global column statistics.
            col_names = cls.float_feature_cols()
            print('calculating statistics:')
            for col in col_names:
                data = np.array(valid_data.loc[:, col])
                result['连续特征'][col] = {
                    'min': float(np.min(data)),
                    'max': float(np.max(data)),
                    'mean': float(np.mean(data)),
                    'std': float(np.std(data))
                }
            # Discrete features: map each observed value to a dense index
            # in first-seen order.
            for col in cls.discrete_feature_cols():
                d = valid_data.loc[:, col].drop_duplicates().values
                mapping = {}
                for each in d:
                    mapping[int(each)] = len(mapping)
                result['离散特征映射'][col] = mapping

            # Company-level statistics gathered from the metadata singleton.
            company_meta_data = CompanyMetaData.get()
            list_time = []
            enter_time = []
            st_set = set()
            wind_code_suffix_set = set()
            for v in company_meta_data.values():
                list_time.append(v['list_time'])
                enter_time.append(v['enter_time'])
                st_set.add(v['st'])
                wind_code_suffix_set.add(v['wind_code_suffix'])
            list_time = np.array(list_time)
            enter_time = np.array(enter_time)
            result['公司连续特征'] = {
                'list_time': {
                    'min': float(np.min(list_time)),
                    'max': float(np.max(list_time)),
                    'mean': float(np.mean(list_time)),
                    'std': float(np.std(list_time))
                },
                'enter_time': {
                    'min': float(np.min(enter_time)),
                    'max': float(np.max(enter_time)),
                    'mean': float(np.mean(enter_time)),
                    'std': float(np.std(enter_time))
                }
            }

            st_mapping = {}
            wind_code_suffix_mapping = {}

            # Sorted iteration keeps these mappings deterministic across runs.
            for each in sorted([key for key in st_set]):
                st_mapping[each] = len(st_mapping)
            for each in sorted([key for key in wind_code_suffix_set]):
                wind_code_suffix_mapping[each] = len(wind_code_suffix_mapping)

            result['公司离散特征映射'] = {
                'st': st_mapping,
                'wind_code_suffix': wind_code_suffix_mapping
            }

            with open(path, 'w', encoding='utf8') as file:
                json.dump(result, file, ensure_ascii=False, indent=2)
        with open(path, 'r', encoding='utf8') as file:
            cls.__data_instance = json.load(file)
        # JSON serialization turns the int keys of the discrete mappings
        # into strings; add int-keyed aliases so integer lookups still work
        # after a reload. NOTE(review): the string keys remain alongside
        # the int keys, so the mapping ends up with both.
        for col in cls.discrete_feature_cols():
            mapping = cls.__data_instance['离散特征映射'][col]
            for k in [each for each in mapping.keys()]:
                mapping[int(k)] = mapping[k]
            cls.__data_instance['离散特征映射'][col] = mapping

    @classmethod
    def clear(cls):
        """Drop the cached statistics."""
        cls.__data_instance = None


class TestSplitSegData(SingletonData):
    # Singleton cache of {company name: POSIX timestamp where the held-out
    # test segment begins}, persisted as data_dir/test_split_seg.json.
    # NOTE(review): no random seed is set, so a fresh generation of the
    # split differs between runs; determinism relies on the JSON cache
    # already existing.
    __data_instance = None

    @classmethod
    def get(cls) -> dict:
        """Return the per-company test split timestamps, building lazily."""
        if cls.__data_instance is None:
            cls.__init_data()
        return cls.__data_instance

    @classmethod
    def __init_data(cls):
        """Generate (or load) the train/test split configuration file."""
        if not exists(join(data_dir, 'test_split_seg.json')):
            # Generate the train/test split configuration file.
            test_split_record = {}
            cpy_day_data = CompanyDayData.get()
            print('split test set')
            for cpy_name, data in tqdm(cpy_day_data.items()):
                if len(data) < 10:
                    # Fewer than 10 rows of data: excluded from both
                    # training and testing.
                    continue

                dates = data.loc[:, '交易日期'].values

                # Calendar span in days between first and last trading row.
                first_date = get_str_datetime(str(dates[0]))
                last_date = get_str_datetime(str(dates[-1]))
                days_diff = (last_date - first_date).total_seconds() \
                            // 3600 // 24

                # Everything at/after the stored timestamp is test data.
                last_date_timestamp = int(last_date.timestamp())
                if days_diff < 40:
                    # Span in [10, 40): train on it, masking the last 7
                    # consecutive days as the test segment.
                    test_split_record[cpy_name] = \
                        last_date_timestamp - 7 * seconds_per_day
                elif days_diff <= 120:
                    # Span in [40, 120]: train on it, masking the last 7 or
                    # 30 consecutive days (equal probability) as test.
                    if random.randint(0, 1) == 0:
                        days = 7
                    else:
                        days = 30
                    test_split_record[cpy_name] = \
                        last_date_timestamp - days * seconds_per_day
                else:
                    # Span > 120: train on it, masking the last 7, 30 or 90
                    # consecutive days (equal probability) as test.
                    rand = random.randint(0, 2)
                    if rand == 0:
                        days = 7
                    elif rand == 1:
                        days = 30
                    else:
                        days = 90
                    test_split_record[cpy_name] = \
                        last_date_timestamp - days * seconds_per_day
            with open(join(data_dir, 'test_split_seg.json'),
                      'w', encoding='utf8') as file:
                json.dump(test_split_record, file, ensure_ascii=False)
        with open(join(data_dir, 'test_split_seg.json'),
                  'r', encoding='utf8') as file:
            cls.__data_instance = json.load(file)

    @classmethod
    def clear(cls):
        """Drop the cached split table."""
        cls.__data_instance = None


class TrainData(SingletonData):
    """Singleton list of company names that have generated training tensors.

    On first ``get`` this writes one ``<company>.npz`` per eligible company
    under ``train_data_dir`` (stride-1 sliding 128-day windows of normalized
    features) and returns the list of company names processed.

    Review fixes applied:
    * ``float_feature_meta[col]['std']`` previously stored the *mean*
      (copy/paste bug), so per-company normalization divided by the wrong
      value.
    * The output path ignored the sanitized ``filename``, so every company
      read/wrote the same ``.npz`` file.
    * ``np.float`` (deprecated since NumPy 1.20, removed in 1.24) replaced
      with the builtin ``float`` it aliased.
    """

    __data_instance = None

    @classmethod
    def get(cls) -> list:
        """Return the list of trainable company names, generating lazily."""
        if cls.__data_instance is None:
            cls.__init_data()
        return cls.__data_instance

    @classmethod
    def __init_data(cls):
        """Build the per-company ``.npz`` training files when missing."""
        cls.__data_instance = []
        cm_data = CompanyMetaData.get()
        # Loop-invariant: fetch the split table once instead of per company.
        test_split_data = TestSplitSegData.get()

        print('check and generate processed train data')
        for name in tqdm([each for each in cm_data.keys()]):

            if name not in test_split_data.keys():
                # Too little data: this company is not trained.
                continue

            # POSIX timestamp where the held-out test segment begins.
            test_begin = test_split_data[name]

            # '*' is unsafe in filenames; mirror CompanyDayData's sanitizing.
            filename = name.replace('*', '星__')

            if not exists(join(train_data_dir, f'{filename}.npz')):
                day_data = CompanyDayData.get()[name]
                statistics_data = StatisticsData.get()
                discrete_feature_map = statistics_data['离散特征映射']

                meta_data = CompanyMetaData.get()[name]
                # One-hot of ST flag and exchange suffix; the +10 offset
                # assumes at most 10 ST categories and 2 suffixes —
                # TODO confirm against the statistics mappings.
                st_ws_one_hot = np.zeros((1, 12,), dtype=float)
                st_id = statistics_data['公司离散特征映射']['st'][
                    str(meta_data['st'])]
                ws_id = statistics_data['公司离散特征映射']['wind_code_suffix'][
                    str(meta_data['wind_code_suffix'])]
                st_ws_one_hot[0, st_id] = st_ws_one_hot[0, ws_id + 10] = 1

                # Listing/entry ages standardized with global statistics.
                lt_mean = statistics_data['公司连续特征']['list_time']['mean']
                lt_std = statistics_data['公司连续特征']['list_time']['std']

                et_mean = statistics_data['公司连续特征']['enter_time']['mean']
                et_std = statistics_data['公司连续特征']['enter_time']['std']

                lt_et = np.array([[
                    (meta_data['list_time'] - lt_mean) / lt_std,
                    (meta_data['enter_time'] - et_mean) / et_std,
                ]])

                # Per-sample accumulators filled by new_sample().
                time_one_hot = []
                float_feature = []
                target_feature = []
                discrete_feature = []
                is_next_day_rest = []

                # Per-company normalization statistics.
                float_feature_meta = {}
                for col in cls.float_feature_cols():
                    float_feature_meta[col] = {
                        'mean': meta_data['ff_stat'][col]['mean'],
                        # Bug fix: was ['mean'], breaking normalization.
                        'std': meta_data['ff_stat'][col]['std'],
                        'min': meta_data['ff_stat'][col]['min'],
                        'max': meta_data['ff_stat'][col]['max']
                    }
                    assert float_feature_meta[col]['min'] >= 0

                # Sliding window of raw values per column.
                window = {}

                float_data = {}
                for col in cls.float_feature_cols():
                    float_data[col] = day_data.loc[:, col].values

                discrete_feature_data = {}
                for col in cls.discrete_feature_cols():
                    discrete_feature_data[col] = day_data.loc[:, col].values

                # Seed the window with day 0. An abnormal (negative) value
                # in any float column turns the whole first day into a
                # "rest" day and substitutes the per-company mean.
                is_first_day_rest = False
                for col in cls.float_feature_cols():
                    first_value = float(float_data[col][0])
                    if first_value < 0 or is_first_day_rest:
                        is_first_day_rest = True
                        first_value = float_feature_meta[col]['mean']
                    window[col] = [first_value]
                for col in cls.discrete_feature_cols():
                    if is_first_day_rest:
                        first_value = int(
                            discrete_feature_map[col][
                                discrete_feature_data[col][0]
                            ])
                    else:
                        first_value = 0
                    window[col] = [first_value]
                is_day_rest = [is_first_day_rest]

                dates = [get_str_datetime(
                    str(day_data.loc[:, '交易日期'].values[0]))]

                count = 1

                date_data = day_data.loc[:, '交易日期'].values

                # Walk forward one calendar day at a time, synthesizing
                # "rest" days for calendar gaps between trading rows.
                while count < len(day_data):
                    index_date = get_str_datetime(str(date_data[count]))
                    next_date = dates[-1] + datetime.timedelta(days=1)

                    if int(next_date.timestamp()) >= test_begin:
                        # Reached the test segment: stop producing samples.
                        break

                    day_skipped = (index_date - next_date).total_seconds() > 0

                    if day_skipped:
                        dates.append(next_date)
                        # Non-trading calendar day: carry the previous day's
                        # values forward and mark it as rest.
                        for col in cls.float_feature_cols():
                            window[col].append(window[col][-1])
                        for col in cls.discrete_feature_cols():
                            window[col].append(window[col][-1])
                        is_day_rest.append(True)

                        # 129 days = 128 inputs + 1 next-day target.
                        if len(dates) >= 129:
                            cls.new_sample(
                                time_one_hot, float_feature,
                                discrete_feature, target_feature,
                                is_next_day_rest, window, dates, is_day_rest,
                                float_feature_meta
                            )

                        continue
                    is_day_rest.append(False)

                    for col in cls.float_feature_cols():
                        window[col].append(
                            float(float_data[col][count])
                        )
                    for col in cls.discrete_feature_cols():
                        window[col].append(
                            discrete_feature_map[col][
                                discrete_feature_data[col][count]]
                        )
                    dates.append(next_date)
                    count += 1

                    if len(dates) >= 129:
                        cls.new_sample(
                            time_one_hot, float_feature,
                            discrete_feature, target_feature,
                            is_next_day_rest, window, dates, is_day_rest,
                            float_feature_meta
                        )

                # Short histories (< 129 days) still yield one padded sample.
                if len(time_one_hot) == 0:
                    cls.new_sample(
                        time_one_hot, float_feature,
                        discrete_feature, target_feature,
                        is_next_day_rest, window, dates, is_day_rest,
                        float_feature_meta
                    )

                # Broadcast static company features to every sample.
                st_ws_one_hot = st_ws_one_hot.repeat(len(time_one_hot), axis=0)
                lt_et = lt_et.repeat(len(time_one_hot), axis=0)
                cpy_id = np.array([[int(meta_data['id'])]]).repeat(
                    len(time_one_hot), 0)

                time_one_hot = np.concatenate(time_one_hot, axis=0)
                float_feature = np.concatenate(float_feature, axis=0)
                target_feature = np.concatenate(target_feature, axis=0)
                discrete_feature = np.concatenate(discrete_feature, axis=0)
                is_next_day_rest = np.concatenate(is_next_day_rest, axis=0)

                data_dict = {
                    'cpy_id': cpy_id,
                    'st_ws_one_hot': st_ws_one_hot,
                    'lt_et': lt_et,
                    'time_one_hot': time_one_hot,
                    'float_feature': float_feature,
                    'target_feature': target_feature,
                    'discrete_feature': discrete_feature,
                    'is_next_day_rest': is_next_day_rest,
                }

                np.savez(join(train_data_dir, f'{filename}.npz'), **data_dict)
            cls.__data_instance.append(name)

    @classmethod
    def new_sample(
            cls,
            time_one_hot, float_feature,
            discrete_feature, target_feature, is_next_day_rest,
            window, dates, is_rest,
            float_feature_meta: dict
    ):
        """Emit one 128-day sample from the current window, then advance it.

        Appends one ``(1, 128, C)`` array to each accumulator list and pops
        the oldest day from ``window``/``dates``/``is_rest``, so consecutive
        calls produce a stride-1 sliding window. Day ``i`` predicts day
        ``i + 1``; targets are only marked valid when the next day is not a
        rest day.
        """
        data_count = len(dates)
        # Time one-hot: 12 month + 31 day-of-month + 7 weekday slots = 50.
        new_time_one_hot = np.zeros((1, 128, 50), dtype=float)
        # 11 normalized values + 11 scale channels per day.
        new_float_feature = np.zeros((1, 128, 22), dtype=float) + epsilon
        # Targets: 11 values + 11 scales + validity flag; -1 marks invalid.
        new_target_feature = np.zeros((1, 128, 23), dtype=float) - 1
        new_is_next_day_rest = np.zeros((1, 128, 1), dtype=float) + 1
        # Discrete one-hot: presumably 5 停牌 slots + 4 涨跌停 slots — confirm
        # against the discrete mappings.
        new_discrete_feature = np.zeros((1, 128, 9), dtype=float)
        for i in range(data_count - 1):
            new_time_one_hot[0, i, dates[i].month - 1] = 1
            new_time_one_hot[0, i, dates[i].day - 1 + 12] = 1
            new_time_one_hot[0, i, dates[i].weekday() + 43] = 1

            for j, col in enumerate(cls.float_feature_cols()):
                mean = float_feature_meta[col]['mean']
                std = float_feature_meta[col]['std']
                c_max = float_feature_meta[col]['max']
                c_min = float_feature_meta[col]['min']
                new_float_feature[0, i, j] = \
                    (window[col][i] - mean) / (std + epsilon)
                new_target_feature[0, i, j] = \
                    (window[col][i + 1] - mean) / \
                    (std + epsilon)
                if not is_rest[i + 1]:
                    scale = epsilon + (c_max - c_min) / (std + epsilon)
                    new_target_feature[0, i, j + 11] = scale
                    new_float_feature[0, i, j + 11] = scale

            if not is_rest[i + 1]:
                # Next day is a trading day: this training target is valid.
                new_target_feature[0, i, 22] = 1
                new_is_next_day_rest[0, i, 0] = 0

            # Discrete feature one-hot vector.
            new_discrete_feature[0, i, window['停牌'][i]] = 1
            new_discrete_feature[0, i, window['涨跌停标志'][i] + 5] = 1

        time_one_hot.append(new_time_one_hot)
        float_feature.append(new_float_feature)
        target_feature.append(new_target_feature)
        discrete_feature.append(new_discrete_feature)
        is_next_day_rest.append(new_is_next_day_rest)
        # Advance the window by one day.
        for col in cls.float_feature_cols():
            window[col].pop(0)
        for col in cls.discrete_feature_cols():
            window[col].pop(0)
        dates.pop(0)
        is_rest.pop(0)

    @classmethod
    def clear(cls):
        """Drop the cached name list (does not delete generated files)."""
        cls.__data_instance = None
