import pandas as pd
import numpy as np
import os
import time
import datetime
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor, as_completed
# from openpyxl.utils import get_column_letter, column_index_from_string


# Console display settings: print DataFrames/arrays without truncation and
# with fixed float precision so debug dumps of wide tick frames stay readable.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 1000000)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
np.set_printoptions(suppress=True, precision=4, threshold=np.inf)


# Convert Excel column letters to/from numeric indexes (openpyxl helper):
# c_num = column_index_from_string('B')  # c_num = 2


# Exchange suffix -> exchange code used downstream. Insertion order matters:
# DataLoader scans the items in order and stops at the first substring match.
symbol_mapping = OrderedDict([('SZ', 'XSHE'), ('SH', 'XSHG')])

# File-name templates per data source: 'snap'/'trade' are formatted with
# (date, symbol); 'merge' is formatted with the order-book id only.
type_file_name_mapping = OrderedDict([
    ('stock', {'snap': 'TDB_snapshot_{0}_{1}.csv',
               'trade': 'TDB_transaction_{0}_{1}.csv',
               'merge': '{}.csv'}),
    ('d_currency', {'snap': '{1}-SWAP_{0}.csv',
                    'trade': '{1}_{0}.csv',
                    'merge': '{}.csv'}),
])


def get_file_name_li(select_type, start, end, data_path, symbol):
    """Collect snapshot and transaction csv paths for one symbol over a date range.

    Scans ``data_path`` for per-day sub-directories whose names start with a
    date (``YYYYMMDD``) and probes them for the expected snapshot/transaction
    files built from ``type_file_name_mapping``.

    :param select_type: key into ``type_file_name_mapping`` ('stock', 'd_currency')
    :param start: range start (anything ``pd.date_range`` accepts)
    :param end: range end (inclusive)
    :param data_path: root directory holding the per-day sub-directories
    :param symbol: raw symbol used in the file-name templates
    :return: tuple of (sorted snapshot paths, sorted transaction paths)
    """
    snap_tpl = type_file_name_mapping[select_type]['snap']
    trade_tpl = type_file_name_mapping[select_type]['trade']
    # Scan the directory once instead of once per date (the original issued
    # os.listdir inside the date loop: O(dates * entries) syscalls).
    entries = [(name, os.path.join(data_path, name)) for name in os.listdir(data_path)]
    source_file_li = []
    trans_file_li = []
    for dt in pd.date_range(start, end, freq='d').strftime('%Y%m%d'):
        for name, dir_path in entries:
            if not (os.path.isdir(dir_path) and name.startswith(dt)):
                continue
            # Probe both templates with the same (date, symbol) pair;
            # keep only files that exist and have not been seen yet.
            for tpl, bucket in ((snap_tpl, source_file_li), (trade_tpl, trans_file_li)):
                candidate = os.path.join(dir_path, tpl.format(dt, symbol))
                if os.path.exists(candidate) and candidate not in bucket:
                    bucket.append(candidate)
    return sorted(source_file_li), sorted(trans_file_li)


def get_file_name_li_from_merge_data_path(select_type, start, end, merge_data_path, symbol):
    """Collect pre-merged csv paths for one symbol over a date range.

    Like :func:`get_file_name_li`, but day directories are named with
    ``YYYY-MM-DD`` prefixes and contain one '<order_book_id>.csv' per symbol.

    :param select_type: key into ``type_file_name_mapping``
    :param start: range start (anything ``pd.date_range`` accepts)
    :param end: range end (inclusive)
    :param merge_data_path: root directory of the merged-data tree
    :param symbol: order-book id used in the 'merge' file-name template
    :return: sorted list of existing merged csv paths
    """
    merge_tpl = type_file_name_mapping[select_type]['merge']
    # Scan the directory once instead of once per date.
    entries = [(name, os.path.join(merge_data_path, name)) for name in os.listdir(merge_data_path)]
    merge_file_li = []
    for dt in pd.date_range(start, end, freq='d').strftime('%Y-%m-%d'):
        for name, dir_path in entries:
            if not (os.path.isdir(dir_path) and name.startswith(dt)):
                continue
            candidate = os.path.join(dir_path, merge_tpl.format(symbol))
            print(candidate)
            if os.path.exists(candidate) and candidate not in merge_file_li:
                merge_file_li.append(candidate)
    print('merge_file_li:', len(merge_file_li), merge_file_li)
    return sorted(merge_file_li)


# English month abbreviation -> zero-padded month number; used by
# DataProcess.pre_process_columns to normalise datetime strings.
_MONTH_ABBRS = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
month_mapping = {name: '%02d' % num for num, name in enumerate(_MONTH_ABBRS, start=1)}


class DataProcess(object):
    """Base class for loading one snapshot csv plus one transaction csv
    and reshaping them into a resampled frame (see subclasses)."""

    def __init__(self, snap_path, trans_path, sys_freq):
        self.snap_path = snap_path    # path to the snapshot (order book) csv
        self.trans_path = trans_path  # path to the transaction (trades) csv
        self.sys_freq = sys_freq      # pandas resample frequency, e.g. '1min'

    @staticmethod
    def pre_process_columns(df):
        """Normalise column names and install a DatetimeIndex.

        'Unnamed: 0' is treated as a datetime string column (with English
        month abbreviations translated via ``month_mapping``); otherwise the
        timestamp is rebuilt from 'Date' + 'Time' + 'Millisec'. Surrounding
        whitespace is stripped from every column name.

        :param df: freshly-read csv frame (modified in place, also returned)
        :return: the frame indexed by 'datetime', with any 'index' column dropped
        """
        new_cols = []
        for raw_col in df.columns:
            if raw_col == 'Unnamed: 0':
                col = 'datetime'
            else:
                col = raw_col.strip()
            new_cols.append(col)
        df.columns = new_cols
        pat = r'[a-zA-Z]+'

        def repl(m):
            # Translate an English month abbreviation (e.g. 'Jan') to '01'.
            return month_mapping[m.group(0)]

        if 'datetime' in df.columns:
            # regex=True is required when the replacement is a callable:
            # pandas >= 2.0 defaults to regex=False and rejects callables.
            df['datetime'] = df['datetime'].str.replace(pat, repl, regex=True)
            df['datetime'] = pd.to_datetime(df['datetime'], format="%Y-%m-%d %H:%M:%S")
        else:
            # Time is zero-padded to HHMMSS and Millisec to three digits so
            # the concatenated string parses as %Y%m%d%H%M%S%f.
            df['datetime'] = pd.to_datetime(df['Date'].apply(str) + df['Time'].apply(lambda x: '%06d' % int(x))
                                            + df['Millisec'].apply(lambda x: '%03d' % int(x)), format="%Y%m%d%H%M%S%f")
        df.set_index(['datetime'], inplace=True)
        if 'index' in df.columns:
            df = df.drop(columns=['index'])
        return df

    def reform_data(self):
        """Subclass hook: build the merged, resampled frame."""
        pass


# Depth-style data loading for a single symbol.
class StockProcess(DataProcess):
    """Depth-style loader for a single stock symbol: merges one day's
    snapshot and transaction csv files into a resampled tick frame."""

    def reform_data(self):
        """Build the merged snapshot/transaction frame for one symbol-day.

        :return: DataFrame indexed by datetime with the 10-level order book,
            buy/sell volumes and trade counts, per-bar close, and cumulative
            volume / turnover columns.
        """
        snap_data = pd.read_csv(self.snap_path)
        trans_data = pd.read_csv(self.trans_path)
        snap_data = self.pre_process_columns(snap_data)
        trans_data = self.pre_process_columns(trans_data)
        print('snap data origin length:', len(snap_data))
        print('trans data origin length:', len(trans_data))

        # Keep snapshots from the 09:15 opening call auction onwards.
        mask = snap_data.index >= snap_data.index.min().replace(hour=9, minute=15, second=0)
        snap_data = snap_data[mask]
        # .ffill() replaces fillna(method='pad'), which is deprecated in
        # pandas 2.x and removed in 3.0.
        snap_data = snap_data.resample(self.sys_freq, closed='right', label='right').last().ffill()
        # Cut out the lunch break (11:30, 13:00).
        mask = (snap_data.index > snap_data.index.min().replace(hour=11, minute=30, second=0)) & \
               (snap_data.index < snap_data.index.min().replace(hour=13, minute=0, second=0))
        snap_data = snap_data[~mask]
        # Keep trades from 09:15 onwards; drop zero-price rows once the
        # 09:30 continuous session has started.
        mask = (trans_data.index >= trans_data.index.min().replace(hour=9, minute=15, second=0, microsecond=0))
        trans_data = trans_data[mask]
        mask = (trans_data.index >= trans_data.index.min().replace(hour=9, minute=30, second=0, microsecond=0)) & \
               (trans_data['close'] == 0)
        trans_data = trans_data[~mask]
        if trans_data.index.max() < trans_data.index.max().replace(hour=15, minute=0, second=0, microsecond=0):
            # Pad out to the 15:00 close with one all-NaN row so resampling
            # spans the whole session. DataFrame.append was removed in
            # pandas 2.0 -> enlarge via .loc instead.
            add_s_t = trans_data.index.max().replace(hour=15, minute=0, second=0, microsecond=0)
            trans_data.loc[add_s_t] = np.nan
        # Buy side: zero out sell ('S') rows, then aggregate per bar.
        trans_data['vol_transaction_temp'] = trans_data['vol_transaction']
        trans_data['vol_transaction_temp_sum'] = trans_data['vol_transaction'] * trans_data['close']
        mask = trans_data['type_transaction'] == 'S'
        trans_data.loc[mask, 'vol_transaction_temp'] = 0
        trans_data.loc[mask, 'vol_transaction_temp_sum'] = 0
        b_df = trans_data['vol_transaction_temp'].resample(self.sys_freq, closed='right', label='right').sum()
        b_df.name = 'b_vol'
        b_df_sum = trans_data['vol_transaction_temp_sum'].resample(self.sys_freq, closed='right', label='right').sum()
        b_df_sum.name = 'b_vol_total_value'
        # Number of buy trades per bar.
        b_unit_df = (trans_data['vol_transaction_temp'] != 0).resample(self.sys_freq, closed='right', label='right').sum()
        b_unit_df.name = 'b_unit'
        # Sell side: same aggregation with buy ('B') rows zeroed out.
        trans_data['vol_transaction_temp'] = trans_data['vol_transaction']
        trans_data['vol_transaction_temp_sum'] = trans_data['vol_transaction'] * trans_data['close']
        mask = trans_data['type_transaction'] == 'B'
        trans_data.loc[mask, 'vol_transaction_temp'] = 0
        trans_data.loc[mask, 'vol_transaction_temp_sum'] = 0
        s_df = trans_data['vol_transaction_temp'].resample(self.sys_freq, closed='right', label='right').sum()
        s_df.name = 's_vol'
        s_df_sum = trans_data['vol_transaction_temp_sum'].resample(self.sys_freq, closed='right', label='right').sum()
        s_df_sum.name = 's_vol_total_value'
        # Number of sell trades per bar.
        s_unit_df = (trans_data['vol_transaction_temp'] != 0).resample(self.sys_freq, closed='right', label='right').sum()
        s_unit_df.name = 's_unit'
        # Per-bar mean trade price, forward-filled across empty bars.
        p_df = trans_data['close'].resample(self.sys_freq, closed='right', label='right').mean().ffill()
        p_df.name = 't_close'
        trans_df = pd.concat([b_df, s_df, p_df, b_unit_df, s_unit_df, b_df_sum, s_df_sum], axis=1)
        # Cut the lunch break out of the aggregated trade bars as well.
        mask = (trans_df.index > trans_df.index.min().replace(hour=11, minute=30, second=0)) & \
               (trans_df.index < trans_df.index.min().replace(hour=13, minute=0, second=0))
        trans_df = trans_df[~mask]
        data = pd.merge(left=snap_data, right=trans_df, left_index=True, right_index=True, how='left')
        select_cols = ['high', 'low', 'open',
                       'prc_ask_001', 'vol_ask_001', 'prc_ask_002', 'vol_ask_002',
                       'prc_ask_003', 'vol_ask_003', 'prc_ask_004', 'vol_ask_004',
                       'prc_ask_005', 'vol_ask_005', 'prc_ask_006', 'vol_ask_006',
                       'prc_ask_007', 'vol_ask_007', 'prc_ask_008', 'vol_ask_008',
                       'prc_ask_009', 'vol_ask_009', 'prc_ask_010', 'vol_ask_010',
                       'prc_bid_001', 'vol_bid_001', 'prc_bid_002', 'vol_bid_002',
                       'prc_bid_003', 'vol_bid_003', 'prc_bid_004', 'vol_bid_004',
                       'prc_bid_005', 'vol_bid_005', 'prc_bid_006', 'vol_bid_006',
                       'prc_bid_007', 'vol_bid_007', 'prc_bid_008', 'vol_bid_008',
                       'prc_bid_009', 'vol_bid_009', 'prc_bid_010', 'vol_bid_010',
                       'b_vol', 's_vol', 't_close', 'b_unit', 's_unit',
                       'b_vol_total_value', 's_vol_total_value']
        data = data[select_cols]
        data.rename(columns={'t_close': 'close'}, inplace=True)
        # Drop bars with no usable price.
        mask = pd.isna(data['close']) | (data['close'] == 0)
        data = data[~mask]
        # Cumulative volume / turnover over the day, plus the per-bar add.
        data['temp'] = data['b_vol'] + data['s_vol']
        data['volume'] = data['temp'].cumsum()
        data['total_turnover'] = data['temp'] * data['close']
        data['total_turnover'] = data['total_turnover'].cumsum()
        data['volume_add'] = data['b_vol'] + data['s_vol']
        print('merge length:', len(data))
        return data


# Called by data_source: loads data for a single symbol.
# book_id is the converted id the RiceQuant side recognises; ori_book_id is the original.
def get_source_tick_data(sel_type, file_li, trans_file_li, book_id, sys_freq):
    """Concatenate per-day snapshot/transaction csv pairs into one tick frame.

    :param sel_type: key into ``router_map`` selecting the processor class
    :param file_li: sorted snapshot csv paths
    :param trans_file_li: sorted transaction csv paths (same length and order)
    :param book_id: converted order-book id stamped on every output row
    :param sys_freq: resample frequency forwarded to the processor
    :return: tick DataFrame with compact a1..a10 / b1..b10 book columns
    """
    print('file_li:**', file_li)
    print('trans file li:**', trans_file_li)
    print('book_id:**', book_id)
    source_tick_data_li = []
    # The two lists are built and sorted together -> iterate in lock-step
    # instead of indexing by range(len(...)).
    for snap_path, trans_path in zip(file_li, trans_file_li):
        # The trailing '_'-delimited token of each name must match, i.e.
        # the pair belongs to the same date/symbol.
        assert snap_path.rsplit('_', 2)[-1] == trans_path.rsplit('_', 2)[-1]
        source_file_data = router_map[sel_type](snap_path, trans_path, sys_freq).reform_data()
        source_tick_data_li.append(source_file_data)
    source_tick_data = pd.concat(source_tick_data_li, sort=False)

    source_tick_data['datetime'] = source_tick_data.index
    source_tick_data['trading_date'] = pd.to_datetime(source_tick_data['datetime']).dt.strftime('%Y-%m-%d')
    source_tick_data['order_book_id'] = book_id

    # prc_/vol_ level columns -> compact a1/a1_v ... b10/b10_v names.
    fields_rename_mapping = {'prc_ask_001': 'a1', 'prc_ask_002': 'a2', 'prc_ask_003': 'a3',
                             'prc_ask_004': 'a4', 'prc_ask_005': 'a5',
                             'prc_ask_006': 'a6', 'prc_ask_007': 'a7', 'prc_ask_008': 'a8',
                             'prc_ask_009': 'a9', 'prc_ask_010': 'a10',
                             'prc_bid_001': 'b1', 'prc_bid_002': 'b2', 'prc_bid_003': 'b3',
                             'prc_bid_004': 'b4', 'prc_bid_005': 'b5',
                             'prc_bid_006': 'b6', 'prc_bid_007': 'b7', 'prc_bid_008': 'b8',
                             'prc_bid_009': 'b9', 'prc_bid_010': 'b10',
                             'vol_ask_001': 'a1_v', 'vol_ask_002': 'a2_v', 'vol_ask_003': 'a3_v',
                             'vol_ask_004': 'a4_v', 'vol_ask_005': 'a5_v',
                             'vol_ask_006': 'a6_v', 'vol_ask_007': 'a7_v', 'vol_ask_008': 'a8_v',
                             'vol_ask_009': 'a9_v', 'vol_ask_010': 'a10_v',
                             'vol_bid_001': 'b1_v', 'vol_bid_002': 'b2_v', 'vol_bid_003': 'b3_v',
                             'vol_bid_004': 'b4_v', 'vol_bid_005': 'b5_v',
                             'vol_bid_006': 'b6_v', 'vol_bid_007': 'b7_v', 'vol_bid_008': 'b8_v',
                             'vol_bid_009': 'b9_v', 'vol_bid_010': 'b10_v',
                             }
    source_tick_data = source_tick_data.rename(columns=fields_rename_mapping)
    # Sentinel values: no open interest for this data, and price limits wide
    # enough never to trigger downstream.
    source_tick_data['open_interest'] = 0
    source_tick_data['limit_up'] = 1000
    source_tick_data['limit_down'] = -100000
    source_tick_data['last'] = source_tick_data['close']
    select_columns = ['datetime', 'last', 'open', 'high', 'low', 'volume', 'volume_add',
                      'total_turnover',
                      'b_vol', 's_vol', 'b_unit', 's_unit', 'b_vol_total_value', 's_vol_total_value',
                      'open_interest',
                      'a1', 'a1_v', 'a2', 'a2_v', 'a3', 'a3_v', 'a4', 'a4_v', 'a5', 'a5_v',
                      'a6', 'a6_v', 'a7', 'a7_v', 'a8', 'a8_v', 'a9', 'a9_v', 'a10', 'a10_v',
                      'b1', 'b1_v', 'b2', 'b2_v', 'b3', 'b3_v', 'b4', 'b4_v', 'b5', 'b5_v',
                      'b6', 'b6_v', 'b7', 'b7_v', 'b8', 'b8_v', 'b9', 'b9_v', 'b10', 'b10_v',
                      'close', 'limit_up', 'limit_down',
                      'order_book_id', 'trading_date']
    source_tick_data = source_tick_data[select_columns]
    source_tick_data.sort_index(inplace=True)
    return source_tick_data


def get_source_tick_data_from_merge_path(merge_file_li):
    """Load pre-merged per-day csv files and index the result by datetime.

    :param merge_file_li: list of merged csv paths (may be empty)
    :return: concatenated tick frame; when no files are given, an empty
        frame carrying the canonical tick columns
    """
    frames = [pd.read_csv(path) for path in merge_file_li]
    if frames:
        source_tick_data = pd.concat(frames, sort=False)
    else:
        # No input files: return the canonical (empty) column layout.
        select_columns = ['datetime', 'last', 'open', 'high', 'low', 'volume', 'volume_add',
                          'total_turnover',
                          'b_vol', 's_vol', 'b_unit', 's_unit', 'b_vol_total_value', 's_vol_total_value',
                          'open_interest',
                          'a1', 'a1_v', 'a2', 'a2_v', 'a3', 'a3_v', 'a4', 'a4_v', 'a5', 'a5_v',
                          'a6', 'a6_v', 'a7', 'a7_v', 'a8', 'a8_v', 'a9', 'a9_v', 'a10', 'a10_v',
                          'b1', 'b1_v', 'b2', 'b2_v', 'b3', 'b3_v', 'b4', 'b4_v', 'b5', 'b5_v',
                          'b6', 'b6_v', 'b7', 'b7_v', 'b8', 'b8_v', 'b9', 'b9_v', 'b10', 'b10_v',
                          'close', 'limit_up', 'limit_down',
                          'order_book_id', 'trading_date']
        source_tick_data = pd.DataFrame(columns=select_columns)
    # Re-parse the datetime column and mirror it into the index.
    source_tick_data['datetime'] = pd.to_datetime(source_tick_data['datetime'])
    source_tick_data.index = source_tick_data['datetime']
    return source_tick_data


# Cross-sectional data loading: load all symbols' data breadth-first.
class DataLoader(object):
    """Loads tick data for a set of symbols, one thread per symbol."""

    def __init__(self, select_type, start_date, end_date, data_path, sys_freq, symbols=None, merge_data_path=None):
        self.start_date = start_date            # first calendar date (inclusive)
        self.end_date = end_date                # last calendar date (inclusive)
        self.data_path = data_path              # root of raw snapshot/trade csvs
        self.symbols = symbols                  # iterable of raw symbol codes
        self.select_type = select_type          # key into type_file_name_mapping
        self.sys_freq = sys_freq                # resample frequency
        self.merge_data_path = merge_data_path  # root of pre-merged csvs (optional)

    def _map_order_book_id(self, _symbol):
        # Translate the exchange suffix via symbol_mapping (e.g. SZ -> XSHE);
        # the first matching key wins, unmatched symbols pass through as-is.
        order_book_id = _symbol
        print('order book id:', _symbol)
        for k, v in symbol_mapping.items():
            if k in _symbol:
                order_book_id = _symbol.replace(k, v)
                break
        print('order book id mapping:', order_book_id)
        return order_book_id

    def single_symbol_load_from_merge_path(self, _symbol):
        """Load one symbol's frame from the pre-merged csv tree."""
        order_book_id = self._map_order_book_id(_symbol)
        merge_file_li = get_file_name_li_from_merge_data_path(self.select_type, self.start_date,
                                                              self.end_date, self.merge_data_path, order_book_id)
        return get_source_tick_data_from_merge_path(merge_file_li)

    def single_symbol_load(self, _symbol):
        """Load one symbol's frame from raw snapshot/transaction csv files.

        Returns an empty frame with the canonical columns when the snapshot
        and transaction file lists are missing or unbalanced.
        """
        order_book_id = self._map_order_book_id(_symbol)
        source_file, trans_file = get_file_name_li(self.select_type, self.start_date,
                                                   self.end_date, self.data_path, _symbol)
        select_columns = ['datetime', 'last', 'open', 'high', 'low', 'volume', 'volume_add',
                          'total_turnover',
                          'b_vol', 's_vol', 'b_unit', 's_unit', 'b_vol_total_value', 's_vol_total_value',
                          'open_interest',
                          'a1', 'a1_v', 'a2', 'a2_v', 'a3', 'a3_v', 'a4', 'a4_v', 'a5', 'a5_v',
                          'a6', 'a6_v', 'a7', 'a7_v', 'a8', 'a8_v', 'a9', 'a9_v', 'a10', 'a10_v',
                          'b1', 'b1_v', 'b2', 'b2_v', 'b3', 'b3_v', 'b4', 'b4_v', 'b5', 'b5_v',
                          'b6', 'b6_v', 'b7', 'b7_v', 'b8', 'b8_v', 'b9', 'b9_v', 'b10', 'b10_v',
                          'close', 'limit_up', 'limit_down',
                          'order_book_id', 'trading_date']
        if len(source_file) != len(trans_file):
            print('skip --------------')
            return pd.DataFrame(columns=select_columns)
        if len(source_file) == len(trans_file) == 0:
            print(' no data, skip ------------')
            return pd.DataFrame(columns=select_columns)
        return get_source_tick_data(self.select_type, source_file, trans_file, order_book_id, self.sys_freq)

    @staticmethod
    def _gather(future_list):
        # DataFrame.append was removed in pandas 2.0: collect every result
        # first, then concatenate once (also avoids quadratic re-copying).
        frames = [future.result() for future in as_completed(future_list)]
        result_df = pd.concat(frames, sort=False) if frames else pd.DataFrame()
        result_df.sort_index(inplace=True)
        print('total length:', len(result_df))
        return result_df

    def load_symbols_data_from_merge_path(self):
        """Load every symbol from the merged-data tree (15 worker threads)."""
        with ThreadPoolExecutor(15) as executor:
            # One future per symbol; a symbol may cover one or many days.
            future_list = [executor.submit(self.single_symbol_load_from_merge_path, _symbol)
                           for _symbol in self.symbols]
        return self._gather(future_list)

    def load_symbols_data(self):
        """Load every symbol from the raw-data tree (10 worker threads)."""
        with ThreadPoolExecutor(10) as executor:
            # One future per symbol; a symbol may cover one or many days.
            future_list = [executor.submit(self.single_symbol_load, _symbol) for _symbol in self.symbols]
        return self._gather(future_list)


class CurrencyProcess(DataProcess):
    """Depth-style loader for a single currency/swap symbol: merges one
    snapshot csv and one trade csv, normalising the book columns to the
    stock-style prc_/vol_ layout."""

    def reform_data(self):
        """Build the merged snapshot/trade frame resampled at ``sys_freq``.

        :return: DataFrame indexed by datetime with OHLC, 10-level book
            (renamed to prc_ask_001 ... vol_bid_010), buy/sell volumes and
            counts, and cumulative volume / turnover columns.
        """
        snap_data = pd.read_csv(self.snap_path)
        trans_data = pd.read_csv(self.trans_path)
        snap_data = self.pre_process_columns(snap_data)
        trans_data = self.pre_process_columns(trans_data)
        print('snap data origin length:', len(snap_data))
        print('trans data origin length:', len(trans_data))
        # .ffill() replaces fillna(method='pad'), which is deprecated in
        # pandas 2.x and removed in 3.0.
        snap_data = snap_data.resample(self.sys_freq, closed='right', label='right').last().ffill()
        # Buy side: zero out 'sell' rows, then aggregate size per bar.
        trans_data['vol_transaction_temp'] = trans_data['sz']
        mask = trans_data['side'] == 'sell'
        trans_data.loc[mask, 'vol_transaction_temp'] = 0
        b_df = trans_data['vol_transaction_temp'].resample(self.sys_freq, closed='right', label='right').sum()
        b_df.name = 'b_vol'
        # Number of buy trades per bar.
        b_unit_df = (trans_data['vol_transaction_temp'] != 0).resample(self.sys_freq, closed='right',
                                                                       label='right').sum()
        b_unit_df.name = 'b_unit'
        # Sell side: zero out 'buy' rows.
        trans_data['vol_transaction_temp'] = trans_data['sz']
        mask = trans_data['side'] == 'buy'
        trans_data.loc[mask, 'vol_transaction_temp'] = 0
        s_df = trans_data['vol_transaction_temp'].resample(self.sys_freq, closed='right', label='right').sum()
        s_df.name = 's_vol'
        # Number of sell trades per bar.
        s_unit_df = (trans_data['vol_transaction_temp'] != 0).resample(self.sys_freq, closed='right',
                                                                       label='right').sum()
        s_unit_df.name = 's_unit'
        # Per-bar OHLC from trade prices, forward-filled across empty bars.
        p_df = trans_data['px'].resample(self.sys_freq, closed='right', label='right').mean().ffill()
        p_df.name = 't_close'
        p_high_df = trans_data['px'].resample(self.sys_freq, closed='right', label='right').max().ffill()
        p_high_df.name = 'high'
        p_low_df = trans_data['px'].resample(self.sys_freq, closed='right', label='right').min().ffill()
        p_low_df.name = 'low'
        p_open_df = trans_data['px'].resample(self.sys_freq, closed='right', label='right').first().ffill()
        p_open_df.name = 'open'
        trans_df = pd.concat([b_df, s_df, p_df, p_high_df, p_low_df, p_open_df, b_unit_df, s_unit_df], axis=1)
        data = pd.merge(left=snap_data, right=trans_df, left_index=True, right_index=True, how='left')
        # Only the first 10 book levels are kept; deeper levels are dropped.
        select_cols = ['high', 'low', 'open',
                       'askPx0',  'askSz0',  'askPx1',  'askSz1',  'askPx2',  'askSz2',
                       'askPx3',  'askSz3',  'askPx4',  'askSz4',  'askPx5',  'askSz5',
                       'askPx6',  'askSz6',  'askPx7',  'askSz7',  'askPx8',  'askSz8',
                       'askPx9',  'askSz9',
                       'bidPx0',  'bidSz0',  'bidPx1',  'bidSz1',  'bidPx2',  'bidSz2',
                       'bidPx3',  'bidSz3',  'bidPx4',  'bidSz4',  'bidPx5',  'bidSz5',
                       'bidPx6',  'bidSz6',  'bidPx7',  'bidSz7',  'bidPx8',  'bidSz8',
                       'bidPx9',  'bidSz9',
                       'b_vol', 's_vol', 't_close', 'b_unit', 's_unit']
        data = data[select_cols]
        # Normalise to the stock-style column names used downstream.
        data.rename(columns={'t_close': 'close',
                             'askPx0': 'prc_ask_001',  'askSz0': 'vol_ask_001',
                             'askPx1': 'prc_ask_002',  'askSz1': 'vol_ask_002',
                             'askPx2': 'prc_ask_003',  'askSz2': 'vol_ask_003',
                             'askPx3': 'prc_ask_004',  'askSz3': 'vol_ask_004',
                             'askPx4': 'prc_ask_005',  'askSz4': 'vol_ask_005',
                             'askPx5': 'prc_ask_006',  'askSz5': 'vol_ask_006',
                             'askPx6': 'prc_ask_007',  'askSz6': 'vol_ask_007',
                             'askPx7': 'prc_ask_008',  'askSz7': 'vol_ask_008',
                             'askPx8': 'prc_ask_009',  'askSz8': 'vol_ask_009',
                             'askPx9': 'prc_ask_010',  'askSz9': 'vol_ask_010',
                             'bidPx0': 'prc_bid_001',  'bidSz0': 'vol_bid_001',
                             'bidPx1': 'prc_bid_002',  'bidSz1': 'vol_bid_002',
                             'bidPx2': 'prc_bid_003',  'bidSz2': 'vol_bid_003',
                             'bidPx3': 'prc_bid_004',  'bidSz3': 'vol_bid_004',
                             'bidPx4': 'prc_bid_005',  'bidSz4': 'vol_bid_005',
                             'bidPx5': 'prc_bid_006',  'bidSz5': 'vol_bid_006',
                             'bidPx6': 'prc_bid_007',  'bidSz6': 'vol_bid_007',
                             'bidPx7': 'prc_bid_008',  'bidSz7': 'vol_bid_008',
                             'bidPx8': 'prc_bid_009',  'bidSz8': 'vol_bid_009',
                             'bidPx9': 'prc_bid_010',  'bidSz9': 'vol_bid_010',
                             }, inplace=True)
        # Drop bars with no usable price.
        mask = pd.isna(data['close']) | (data['close'] == 0)
        data = data[~mask]
        # Cumulative volume / turnover, plus the per-bar add.
        data['temp'] = data['b_vol'] + data['s_vol']
        data['volume'] = data['temp'].cumsum()
        data['total_turnover'] = data['temp'] * data['close']
        data['total_turnover'] = data['total_turnover'].cumsum()
        data['volume_add'] = data['b_vol'] + data['s_vol']
        print('merge length:', len(data))
        return data


router_map = {'stock': StockProcess, 'd_currency': CurrencyProcess}


def sort_num(n):
    """Sort-key helper: coerce *n* (possibly a numeric string) to float."""
    return float(n)


def update_add(data_pre, data_add, update_type='bid'):
    """Merge incremental depth updates into an existing price ladder.

    Each entry of *data_add* is a ``[price, size, ...]`` level. A size of 0
    deletes the matching price level; a non-zero size replaces the resting
    size (or adds a new level when the price is unknown). The ladder is then
    sorted by price — descending for bids, ascending for asks — and the best
    10 levels are returned.

    Note: *data_pre* is modified in place.
    """
    for incoming in data_add:
        price, size = incoming[0], incoming[1]
        for level in data_pre:
            if level[0] != price:
                continue
            if size == 0:
                # A zero size removes the level entirely.
                data_pre.remove(level)
            else:
                # Replace the resting size at this price.
                level[1] = size
            break
        else:
            # Unknown price: only insert levels that carry a real size.
            if size != 0:
                data_pre.append(incoming)
    descending = update_type == 'bid'
    data_pre.sort(key=lambda lvl: sort_num(lvl[0]), reverse=descending)
    return data_pre[:10]


def split_data(res, start_idx0, start_idx1):
    """Slice one flat tick row into ten ask and ten bid ``[price, size]`` pairs.

    :param res: one row of the tick array (price/size values interleaved)
    :param start_idx0: column index of the first ask price ('a1')
    :param start_idx1: column index of the first bid price ('b1')
    :return: (asks, bids) — each a list of ten [price, size] lists
    """
    asks_p = [[res[i], res[i + 1]] for i in range(start_idx0, start_idx0 + 20, 2)]
    bids_p = [[res[i], res[i + 1]] for i in range(start_idx1, start_idx1 + 20, 2)]
    return asks_p, bids_p


def reform_factor_data(data):
    """Rebuild per-row ask/bid price-volume ladders from the wide tick frame.

    Walks the rows in order, carrying the previous ladder forward through
    ``update_add`` (which merges incremental [price, size] updates), and
    returns a dict of frames all indexed like ``data``:
    'askp'/'askv'/'bidp'/'bidv' (ladder prices and sizes per level), plus
    'price', 's_volume', 'b_volume', 'b_unit', 's_unit' single-column frames
    copied straight from ``data``.
    """
    print('reform factor data------')
    print(data.columns)
    # Column offsets of the first ask ('a1') and first bid ('b1') level.
    start_idx0 = list(data.columns).index('a1')
    start_idx1 = list(data.columns).index('b1')
    print('a1 idx:', start_idx0, '   ', 'b1 idx:', start_idx1)
    data_array = data.to_numpy()
    askp_list = list()
    askv_list = list()
    bidp_list = list()
    bidv_list = list()

    # Seed both ladders from the first row.
    res_pre = data_array[0, :]
    asks_p_pre, bids_p_pre = split_data(res_pre, start_idx0, start_idx1)
    # Fall back to a single all-NaN row when a side is empty so the final
    # concat still gets one entry per input row.
    asks_p_add_dt = pd.DataFrame(asks_p_pre) if len(asks_p_pre) else pd.DataFrame([np.nan, np.nan]).T
    bids_p_add_dt = pd.DataFrame(bids_p_pre) if len(bids_p_pre) else pd.DataFrame([np.nan, np.nan]).T
    askp_list.append(asks_p_add_dt.iloc[:, 0])
    askv_list.append(asks_p_add_dt.iloc[:, 1])
    bidp_list.append(bids_p_add_dt.iloc[:, 0])
    bidv_list.append(bids_p_add_dt.iloc[:, 1])

    len_data = data.shape[0]

    for i in range(1, len_data):
        res_now = data_array[i, :]
        asks_p_now, bids_p_now = split_data(res_now, start_idx0, start_idx1)
        # update_add mutates the *_pre ladder in place and returns the
        # sorted top-10 levels.
        asks_p_add = update_add(asks_p_pre, asks_p_now, 'ask')
        bids_p_add = update_add(bids_p_pre, bids_p_now, 'bid')
        asks_p_add_dt = pd.DataFrame(asks_p_add) if len(asks_p_add) else pd.DataFrame([np.nan, np.nan]).T
        bids_p_add_dt = pd.DataFrame(bids_p_add) if len(bids_p_add) else pd.DataFrame([np.nan, np.nan]).T
        askp_list.append(asks_p_add_dt.iloc[:, 0])
        askv_list.append(asks_p_add_dt.iloc[:, 1])
        bidp_list.append(bids_p_add_dt.iloc[:, 0])
        bidv_list.append(bids_p_add_dt.iloc[:, 1])

        # Carry a copy forward so the next in-place merge does not alias the
        # list we just converted into a frame.
        asks_p_pre = asks_p_add.copy()
        bids_p_pre = bids_p_add.copy()

    # One Series per input row was collected -> transpose back so each
    # output row corresponds to one timestamp.
    askp_dt = pd.concat(askp_list, axis=1).T.reset_index(drop=True)
    askv_dt = pd.concat(askv_list, axis=1).T.reset_index(drop=True)
    bidp_dt = pd.concat(bidp_list, axis=1).T.reset_index(drop=True)
    bidv_dt = pd.concat(bidv_list, axis=1).T.reset_index(drop=True)

    data_datetime = data.index
    askp_dt.index = data_datetime
    askv_dt.index = data_datetime
    bidp_dt.index = data_datetime
    bidv_dt.index = data_datetime

    print('askp_dt length:', len(askp_dt))

    result_dict = {'askp': askp_dt, 'askv': askv_dt, 'bidp': bidp_dt, 'bidv': bidv_dt,
                   'price': data['last'].to_frame(), 's_volume': data['s_vol'].to_frame(),
                   'b_volume': data['b_vol'].to_frame(), 'b_unit': data['b_unit'].to_frame(),
                   's_unit':  data['s_unit'].to_frame()
                   }
    return result_dict


def get_last_30_minute_price_factor(data):
    """Flag rows in the last 30 trading minutes whose short-horizon return spikes.

    Adds a 'factor' column (5-period pct_change of 'last') to *data* in place,
    then keeps rows where the factor exceeds 1% AND the timestamp is at or
    after 14:30.

    Returns a frame with columns ['order_book_id', 'factor', 'datetime'];
    an empty frame with those columns when nothing qualifies.
    NOTE(review): assumes *data* is indexed by a DatetimeIndex and carries
    'last', 'order_book_id' and 'datetime' columns — confirm against callers.
    """
    period = 5
    threshold = 0.01
    data['factor'] = data['last'].pct_change(periods=period)
    # BUG FIX: the original window test was
    #   (hour >= 14) & ((hour == 14) & (minute >= 30))
    # which reduces to hour == 14 and silently drops every 15:xx row.
    # The intended "last 30 minutes" window is any timestamp >= 14:30.
    in_last_30 = (data.index.hour > 14) | ((data.index.hour == 14) & (data.index.minute >= 30))
    mask = (data['factor'] > threshold) & in_last_30
    factor = data[mask]
    if factor.empty:
        return pd.DataFrame(columns=['order_book_id', 'factor', 'datetime'])
    return factor[['order_book_id', 'factor', 'datetime']]


def get_all_symbols_price_factor(data):
    """Run the last-30-minute price factor per symbol and stack the results.

    Returns the concatenated per-symbol rows, or an empty frame with columns
    ['order_book_id', 'factor', 'datetime'] when there is nothing to report.
    """
    empty = pd.DataFrame(columns=['order_book_id', 'factor', 'datetime'])
    if data.empty:
        return empty
    stacked = (data.groupby(['order_book_id'], as_index=False)
                   .apply(get_last_30_minute_price_factor)
                   .reset_index(drop=True))
    return stacked if not stacked.empty else empty


def get_last_price_factor(data):
    """Price move between the 14:30 and 14:55 snapshots for one (symbol, day).

    Selects rows stamped 14:30 or 14:55, takes last-minus-first 'last' price,
    and returns a one-row frame ['order_book_id', 'trading_date', 'factor'].
    NOTE(review): raises IndexError if neither timestamp is present — callers
    (groupby.apply) are assumed to supply full sessions; confirm.
    """
    mask = (((data.index.hour == 14) & (data.index.minute == 30)) |
            ((data.index.hour == 14) & (data.index.minute == 55)))
    factor = data[mask]
    # BUG FIX: factor['last'][-1] / [0] relied on positional fallback for a
    # non-integer index, which pandas 2.0 removed (KeyError on DatetimeIndex).
    # Use explicitly positional .iloc instead.
    diff = factor['last'].iloc[-1] - factor['last'].iloc[0]
    date = factor['trading_date'].values[0]
    order_book_id = factor['order_book_id'].values[0]
    df = pd.DataFrame(data=[[order_book_id, date, diff]], columns=['order_book_id', 'trading_date', 'factor'])
    return df


def get_all_symbols_factors(data):
    """Rank all (symbol, day) price-move factors and split the top 100 ids
    into five buckets of 20 (q1 = strongest).

    Returns five lists of order_book_ids; five empty lists for empty input.
    """
    if data.empty:
        return [], [], [], [], []
    per_day = data.groupby(['order_book_id', 'trading_date'], as_index=False).apply(get_last_price_factor)
    ranked = per_day.sort_values(by=['factor'], ascending=False).reset_index(drop=True)
    buckets = [ranked[lo:lo + 20]['order_book_id'].values.tolist()
               for lo in range(0, 100, 20)]
    return tuple(buckets)


def get_cash_in_factor(data):
    """Morning (09:30-10:30) cash-inflow factor for one (symbol, day) group.

    Per row, compares the average buy-order value (b_vol_total_value / b_unit)
    to the average order value over both sides
    ((b + s total value) / (b + s units)); rows with a zero denominator score
    0 and are excluded from the mean.

    Returns a one-row frame ['order_book_id', 'trading_date', 'factor'].
    """
    mask = ((data.index.hour == 9) & (data.index.minute >= 30)) | \
           ((data.index.hour == 10) & (data.index.minute <= 30))
    factor = data[mask]
    # BUG FIX: the total-value numerator previously summed b_vol_total_value
    # twice; the unit denominator (b_unit + s_unit) implies buy + sell value.
    ratio = np.where((factor['b_unit'] != 0) & ((factor['b_unit'] + factor['s_unit']) != 0),
                     (factor['b_vol_total_value'] / factor['b_unit']) /
                     ((factor['b_vol_total_value'] + factor['s_vol_total_value']) /
                      (factor['b_unit'] + factor['s_unit'])),
                     0)
    valid = ratio[ratio != 0]
    # np.average on an empty array raises ZeroDivisionError; fall back to 0.0
    # when every row was zeroed out.
    ratio = np.average(valid) if valid.size else 0.0
    date = factor['trading_date'].values[0]
    order_book_id = factor['order_book_id'].values[0]
    df = pd.DataFrame(data=[[order_book_id, date, ratio]], columns=['order_book_id', 'trading_date', 'factor'])
    return df


def get_cash_flow_in_factors(data):
    """Rank all (symbol, day) cash-inflow factors and split the top 100 ids
    into five buckets of 20 (q1 = strongest inflow).

    Returns five lists of order_book_ids; five empty lists for empty input.
    """
    if data.empty:
        return [], [], [], [], []
    per_day = data.groupby(['order_book_id', 'trading_date'], as_index=False).apply(get_cash_in_factor)
    ranked = per_day.sort_values(by=['factor'], ascending=False).reset_index(drop=True)
    buckets = [ranked[lo:lo + 20]['order_book_id'].values.tolist()
               for lo in range(0, 100, 20)]
    return tuple(buckets)


def read_factors(date, symbol_map, path=r'C:\Users\huajia\Desktop\rqalpha4\rqalpha\f'):
    """Read the daily factor CSV for *date* and bucket symbols by rank.

    Rows are sorted descending by the big-buy-order ratio column, KEYIDs are
    translated through *symbol_map* (unknown keys dropped), and the ranked
    list is cut into five buckets of 500. Returns five (possibly empty) lists.
    """
    mapping = dict(symbol_map.items()).copy()
    compact = ''.join(date.split('-'))
    file_path = os.path.join(path, 'r{}.csv'.format(compact))
    print(file_path)
    if not os.path.exists(file_path):
        return [], [], [], [], []
    csv_df = pd.read_csv(file_path)
    if csv_df.empty:
        return [], [], [], [], []
    ranked = csv_df.sort_values(by=['FI_AMT_BUY_BIGORDER_RATIO_AFTERNOON30_RANKMEAN10'], ascending=False)
    symbols = [mapping[key] for key in ranked['KEYID'].values.tolist() if key in mapping]
    buckets = [symbols[lo:lo + 500] for lo in range(0, 2500, 500)]
    return tuple(buckets)


def read_signal(symbols_mapping_dict, path=r'C:\Users\huajia\Desktop\rqalpha4\rqalpha\f1.csv'):
    """Read a date-indexed signal CSV and keep only mapped symbol columns.

    The CSV's unnamed first column becomes the 'the_date' index; columns are
    filtered to keys of *symbols_mapping_dict* and renamed to its values, and
    rows that are entirely null are dropped. Returns the trimmed frame, or an
    empty DataFrame when nothing survives.
    """
    signal = pd.read_csv(path)
    signal.rename(columns={'Unnamed: 0': 'the_date'}, inplace=True)
    signal.set_index('the_date', inplace=True)
    kept = []
    renamed = []
    for name in signal.columns:
        if name in symbols_mapping_dict:
            kept.append(name)
            renamed.append(symbols_mapping_dict[name])
    signal = signal[kept]
    signal.columns = renamed
    all_null = signal.isnull().all(axis=1)
    signal = signal[~all_null]
    if signal.empty:
        return pd.DataFrame()
    return signal


def read_adj_factor_price(dt, symbol, path=r'D:\BaiduNetdiskDownload\price\price\s_dq_adjfactor'):
    """Look up the adjustment-factor price for *symbol* on date *dt*.

    Translates an exchange-suffixed id ('.XSHE' -> '.SZ', '.XSHG' -> '.SH'),
    reads r<YYYYMMDD>.csv from *path* (indexed by KEYID), and returns the
    first value of the matching row, or None when the id is absent.
    """
    dt = ''.join(dt.split('-'))
    path = os.path.join(path, 'r{}'.format(dt) + '.csv')
    __order_book_id = symbol
    if symbol.endswith('.XSHE'):
        __order_book_id = symbol.replace('.XSHE', '.SZ', )
    if symbol.endswith('.XSHG'):
        # BUG FIX: the pattern previously lacked the leading dot
        # ('XSHG' -> '.SH'), producing ids like '600000..SH' that could
        # never match a KEYID, so XSHG symbols always returned None.
        __order_book_id = symbol.replace('.XSHG', '.SH', )
    df = pd.read_csv(path)
    df.set_index(['KEYID'], inplace=True)
    if __order_book_id in df.index:
        price = df.loc[__order_book_id].values[0]
        return price
    else:
        return None


def split_factors(factor_df, dt, last_li):
    """Rank symbols by the most recent factor row strictly before *dt*.

    Walks back day by day from dt-1 until a date present in factor_df.index
    is found (NOTE(review): loops forever if no earlier date exists — confirm
    callers guarantee one), sorts that row's symbols descending, and returns
    a 7-tuple: five 100-symbol buckets, the weakest tail of *last_li* (up to
    10 ids, sorted context), and the full sorted factor Series.
    """
    from datetime import timedelta
    prev = pd.to_datetime(dt, format='%Y-%m-%d') - timedelta(days=1)
    before_dt = prev.strftime('%Y-%m-%d')
    print('before_dt:', before_dt, 'now dt:', dt)
    while before_dt not in factor_df.index:
        prev = pd.to_datetime(before_dt, format='%Y-%m-%d') - timedelta(days=1)
        before_dt = prev.strftime('%Y-%m-%d')
        print(before_dt)
    sub_factor = factor_df.loc[before_dt].sort_values(ascending=False)
    last_factor = sub_factor[sub_factor.index.isin(last_li)].sort_values(ascending=False)
    print(sub_factor[0:5])
    print(last_factor[0:5])
    li = sub_factor.index.tolist()
    last_li = last_factor.index.tolist()
    print(last_li[-20:])
    if len(li) == 0:
        # BUG FIX: this branch previously returned only 6 items while the
        # non-empty branch returned 7 (incl. sub_factor), so tuple unpacking
        # at the call site would raise ValueError on an empty day.
        return [], [], [], [], [], [], sub_factor
    else:
        return li[0:100], li[100:200], li[200:300], li[300:400], li[400:500], last_li[-10:], sub_factor



