# -*- coding: utf-8 -*-
import rqdatac
import pandas as pd
import pathlib
import pickle
import time


def _fetch_price_by_month(cache_dir, codes, prefix, start_date, end_date,
                          freq):
    """Download ``rqdatac.get_price`` month by month and cache each chunk.

    Intraday data is too large for a single request, so the range
    [start_date, end_date] is split at month ends.  Each chunk is written
    to ``<prefix>_<year>_<month>.csv`` inside ``cache_dir`` and skipped on
    later runs when the file already exists.
    """
    ed = pd.Timestamp(end_date)
    dr = list(pd.date_range(pd.Timestamp(start_date), ed, freq='M'))
    # Make sure the schedule ends exactly on end_date.  (The previous code
    # compared ed against the slice dr[:-1] -- a list, hence always
    # unequal -- so end_date was appended even when the range already
    # ended on it, duplicating the final chunk on month-end end_dates.)
    if not dr or ed != dr[-1]:
        dr.append(ed)
    start = start_date
    for end in dr:
        f = cache_dir.joinpath('%s_%s_%02d.csv' %
                               (prefix, end.year, end.month))
        if f.exists():
            print('Found %s' % f.absolute())
        else:
            print('%s - %s' % (start, end))
            df = rqdatac.get_price(codes,
                                   start_date=start,
                                   end_date=end,
                                   frequency=freq)
            df.to_csv(f)
            time.sleep(1)  # throttle requests to the data service
        start = end + pd.Timedelta('1d')


def refresh_conbond(cache_dir: pathlib.Path,
                    start_date,
                    end_date,
                    freq,
                    username=None,
                    password=None):
    """Download convertible-bond datasets from rqdata into ``cache_dir``.

    Every dataset is cached as a CSV file and fetched only when its file
    does not exist yet, so the function is cheap to re-run.  Small
    datasets (call/put/conversion info, conversion price) are always
    pulled from 2017-01-01 regardless of ``start_date``.

    :param cache_dir: existing directory holding the cached CSV files
    :param start_date: range start (anything ``pd.Timestamp`` accepts)
    :param end_date: range end (anything ``pd.Timestamp`` accepts)
    :param freq: bar frequency for ``rqdatac.get_price``; '1d' downloads
        in one request, other frequencies are chunked month by month
    :param username: optional rqdata account; when given,
        ``rqdatac.init(username, password)`` is called first
    :param password: password for ``username``
    """
    if username is not None:
        rqdatac.init(username, password)

    # Instrument master data, augmented with constant columns so the file
    # can be consumed like a stock instrument table by a backtester.
    f_all_instruments = cache_dir.joinpath('all_instruments.csv')
    if f_all_instruments.exists():
        df_all_instruments = pd.read_csv(f_all_instruments,
                                         index_col=['order_book_id'])
    else:
        print('Read all_instruments from rqdata')
        df_all_instruments = rqdatac.convertible.all_instruments()
        df_all_instruments['trading_hours'] = '09:31-11:30,13:01-15:00'
        df_all_instruments['board_type'] = 'MainBoard'
        df_all_instruments['type'] = 'CS'
        df_all_instruments['market_tplus'] = 0
        df_all_instruments['round_lot'] = 10
        df_all_instruments['account_type'] = 'STOCK'
        df_all_instruments.set_index('order_book_id', inplace=True)
        df_all_instruments.to_csv(f_all_instruments)

    # Full bond universe; reused by every request below.
    codes = df_all_instruments.index.tolist()

    f_call_info = cache_dir.joinpath('call_info.csv')
    if not f_call_info.exists():
        print('Read call_info from rqdata')
        df_call_info = rqdatac.convertible.get_call_info(
            codes,
            # data size is small, always get full data
            start_date='2017-01-01',
            end_date=end_date)
        df_call_info.to_csv(f_call_info)
    # point fix: incorrect call info_date: 110042, 113562, detected by double_low_rank
    # just update the call_info.csv file, and rerun to generate the combined file

    f_conversion_info = cache_dir.joinpath('conversion_info.csv')
    if not f_conversion_info.exists():
        print('Read conversion_info from rqdata')
        df_conversion_info = rqdatac.convertible.get_conversion_info(
            codes,
            start_date=start_date,
            end_date=end_date)
        df_conversion_info.to_csv(f_conversion_info)

    f_suspended = cache_dir.joinpath('suspended.csv')
    if not f_suspended.exists():
        print('Read suspended from rqdata')
        df_suspended = rqdatac.convertible.is_suspended(
            codes,
            start_date=start_date,
            end_date=end_date)
        # Name the index so the CSV round-trips with a 'date' column.
        df_suspended.index.rename('date', inplace=True)
        df_suspended.to_csv(f_suspended, index=True)

    f_indicators = cache_dir.joinpath('indicators.csv')
    if not f_indicators.exists():
        print('Read indicators from rqdata')
        df_indicators = rqdatac.convertible.get_indicators(
            codes,
            start_date=start_date,
            end_date=end_date)
        df_indicators.to_csv(f_indicators)

    f_conversion_price = cache_dir.joinpath('conversion_price.csv')
    if not f_conversion_price.exists():
        print('Read conversion_price from rqdata')
        df_conversion_price = rqdatac.convertible.get_conversion_price(
            codes,
            # data size is small, always get full data
            start_date='2017-01-01',
            end_date=end_date)
        df_conversion_price.to_csv(f_conversion_price)

    f_put_info = cache_dir.joinpath('put_info.csv')
    if not f_put_info.exists():
        print('Read put_info from rqdata')
        df_put_info = rqdatac.convertible.get_put_info(
            codes,
            # data size is small, always get full data
            start_date='2017-01-01',
            end_date=end_date)
        df_put_info.to_csv(f_put_info)

    f_bond_price = cache_dir.joinpath('bond_price_%s.csv' % freq)
    if not f_bond_price.exists():
        print('Read bond_price from rqdata')
        if freq == '1d':
            df_bond_price = rqdatac.get_price(
                codes,
                start_date=start_date,
                end_date=end_date,
                frequency=freq)
            df_bond_price.to_csv(f_bond_price)
        else:
            _fetch_price_by_month(cache_dir, codes, 'bond_price',
                                  start_date, end_date, freq)

    f_stock_price = cache_dir.joinpath('stock_price_%s.csv' % freq)
    if not f_stock_price.exists():
        print('Read stock_price from rqdata')
        if freq == '1d':
            df_stock_price = rqdatac.get_price(
                df_all_instruments.stock_code.tolist(),
                start_date=start_date,
                end_date=end_date,
                frequency=freq)
            df_stock_price.to_csv(f_stock_price)
        else:
            _fetch_price_by_month(cache_dir,
                                  df_all_instruments.stock_code.tolist(),
                                  'stock_price', start_date, end_date, freq)

    f_instrument_industry = cache_dir.joinpath('instrument_industry.csv')
    if not f_instrument_industry.exists():
        print('Read instrument_industry from rqdata')
        df_instrument_industry = rqdatac.convertible.get_instrument_industry(
            codes, level=0)
        df_instrument_industry.to_csv(f_instrument_industry)


def _load_price(cache_dir, prefix, freq):
    """Load cached price data written by ``refresh_conbond``.

    For daily data a single ``<prefix>_1d.csv`` exists; its ``date``
    column is renamed to ``datetime`` for consistency with the intraday
    chunk files.  Otherwise the monthly ``<prefix>_<year>_<month>.csv``
    chunks are concatenated.
    """
    if freq == '1d':
        df = pd.read_csv(cache_dir.joinpath('%s_%s.csv' % (prefix, freq)))
        df.rename(columns={'date': 'datetime'}, inplace=True)
        return df
    return pd.concat(
        pd.read_csv(f) for f in cache_dir.glob('%s_2*.csv' % prefix))


def combine_conbond(cache_dir: pathlib.Path, freq):
    """Combine the cached rqdata CSVs into one per-bond/per-bar dataset.

    Joins bond prices with the underlying stock prices, daily indicators,
    suspension flags and forced-redemption status, writes the result to
    ``conbonds_<freq>.csv`` inside ``cache_dir``, and returns the combined
    frame indexed by ``(order_book_id, date)``.
    """
    bond_price = _load_price(cache_dir, 'bond_price', freq)
    # volume from the data source is in terms of 1000
    bond_price['volume'] = bond_price.volume * 10
    bond_price.set_index(['order_book_id'], inplace=True)

    # Add stock_code for joining
    print("Adding stock_code")
    f_all_instruments = cache_dir.joinpath('all_instruments.csv')
    all_instruments = pd.read_csv(f_all_instruments,
                                  index_col=['order_book_id'])
    df = bond_price.join(all_instruments[['stock_code']]).reset_index()

    # Add stock_price column
    print("Adding stock_price")
    stock_price = _load_price(cache_dir, 'stock_price', freq)
    stock_price.rename(columns={
        'order_book_id': 'stock_code',
        'open': 'stock_open',
        'close': 'stock_close',
    },
                       inplace=True)

    df = df.merge(
        stock_price[['stock_code', 'datetime', 'stock_open', 'stock_close']],
        on=['stock_code', 'datetime'],
        how='left')
    df['date'] = pd.to_datetime(df.datetime).dt.date.astype(str)
    df.drop(columns=['stock_code'], inplace=True)
    df.set_index(['order_book_id', 'date'], inplace=True)

    # Add columns from indicators
    print("Adding indicators")
    f_indicators = cache_dir.joinpath('indicators.csv')
    indicators = pd.read_csv(f_indicators, index_col=['order_book_id', 'date'])
    df = df.join(indicators[[
        'conversion_value', 'yield_to_maturity', 'remaining_size',
        'double_low_factor', 'conversion_premium', 'call_qualified_days',
        'put_qualified_days'
    ]])

    # Add suspended column
    print("Adding suspended")
    f_suspended = cache_dir.joinpath('suspended.csv')
    suspended = pd.read_csv(f_suspended, index_col=['date'])
    # Reshape the wide (date x bond) table into a Series indexed by
    # (order_book_id, date).  NOTE: DataFrame.iteritems() was removed in
    # pandas 2.0; items() is the direct replacement.  Naming the Series
    # explicitly is also robust when concat preserves a column name
    # (single-bond case), where rename(columns={0: ...}) would miss.
    cols, series = zip(*suspended.items())
    suspended = pd.concat(series, keys=cols,
                          names=['order_book_id'
                                 ]).rename('suspended').to_frame()
    df = df.join(suspended)
    # suspended: e.g. 128064.XSHE
    print("Fillna of suspended")
    df.fillna({'suspended': False}, inplace=True)
    # 113010.XSHG, 2019-02-13, nan for conversion_value,yield_to_maturity,remaining_size,double_low_factor,convert_premium_rate,call_qualified_days,put_qualified_days
    df.loc[:, ['conversion_premium']] = df.loc[:,
                                               ['conversion_premium']].ffill()

    # Add force_redeem column
    print("Adding force_redeem")
    f_call_info = cache_dir.joinpath('call_info.csv')
    call_info = pd.read_csv(f_call_info, index_col=['order_book_id'])
    gs = []
    for k, g in df.groupby('order_book_id'):
        try:
            info_date = call_info.loc[k].info_date
            # Vectorized comparison of the 'date' index level against the
            # call announcement date (ISO date strings compare correctly).
            g['force_redeem'] = g.index.get_level_values('date') > info_date
        except KeyError:
            # No call announcement recorded for this bond.
            g['force_redeem'] = False
        gs.append(g)
    df = pd.concat(gs)
    assert df[pd.isna(df.force_redeem)].empty

    # TODO: Add conversion_price
    # Add conversion_price column
    #  f_conversion_price = cache_dir.joinpath('conversion_price.csv')
    #  conversion_price = pd.read_csv(f_conversion_price)
    #  conversion_price.set_index()

    # TODO: leverage other information (e.g. industry)

    print("Rename")
    df.rename(columns={
        'conversion_premium': 'convert_premium_rate',
    },
              inplace=True)

    print("Write to csv")
    f_conbonds = cache_dir.joinpath('conbonds_%s.csv' % freq)
    df.reset_index().drop(columns=['date']).sort_values(
        ['datetime', 'order_book_id']).to_csv(f_conbonds, index=False)
    return df


if __name__ == '__main__':
    import json
    from easydict import EasyDict

    # Read rqdata credentials from ~/.cache/quant/.auth.json and log in.
    auth_file = pathlib.Path('~/.cache/quant').expanduser().joinpath(
        '.auth.json')
    # Context manager closes the handle deterministically (the previous
    # json.load(auth_file.open('r')) leaked it).
    with auth_file.open('r') as fp:
        authdata = EasyDict(json.load(fp))
    rqdatac.init(authdata.ricequant.username, authdata.ricequant.password)

    # Dump the raw instrument attributes for a sample stock and an index.
    data = [
        ins.__dict__
        for ins in rqdatac.instruments(['300059.XSHE', '000300.XSHG'])
    ]
    out_dir = pathlib.Path('~/.cache/quant/stock/2022-02-14/').expanduser()
    # Create the output directory if missing instead of failing in to_csv.
    out_dir.mkdir(parents=True, exist_ok=True)
    pd.DataFrame(data).to_csv(out_dir.joinpath('all_instruments.csv'),
                              index=False)
