import asyncio
import datetime
import functools
import io
import json
import re
import time
import zipfile
from typing import *

import aiohttp
import pandas as pd
from apscheduler.schedulers.blocking import BlockingScheduler
from httpx import NetworkError
from loguru import logger

from config import PROXY_CONF, ROOT_DIR
from utils.data.dataCheck import Checker
from utils.data.sqlLoader import PgLoader


def retry(times: int = 5):
    """Decorator factory: retry the wrapped call up to *times* times.

    Every failed attempt is logged; when all attempts fail a
    :class:`NetworkError` is raised, chained to the last exception.

    :param times: maximum number of attempts (>= 1).
    """

    def decorator(func):
        @functools.wraps(func)  # preserve the wrapped function's metadata
        def wrapper(*args, **kwargs):
            last_exc = None
            for attempt in range(times):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    last_exc = e
                    # BUG FIX: the original ternary bound the whole message
                    # (logging '' instead of the error), and its condition
                    # ``i < times`` was always true, so the "retrying" suffix
                    # could never be suppressed on the final attempt.
                    suffix = ',重试...' if attempt < times - 1 else ''
                    logger.error(f'执行失败：{e}' + suffix)
            # keep the causal chain so the original failure is debuggable
            raise NetworkError(f'重试{times}次失败') from last_exc

        return wrapper

    return decorator


class DataDownloader:
    """Downloads Binance historical daily kline archives (zip of csv) from
    data.binance.vision for spot and USDT-margined perpetual markets."""

    data_host = 'https://data.binance.vision'
    spot_api: str = 'https://api3.binance.com'
    swap_api: str = 'https://fapi.binance.com'
    # presumably a request-weight budget for the REST APIs — TODO confirm usage
    api_weight: int = 2500

    # output schema shared by spot and swap kline frames
    _KLINE_COLUMNS = ['symbol', 'open_time', 'open', 'high', 'low', 'close', 'volume', 'close_time',
                      'quote_asset_volume',
                      'number_of_trades', 'taker_buy_base_asset_volume', 'taker_buy_quote_asset_volume', 'ignore']

    def __init__(self):
        # BUG FIX: was misspelled ``__int__`` so it never ran and ``self.aps``
        # was never created.
        self.aps = BlockingScheduler()

    @staticmethod
    async def _fetch(session: aiohttp.ClientSession, url: str, method: str = 'GET', params: dict = None,
                     headers: dict = None, async_: bool = False) -> Union[Any, Tuple[bool, Any]]:
        """Perform a single HTTP request on *session* through the configured proxy.

        :param async_: when True, tolerate failures and return ``(ok, body_or_None)``
            so that batched downloads can skip individual misses.
        :raises IOError: non-200 response when ``async_`` is False.
        """
        resp = await session.request(method, url, params=params, headers=headers, proxy=PROXY_CONF.get('https'))

        if resp.status != 200:
            if not async_:
                raise IOError(f"获取数据失败：{url}，{await resp.text()}")
            logger.info(f'请求失败：{await resp.text()}')
            return False, None
        return (True, await resp.read()) if async_ else await resp.read()

    @classmethod
    async def fetch(cls, urls: Union[str, Sequence[str]], method: str = 'GET',
                    headers: Dict[str, str] = None) -> Union[Any, List[Tuple[bool, Any]]]:
        """Fetch one url (returns the raw body, raising on failure) or a
        sequence of urls concurrently (returns ``[(ok, body), ...]`` in order)."""
        async with aiohttp.ClientSession() as session:
            if headers is not None:
                session.headers.update(headers)
            if isinstance(urls, str):
                return await cls._fetch(session, urls, method)
            tasks = [cls._fetch(session, x, method, async_=True) for x in urls]
            # gather preserves input order
            resp: List[Tuple[bool, Any]] = await asyncio.gather(*tasks)
            return list(resp)

    @classmethod
    def symbols(cls, spot: bool = True) -> Dict[str, int]:
        """Return ``{symbol: onboard timestamp in seconds}`` for USDT-quoted pairs.

        Stable-coin bases and leveraged UP/DOWN/BEAR/BULL tokens are excluded;
        for futures only PERPETUAL contracts are kept.

        :param spot: True for spot exchange info, False for USDT-margined futures.
        :raises Exception: re-raised after logging when the request/parse fails.
        """
        stable_symbol = ['BKRW', 'USDC', 'USDP', 'TUSD', 'BUSD', 'FDUSD', 'DAI', 'EUR', 'GBP']
        try:
            url = cls.spot_api + '/api/v3/exchangeInfo' if spot else cls.swap_api + '/fapi/v1/exchangeInfo'
            resp: Dict[str, Any] = json.loads(asyncio.run(cls.fetch(url)))
            result = {}
            for symbol in resp['symbols']:
                base = symbol['symbol'].replace('USDT', '')
                if (symbol['quoteAsset'] != 'USDT'
                        or base in stable_symbol
                        or base.endswith(('UP', 'DOWN', 'BEAR', 'BULL'))):
                    continue
                if not spot and symbol['contractType'] != 'PERPETUAL':
                    continue
                # NOTE(review): 'onboardDate' is documented for the futures
                # exchangeInfo payload — verify spot responses also carry it.
                result[symbol['symbol']] = symbol['onboardDate'] / 1000
        except Exception as e:
            logger.error(f'获取币种信息异常：{e.__str__()}')
            raise e

        return result

    def _kline_batches(self, url: str, period: str, symbols: List[str], start: str, end: str,
                       dts: Optional[List[str]], trade_type: int, drop_header: bool) -> pd.DataFrame:
        """Download daily kline archives in throttled batches and concatenate.

        :param url: directory template containing ``%s`` for the symbol.
        :param dts: explicit ``YYYY-MM-DD`` dates; when None, every day in
            ``[start, end]`` is used.
        :param trade_type: 0 = spot, 1 = swap; written to the output column.
        :param drop_header: drop a leading non-numeric row (futures csv files
            sometimes carry a header line).
        :return: concatenated DataFrame with :data:`_KLINE_COLUMNS` plus
            ``trade_type``; empty DataFrame when nothing was downloaded.
        """
        # 时间字符串列表
        date_range: Sequence[str] = pd.date_range(start, end, freq='1d').strftime('%Y-%m-%d') if dts is None else dts
        # 路径 文件名 币种名组成的列表
        all_urls: List[Tuple[str, ...]] = [(url % x, f'{x}-{period}-{y}', x) for x in symbols for y in date_range]

        # 分箱异步获取数据 防止异步请求过多导致超时
        batch = 20
        result: List[pd.DataFrame] = []
        for i in range(0, len(all_urls), batch):
            # BUG FIX: the original stepped by 20 but sliced ``i:i + 100``,
            # so consecutive batches overlapped and rows were duplicated.
            case = all_urls[i:i + batch]
            resp: List[Tuple[bool, bytes]] = asyncio.run(self.fetch([path + name + '.zip' for path, name, _ in case]))
            dfs: List[pd.DataFrame] = []
            for j, (ok, payload) in enumerate(resp):
                if not ok:
                    dfs.append(pd.DataFrame())
                    continue
                # context manager closes the archive (original leaked the handle)
                with zipfile.ZipFile(io.BytesIO(payload), 'r') as zf:
                    text = zf.read(case[j][1] + '.csv').decode()
                item = pd.read_csv(io.StringIO(text), header=None)
                if drop_header and not item.empty and not re.match(r'^\d+$', str(item.iloc[0, 0])):
                    item.drop(index=0, inplace=True)
                # BUG FIX: the original used the batch-local index against the
                # global ``all_urls`` list, mislabelling every batch after the
                # first with the wrong symbol.
                item.insert(0, 'symbol', case[j][2])
                dfs.append(item)
            df = pd.concat(dfs, ignore_index=True)
            if df.empty:
                continue
            df.columns = self._KLINE_COLUMNS
            # snap open_time to whole seconds while keeping millisecond units
            df['open_time'] = (df['open_time'].astype(float) / 1000).round() * 1000
            df['trade_type'] = trade_type
            result.append(df)
        return pd.concat(result, ignore_index=True) if result else pd.DataFrame()

    @retry(5)
    def spot_kline(self, start: str, end: str, period: str = '1h', symbols: List[str] = None,
                   dts: List[str] = None) -> pd.DataFrame:
        """Download spot klines for *symbols* (all USDT pairs when None) over
        ``[start, end]`` or the explicit *dts* dates."""
        url = self.data_host + f'/data/spot/daily/klines/%s/{period}/'
        if symbols is None:
            symbols = list(self.symbols().keys())
        return self._kline_batches(url, period, symbols, start, end, dts, trade_type=0, drop_header=False)

    @retry(5)
    def swap_kline(self, start: str, end: str, period: str = '1h', u_base: bool = True, symbols: List[str] = None,
                   dts: List[str] = None) -> pd.DataFrame:
        """Download perpetual-swap klines (USDT-margined when *u_base*, else
        coin-margined) for *symbols* over ``[start, end]`` or *dts*."""
        url = self.data_host + f'/data/futures/{"um" if u_base else "cm"}/daily/klines/%s/{period}/'
        if symbols is None:
            symbols = list(self.symbols(spot=False).keys())
        return self._kline_batches(url, period, symbols, start, end, dts, trade_type=1, drop_header=True)

    def agg_trades(self):
        """TODO: download aggregated-trade archives (not implemented)."""
        pass

    def funding_rate(self):
        """TODO: download funding-rate history (not implemented)."""
        pass


def check_fill(info: Dict[int, dict]):
    """Backfill the kline gaps reported by the checker.

    :param info: ``{period_seconds: {'0': {symbol: [dates]}, '1': {...}}}``
        where '0' is spot and '1' is swap; period keys may arrive as strings
        after a JSON round-trip, hence the ``int()`` normalisation below.
    """
    period_map = {
        60: '1m',
        3600: '1h',
        86400: '1d',
    }
    dl = DataDownloader()
    for period, v in info.items():
        period = int(period)  # JSON object keys are strings
        loader = PgLoader(0, 1, period=period)
        # spot and swap differ only in label, download method, and sub-dict
        for label, kline_fn, gaps in (('spot', dl.spot_kline, v.get('0', dict())),
                                      ('swap', dl.swap_kline, v.get('1', dict()))):
            for symbol, dts in gaps.items():
                df = kline_fn('', '', period_map[period], symbols=[symbol], dts=dts)
                if df.empty:
                    continue
                # idiomatic replacement for the original ``df.__array__()``
                loader.store(df.to_numpy().tolist())
                logger.info(f'补充{label}数据完成：{symbol}, {period_map[period]}, {dts}')


def main():
    """Check kline completeness over a fixed UTC window, persist the gap
    report to disk, then backfill the reported gaps."""
    start_time = datetime.datetime(2024, 2, 1, tzinfo=datetime.timezone.utc)
    end_time = datetime.datetime(2024, 9, 4, tzinfo=datetime.timezone.utc)
    ch = Checker(int(start_time.timestamp()), int(end_time.timestamp()))

    loop = asyncio.new_event_loop()
    try:
        for k in ['NOTUSDT']:
            # BUG FIX: ``asyncio.gather(..., loop=loop)`` — the ``loop``
            # parameter was removed in Python 3.10 and raised TypeError;
            # running the gather on the explicit loop is sufficient.
            loop.run_until_complete(asyncio.gather(
                ch.check_symbol(60, k, 1, onboard=0),
                ch.check_symbol(3600, k, 1, onboard=0),
                ch.check_symbol(86400, k, 1, onboard=0)))
    finally:
        loop.close()  # release the loop even when a check fails

    report_path = ROOT_DIR + r'\locals\output\temp.json'
    with open(report_path, 'w+') as f:
        json.dump(ch.info, f)

    # deliberate JSON round-trip: it stringifies the period keys, which
    # check_fill normalises back with int()
    with open(report_path, 'r') as f:
        info = json.load(f)
    check_fill(info)


if __name__ == '__main__':
    try:
        main()
    finally:
        # close the shared DB connection pool even if main() raised
        PgLoader.pool.close()
