import asyncio
from ccxt.pro import Exchange
from common.clickhouse_alc_engine import get_db, get_db_engine, get_kline_tb
from common.clickhouse_query import get_cut_range_from_table, get_kline_unfilled_range
from common.exchange import get_async_binance_portfolio_exchange_no_key
from common.kafka_message import get_kline_topic
from common.tools import safe_delayed
from common.variables import consider_symbols, time_counts, TimeFrameType

# from pytypes import typechecked
from datetime import datetime, timedelta
import joblib
import json
from kafka import KafkaProducer
import math
import pandas as pd
import threading
import time
from tqdm import tqdm


async def load_crypto_data(
    symbol: str,
    start_ms: int,
    end_ms: int,
    timeframe: TimeFrameType,
    exchange: Exchange = None,
) -> pd.DataFrame:
    """Fetch OHLCV candles for one symbol over [start_ms, end_ms) sequentially.

    This is the single-task worker: it pages through the exchange REST API
    until the window is exhausted.

    :param symbol: trading pair, e.g. "BTC/USDT"
    :param start_ms: inclusive window start, unix epoch milliseconds
    :param end_ms: exclusive window end, unix epoch milliseconds
    :param timeframe: kline interval
    :param exchange: optional shared exchange instance; when None a temporary
        one is created and closed before returning
    :return: DataFrame indexed by "time" (ms) with open/high/low/close/volume
    """
    data = []
    slide_time = start_ms
    owns_exchange = exchange is None
    exg = get_async_binance_portfolio_exchange_no_key() if owns_exchange else exchange
    try:
        while slide_time < end_ms:
            tmp_fetch = await exg.fetch_ohlcv(
                symbol, timeframe, slide_time, limit=5000
            )
            if not tmp_fetch:
                break
            data.extend(tmp_fetch)
            # advance 1ms past the last returned candle to avoid re-fetching it
            slide_time = tmp_fetch[-1][0] + 1
    finally:
        # fix: close a locally-created exchange even when fetch_ohlcv raises
        # (previously the connection leaked on errors); caller-supplied
        # exchanges are left open for the caller to manage
        if owns_exchange:
            await exg.close()
    df = pd.DataFrame(data, columns=["time", "open", "high", "low", "close", "volume"])
    df.set_index("time", inplace=True)
    # the last page may overshoot the requested window; trim it
    df = df[df.index < end_ms]
    return df


async def load_crypto_data_multitask(
    symbol: str,
    start_ms: int,
    end_ms: int,
    timeframe: TimeFrameType,
) -> pd.DataFrame:
    """Fetch OHLCV candles concurrently and persist them to the kline table.

    Splits [start_ms, end_ms) into up to 10 sub-windows of roughly 1000
    candles each, fetches them in parallel with :func:`load_crypto_data`,
    deduplicates, appends the result to the per-timeframe kline table and
    returns it.

    :param symbol: trading pair, e.g. "BTC/USDT"
    :param start_ms: inclusive window start, unix epoch milliseconds
    :param end_ms: exclusive window end, unix epoch milliseconds
    :param timeframe: kline interval
    :return: the persisted DataFrame (empty if nothing was fetched)
    """
    if end_ms <= start_ms:
        # fix: a degenerate window previously caused a zero range step below
        return pd.DataFrame()
    exchange = get_async_binance_portfolio_exchange_no_key()
    # estimated number of candles in the requested window
    maybe_counts = int((end_ms - start_ms) / 1000 / time_counts[timeframe])
    # cap at 10 concurrent tasks of ~1000 candles each; at least 1 part
    # (fix: parts could previously be 0 for tiny windows -> ZeroDivisionError)
    parts = max(1, min(10, math.ceil(maybe_counts / 1000)))
    delta = (end_ms - start_ms) // parts
    tasks = []
    for s in range(start_ms, end_ms, delta):
        tasks.append(
            load_crypto_data(symbol, s, min(s + delta, end_ms), timeframe, exchange)
        )
    try:
        results = await asyncio.gather(*tasks)
    finally:
        # fix: close the shared exchange even when a sub-task fails
        await exchange.close()
    results = [x for x in results if not x.empty]
    if not results:
        return pd.DataFrame()
    df = pd.concat(results, axis=0)
    df = df.drop_duplicates().sort_index()
    df["symbol"] = symbol
    df.rename_axis("time", inplace=True)
    df.reset_index(inplace=True)

    table = get_kline_tb(timeframe)
    df.to_sql(table, get_db_engine(get_db()), index=False, if_exists="append")
    return df


def update_klines_check_once(
    start: str,
    end: str,
    symbol: str,
    timeframe: TimeFrameType,
):
    """Synchronously backfill klines for one symbol over [start, end].

    :param start: isoformat
    :param end: isoformat
    :param symbol: "BTC/USDT"
    :param timeframe: kline interval
    :return: None
    """
    # convert the ISO timestamps to unix epoch milliseconds up front
    start_ms = int(datetime.fromisoformat(start).timestamp() * 1000)
    end_ms = int(datetime.fromisoformat(end).timestamp() * 1000)
    asyncio.run(load_crypto_data_multitask(symbol, start_ms, end_ms, timeframe))


def _get_unfill_df(
    start: str,
    end: str,
    symbol: str,
    timeframe: TimeFrameType,
    is_cover: bool,
) -> pd.DataFrame:
    """Return the ranges still needing klines for one symbol.

    In cover mode the whole [start, end] span is treated as unfilled;
    otherwise the gaps are queried from the database.
    """
    if not is_cover:
        return get_kline_unfilled_range(start, end, symbol, timeframe)
    return pd.DataFrame(
        [
            {
                "start": start,
                "end": end,
                # fix: use total_seconds() — .seconds discards whole days,
                # so any span >= 24h got a wrong (too small) delta
                "delta": (
                    datetime.fromisoformat(end) - datetime.fromisoformat(start)
                ).total_seconds()
                / 60,
            }
        ]
    )


def add_batch_klines(
    symbol_list: list[str],
    start: str,
    end: str,
    timeframe: TimeFrameType,
    max_part: int = 1000,
    n_jobs: int = joblib.cpu_count(),
    verbose: int = 10,
    is_cover: bool = False,
):
    """Backfill klines for many symbols, serially or via a joblib pool.

    :param symbol_list: trading pairs to backfill
    :param start: isoformat window start
    :param end: isoformat window end
    :param timeframe: kline interval
    :param max_part: max candles per sub-range cut
    :param n_jobs: <=1 runs serially with progress bars, otherwise a
        joblib.Parallel pool of this size
    :param verbose: joblib verbosity for the parallel path
    :param is_cover: when True, re-fetch the whole window instead of only
        the gaps found in the database
    """
    if n_jobs <= 1:
        for symbol in tqdm(symbol_list, desc="deal add_klines"):
            unfill_df = _get_unfill_df(start, end, symbol, timeframe, is_cover)
            unfill_cuts = get_cut_range_from_table(unfill_df, timeframe, max_part)
            for xstart, xend in tqdm(unfill_cuts, desc=f"sub add_klines {symbol}"):
                xstart: datetime
                xend: datetime
                update_klines_check_once(
                    xstart.isoformat(), xend.isoformat(), symbol, timeframe
                )
        return

    job_params = []
    for symbol in symbol_list:
        # fix: the parallel path previously ignored is_cover entirely;
        # both paths now share the same range computation
        unfill_df = _get_unfill_df(start, end, symbol, timeframe, is_cover)
        unfill_cuts = get_cut_range_from_table(unfill_df, timeframe, max_part)
        for xstart, xend in unfill_cuts:
            xstart: datetime
            xend: datetime
            job_params.append((xstart.isoformat(), xend.isoformat(), symbol, timeframe))
    pool = joblib.Parallel(n_jobs=n_jobs, verbose=verbose)
    pool(safe_delayed(update_klines_check_once)(*params) for params in job_params)


async def watch_ohlcv_data(
    symbol_list: list[str], timeframe: TimeFrameType, to_kafka: bool = False
):
    """Continuously watch live OHLCV via websocket and persist closed candles.

    Runs forever: each websocket batch is filtered to candles whose open time
    is more than 60s old (treated as closed), appended to the per-timeframe
    kline table, and optionally published to Kafka per symbol.

    :param symbol_list: trading pairs to subscribe, e.g. ["BTC/USDT"]
    :param timeframe: kline interval
    :param to_kafka: when True, also publish each stored batch to Kafka
    """
    exchange = get_async_binance_portfolio_exchange_no_key()
    print(f"start watch ohlcv data {symbol_list} {timeframe}")
    engine = get_db_engine(get_db())
    tb = get_kline_tb(timeframe)
    # flips to True after the one-time startup gap backfill below
    complete_interval = False
    kafka_producer = None
    if to_kafka:
        kafka_producer = KafkaProducer(
            bootstrap_servers="xx_kafka:9092",  # Kafka broker address
            value_serializer=lambda v: json.dumps(v).encode(
                "utf-8"
            ),  # serialize messages as JSON
        )
    while True:
        symbol_and_timeframe = [[symbol, timeframe] for symbol in symbol_list]

        results = await exchange.watch_ohlcv_for_symbols(
            symbol_and_timeframe, limit=1000
        )
        if not complete_interval:
            # On startup there may be a gap between the data already in the
            # database and the first websocket update, so re-check the last
            # 5 minutes once; this may insert duplicate rows.
            ed = datetime.now().isoformat()
            st = (datetime.now() - timedelta(minutes=5)).isoformat()
            th = threading.Thread(
                target=add_batch_klines,
                args=(consider_symbols, st, ed, timeframe, 100000, 1),
            )
            th.start()
            # NOTE(review): start() immediately followed by join() blocks the
            # loop — effectively synchronous; confirm the thread is intentional
            th.join()
            complete_interval = True
        for symbol, frame_data in results.items():
            cur_data = frame_data[timeframe]
            df = pd.DataFrame(
                columns=["time", "open", "high", "low", "close", "volume"],
                data=cur_data,
            )
            df["symbol"] = symbol
            # keep only candles older than 60s, i.e. already closed
            df = df[df["time"] < (time.time() - 60) * 1000]
            if len(df) <= 0:
                continue
            df.to_sql(tb, engine, index=False, if_exists="append")
            if to_kafka:
                kafka_producer.send(
                    get_kline_topic(symbol, timeframe), df.to_dict(orient="records")
                )
            print(f"watch ohlv {timeframe}")
            print(df)


async def watch_ohlcv_data_timely(
    symbol_list: list[str], timeframe: TimeFrameType, to_kafka: bool = False
):
    """Poll one closed candle per symbol at each interval boundary, forever.

    First backfills the last 5 minutes, then sleeps until each timeframe
    boundary and fetches exactly one candle per symbol via REST, appending
    the rows to the kline table and optionally publishing them to Kafka.

    :param symbol_list: trading pairs to poll, e.g. ["BTC/USDT"]
    :param timeframe: kline interval
    :param to_kafka: when True, also publish each stored row to Kafka
    """
    exchange = get_async_binance_portfolio_exchange_no_key()
    print(f"start watch ohlcv data {symbol_list} {timeframe}")
    kafka_producer = None
    if to_kafka:
        kafka_producer = KafkaProducer(
            bootstrap_servers="xx_kafka:9092",  # Kafka broker address
            value_serializer=lambda v: json.dumps(v).encode(
                "utf-8"
            ),  # serialize messages as JSON
        )

    # floor "now" to the most recent interval boundary (seconds)
    start_kline_time = (time.time() // time_counts[timeframe]) * time_counts[timeframe]
    edt = datetime.fromtimestamp(start_kline_time)
    st = (edt - timedelta(minutes=5)).isoformat()
    # one-time startup backfill of the last 5 minutes (run in a thread but
    # joined immediately, so effectively synchronous)
    th = threading.Thread(
        target=add_batch_klines,
        args=(consider_symbols, st, edt.isoformat(), timeframe, 10000, 1),
    )
    th.start()
    th.join()

    # p is the open time (epoch seconds) of the candle to fetch next
    p = start_kline_time
    while True:
        # sleep until the candle that opened at p has closed
        if (d := p + time_counts[timeframe] - time.time()) >= 0:
            time.sleep(d)

        results = await asyncio.gather(
            *[
                exchange.fetch_ohlcv(symbol, timeframe, int(p * 1000), limit=1)
                for symbol in symbol_list
            ]
        )
        data = []
        for symbol, batch in zip(symbol_list, results):
            for r in batch:
                data.append([symbol, *r])
        df = pd.DataFrame(
            columns=["symbol", "time", "open", "high", "low", "close", "volume"],
            data=data,
        )
        # df.set_index("time", inplace=True)
        table = get_kline_tb(timeframe)
        df.to_sql(table, get_db_engine(get_db()), index=False, if_exists="append")
        print(f"watch ohlv {timeframe}")
        print(df)
        # advance to the next interval boundary
        p = p + time_counts[timeframe]
        if to_kafka:
            for i, rr in df.iterrows():
                kafka_producer.send(
                    get_kline_topic(rr["symbol"], timeframe), [rr.to_dict()]
                )


if __name__ == "__main__":
    # Ad-hoc manual checks; earlier invocations kept for reference:
    # update_klines_check_once("2025-02-10", "2025-02-19", "BTC/USDT", "1m")

    # ed = datetime.now().isoformat()
    # st = (datetime.now() - timedelta(days=1)).isoformat()
    # add_batch_klines(consider_symbols, st, ed, "1m", n_jobs=6, max_part=100000)
    # asyncio.run(watch_ohlcv_data(consider_symbols, "1m"))

    print(time.time())
    # fix: asyncio.run replaces the deprecated
    # asyncio.get_event_loop().run_until_complete pattern
    result = asyncio.run(
        load_crypto_data("ATA/USDT", 1741605480000, int(time.time() * 1000), "1m")
    )
    print(result)