import os
import pandas as pd
from utils import df_into_db, read_sql
import datetime
import multiprocessing
import pandas as pd
import os
import warnings
warnings.filterwarnings("ignore")


def get_symbol_info():
    """Return a DataFrame of all currently-trading symbols from the DB."""
    query = "select * from binance_symbol_info where trading = 1"
    return read_sql(query, db_name="binance")


def get_latest_timestamp(frequency, hash_value=None):
    """Return a {symbol: newest stored timestamp} map for one k-line table.

    When *hash_value* is given, the per-batch table
    ``binance_k_line_{frequency}_{hash_value}`` is queried instead of the
    base table.
    """
    suffix = f"_{hash_value}" if hash_value else ""
    table_name = f"binance_k_line_{frequency}{suffix}"
    query = (
        f"select symbol,max(timestamp) as max_timestamp from {table_name} "
        f"where frequency='{frequency}' group by symbol"
    )
    df = read_sql(query, db_name="binance")
    return dict(zip(df['symbol'], df['max_timestamp']))


def file_into_db(frequency, symbols, hash_value=None):
    """Load per-symbol k-line CSV files for *frequency* and write them to the DB.

    Parameters
    ----------
    frequency : str
        One of "1d", "1h", "5m"; selects the source directory and the
        expected bar-to-bar gap used for the continuity check.
    symbols : list[str]
        Symbols whose CSV files should be loaded (missing files are skipped).
    hash_value : optional
        Batch identifier; when given, rows go to the per-batch table
        ``binance_k_line_{frequency}_{hash_value}``.

    Raises
    ------
    ValueError
        If a symbol's new data is not contiguous with what the DB already holds.
    """
    # Expected gap in seconds between the DB's newest bar and the file's first bar.
    gap_map = {
        "1d": 24 * 3600,
        "1h": 3600,
        "5m": 5 * 60
    }
    max_db_time_map = get_latest_timestamp(frequency, hash_value)
    print(f"处理{frequency}第{hash_value}批")
    path = f"G:/binance_k_line/v2/{frequency}"
    # Collect frames and concat once at the end: pd.concat inside the loop
    # copies the accumulated data every iteration (accidental O(n^2)).
    frames = []
    for symbol in symbols:
        file = os.path.join(path, f"binance_{symbol}_kline_{frequency}.csv")
        if not os.path.exists(file):
            print(f"{file} 不存在")
            continue
        tmp_df = pd.read_csv(file)
        latest_time = max_db_time_map.get(symbol)
        if latest_time is None:
            print(f"{symbol} 之前不在数据库中")
        else:
            # Timestamps appear to be in milliseconds (divided by 1000 to get
            # seconds). The gap to the stored series must be exactly one bar,
            # otherwise there is a hole in the data.
            gap_time = int((tmp_df["timestamp"].min() - latest_time) / 1000)
            if gap_time != gap_map[frequency]:
                # raise instead of assert: assert is stripped under `python -O`
                raise ValueError("数据必须是连续的")
        frames.append(tmp_df)
    all_df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
    print(f"{frequency}第{hash_value}批长度:{len(all_df)},columns:{all_df.columns.tolist()}")
    if hash_value is None:
        table_name = f"binance_k_line_{frequency}"
    else:
        table_name = f"binance_k_line_{frequency}_{hash_value}"
    df_into_db(all_df, db_name="binance", table_name=table_name)


def main(frequency, multiprocess=False):
    """Dispatch CSV->DB loading for *frequency*, optionally in parallel.

    Daily ("1d") data is loaded in a single pass over all symbols; "5m"/"1h"
    symbols are partitioned into batches by their precomputed hash column,
    each batch targeting its own suffixed table (see file_into_db).

    Parameters
    ----------
    frequency : str
        "1d", "1h" or "5m".
    multiprocess : bool
        When True, batches are processed concurrently in a process pool;
        otherwise they run sequentially as they are discovered.
    """
    info_df = get_symbol_info()
    if frequency == "1d":
        # fixed typo: local variable was misspelled "sybmols"
        symbols = info_df["symbol"].tolist()
        file_into_db(frequency, symbols)
    else:
        grp_column = "five_minute_hash" if frequency == "5m" else "one_hour_hash"
        params = []
        for hash_value, group in info_df.groupby(grp_column):
            print(hash_value)
            params.append((frequency, group['symbol'].tolist(), hash_value))
            print(f"{datetime.datetime.now()} {frequency}: {params[-1]}")
            if not multiprocess:
                file_into_db(*params[-1])
        if multiprocess:
            # NOTE(review): pool size 60 is hard-coded — confirm the host has
            # the cores/DB connections to support this fan-out.
            with multiprocessing.Pool(60) as pool:
                pool.starmap(file_into_db, params)
    print(f"{datetime.datetime.now()} {frequency}: 执行成功")


if __name__ == '__main__':
    # Script entry point: load the 5-minute data sequentially
    # (multiprocess defaults to False).
    main("5m")
