import os
import pandas as pd
from utils import df_into_db, read_sql, batch_insert_dataframe
import datetime, time
from typing import List, Tuple
from concurrent.futures import ThreadPoolExecutor
import multiprocessing
import pandas as pd
import pymysql
import tempfile
import os


def get_symbol_info():
    """Fetch the full Binance symbol-info table as a DataFrame."""
    return read_sql("select * from binance_symbol_info", db_name="binance")


def get_exist_symbols(frequency, hash_value=None):
    """Return the set of symbols already stored for this frequency.

    A truthy hash_value selects the sharded table
    f"binance_k_line_{frequency}_{hash_value}"; batch 0 (or None) reads
    the unsuffixed base table.
    """
    suffix = f"_{hash_value}" if hash_value else ""
    table_name = f"binance_k_line_{frequency}{suffix}"
    query = f"select distinct symbol from {table_name} where frequency='{frequency}'"
    df = read_sql(query, db_name="binance")
    return set(df.symbol)

def get_table_count(frequency, hash_value=None):
    """Print the row count of the k-line table for *frequency*.

    A truthy hash_value targets the sharded table; otherwise the base
    table is counted.
    """
    suffix = f"_{hash_value}" if hash_value else ""
    table_name = f"binance_k_line_{frequency}{suffix}"
    count_df = read_sql(f"select count(*) as count from {table_name}", db_name="binance")
    print(f"{table_name},count:{count_df.iloc[0]['count']}")

def file_into_db(frequency, symbols, hash_value):
    """Load per-symbol k-line CSVs from disk and bulk-insert them into MySQL.

    Args:
        frequency: k-line interval, e.g. "5m", "1h", "1d".
        symbols: iterable of symbol names whose CSV files should be loaded.
        hash_value: shard id; 0 targets the unsuffixed base table, any
            other value targets f"binance_k_line_{frequency}_{hash_value}".

    Missing CSV files are reported and skipped.
    """
    print(f"处理{frequency}第{hash_value}批")
    path = f"G:/binance_k_line/v1/{frequency}"
    frames = []
    for symbol in symbols:
        file = os.path.join(path, f"binance_{symbol}_kline_{frequency}.csv")
        if not os.path.exists(file):
            print(f"{file} 不存在")
            continue
        frames.append(pd.read_csv(file))
    # Concatenate once at the end: the original called pd.concat inside the
    # loop, copying the whole accumulated frame each iteration (O(n^2)).
    # Guard the empty case — pd.concat([]) raises ValueError.
    all_df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
    print(f"{frequency}第{hash_value}批长度:{len(all_df)},columns:{all_df.columns.tolist()}")
    if hash_value == 0:
        table_name = f"binance_k_line_{frequency}"
    else:
        table_name = f"binance_k_line_{frequency}_{hash_value}"
    batch_insert_dataframe(all_df, db_name="binance", table_name=table_name)


def check_data(frequency):
    """Compare on-disk CSV row counts against the DB tables, batch by batch.

    For each hash batch, prints the combined length of the local CSV files
    and then the row count of the corresponding DB table so the two can be
    compared for discrepancies. Missing CSV files are reported and skipped.

    Args:
        frequency: "5m" or "1h".

    Raises:
        ValueError: if frequency is not "5m" or "1h".
    """
    # raise instead of assert: asserts are stripped under `python -O`
    if frequency not in ("5m", "1h"):
        raise ValueError(f"unsupported frequency: {frequency!r}")
    grp_column = "five_minute_hash" if frequency == "5m" else "one_hour_hash"
    info_df = get_symbol_info()
    path = f"G:/binance_k_line/v1/{frequency}"
    for hash_value, group in info_df.groupby(grp_column):
        print(hash_value)
        frames = []
        for symbol in group['symbol'].tolist():
            file = os.path.join(path, f"binance_{symbol}_kline_{frequency}.csv")
            if not os.path.exists(file):
                print(f"{file} 不存在")
                continue
            frames.append(pd.read_csv(file))
        # Single concat avoids the original O(n^2) concat-in-loop pattern;
        # pd.concat([]) raises, so fall back to an empty frame.
        all_df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
        print(f"{frequency}第{hash_value}批长度:{len(all_df)},columns:{all_df.columns.tolist()}")
        get_table_count(frequency, hash_value)


def main(frequency, multiprocess=False):
    """Ingest all k-line CSVs for *frequency* into the database.

    "1d" loads every symbol into the single base table. "5m"/"1h" are
    split into hash batches (highest hash first), processed serially or —
    when multiprocess=True — fanned out to a 60-worker process pool.

    Args:
        frequency: "1d", "5m" or "1h".
        multiprocess: when True, run the hash batches in parallel.
    """
    info_df = get_symbol_info()
    if frequency == "1d":
        # fix: local name was misspelled "sybmols" in the original
        symbols = info_df["symbol"].tolist()
        file_into_db(frequency, symbols, 0)
    else:
        grp_column = "five_minute_hash" if frequency == "5m" else "one_hour_hash"
        params = []
        # Sort by the hash key explicitly: sorting raw (key, DataFrame)
        # tuples would try to compare DataFrames on a key tie.
        groups = sorted(info_df.groupby(grp_column),
                        key=lambda kv: kv[0], reverse=True)
        for hash_value, group in groups:
            print(hash_value)
            params.append((frequency, group['symbol'].tolist(), hash_value))
            print(f"{datetime.datetime.now()} {frequency}: {params[-1]}")
            if not multiprocess:
                file_into_db(*params[-1])
        if multiprocess:
            with multiprocessing.Pool(60) as pool:
                pool.starmap(file_into_db, params)
    print(f"{datetime.datetime.now()} {frequency}: 执行成功")


if __name__ == '__main__':
    # Example subset of symbols kept for manual spot-checks:
    # symbols = ["ETHUSDT", "ETHBTC", "BNBUSDT", "BNBBTC", "XRPUSDT", "XRPBTC", "SOLUSDT", "TRXUSDT", "DOGEUSDT", "SOLBTC", "TRXBTC", "DOGEBTC"]
    main("1d")  # load daily k-lines; uncomment check_data below to verify 5m counts
    # check_data("5m")