import requests
import hashlib
import zipfile
import os
import time
import concurrent.futures
from itertools import product
import calendar
import pandas as pd
import glob

# --- Configuration ---
SYMBOLS = ["ETHUSDT"]  # trading pairs to download
TIME_INTERVALS = [
    "1h", "5m"
]  # kline intervals, Binance interval codes
# Month range to process, inclusive, "YYYY-MM" format.
START_DATE_STR = "2025-08"
END_DATE_STR = "2025-08"

MAX_WORKERS = 10  # concurrent download threads

# --- Path configuration ---
# Daily per-day CSVs land here; merged monthly CSVs go to the second root.
BASE_SAVE_PATH_DAILY_ROOT = r"F:\personal\klines_by_day\\"
BASE_SAVE_PATH_MONTHLY_ROOT = r"F:\personal\binance_klines\\"


# --- 辅助函数 (无修改) ---
def download_file(url, save_path):
    """Download *url* to *save_path*, streaming in 8 KiB chunks.

    Returns True on success, False on any failure. A 404 response is
    treated as "file not published" and produces no output; all other
    HTTP/network errors are logged. The parent directory of *save_path*
    is created if it does not exist.
    """
    try:
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        # FIX: use the response as a context manager so the streamed
        # connection is always released back to the pool, even when an
        # error occurs mid-transfer (otherwise connections leak).
        with requests.get(url, stream=True, timeout=60) as response:
            response.raise_for_status()
            with open(save_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
        return True
    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 404:
            pass  # expected: the daily archive simply is not available
        else:
            print(f"[{os.path.basename(save_path)}] HTTP错误, 下载失败: {e}")
    except requests.exceptions.RequestException as e:
        print(f"[{os.path.basename(save_path)}] 下载错误: {e}")
    except Exception as e:
        print(f"[{os.path.basename(save_path)}] 下载时发生未知错误: {e}")
    return False

def verify_checksum(zip_file_path, checksum_file_path):
    """Return True iff the SHA-256 of *zip_file_path* matches the hash
    recorded as the first token of *checksum_file_path*.

    Missing files or any read error yield False.
    """
    if not (os.path.exists(zip_file_path) and os.path.exists(checksum_file_path)):
        return False
    try:
        # The .CHECKSUM file format is "<hex-digest> <filename>".
        with open(checksum_file_path, 'r') as checksum_file:
            expected = checksum_file.read().strip().split()[0]
        digest = hashlib.sha256()
        with open(zip_file_path, 'rb') as archive:
            # Hash in 64 KiB blocks to keep memory usage flat.
            for block in iter(lambda: archive.read(65536), b''):
                digest.update(block)
        return digest.hexdigest() == expected
    except Exception as e:
        print(f"[{os.path.basename(zip_file_path)}] 验证校验和时出错: {e}")
        return False

def unzip_file(zip_file_path, extract_to_dir):
    """Extract every member of *zip_file_path* into *extract_to_dir*.

    Creates the target directory if needed. Returns True on success,
    False if the archive is corrupt or extraction fails.
    """
    try:
        os.makedirs(extract_to_dir, exist_ok=True)
        with zipfile.ZipFile(zip_file_path) as archive:
            archive.extractall(extract_to_dir)
    except zipfile.BadZipFile:
        print(f"错误: {os.path.basename(zip_file_path)} 是损坏的ZIP文件")
        return False
    except Exception as e:
        print(f"解压 {os.path.basename(zip_file_path)} 时出错: {e}")
        return False
    return True


# --- 核心下载任务函数 (无修改) ---
def process_daily_download_task(symbol, interval, year_str, month_str, day_str):
    """Fetch one daily klines archive for (symbol, interval, date):
    download the ZIP and its CHECKSUM, verify the SHA-256, extract the
    CSV next to the other daily CSVs, then delete the temporary
    ZIP/CHECKSUM files. Returns a human-readable status string.
    """
    date_str = f"{year_str}-{month_str}-{day_str}"
    log_prefix = f"[{symbol}-{interval}-{date_str}]"

    zip_name = f"{symbol}-{interval}-{date_str}.zip"
    checksum_name = f"{zip_name}.CHECKSUM"
    remote_base = f"https://data.binance.vision/data/futures/um/daily/klines/{symbol}/{interval}/"

    out_dir = os.path.join(BASE_SAVE_PATH_DAILY_ROOT, symbol, interval)
    tmp_dir = os.path.join(out_dir, "zip")
    zip_path = os.path.join(tmp_dir, zip_name)
    checksum_path = os.path.join(tmp_dir, checksum_name)
    csv_path = os.path.join(out_dir, f"{symbol}-{interval}-{date_str}.csv")

    # Idempotency: the extracted CSV is the final artifact; skip if present.
    if os.path.exists(csv_path):
        return f"{log_prefix} Skipped (already exists)"

    print(f"{log_prefix} 开始下载...")
    if not download_file(remote_base + zip_name, zip_path):
        # No file on disk is interpreted as a 404 from Binance.
        # NOTE(review): other HTTP errors also leave no file behind and are
        # therefore reported the same way -- confirm whether that matters.
        if os.path.exists(zip_path):
            return f"{log_prefix} Failed (ZIP download error)"
        return f"{log_prefix} Skipped (404 Not Found)"

    if not download_file(remote_base + checksum_name, checksum_path):
        print(f"{log_prefix} CHECKSUM下载失败，无法验证。")
        return f"{log_prefix} Failed (Checksum download error)"

    if not verify_checksum(zip_path, checksum_path):
        print(f"{log_prefix} 校验和验证失败！")
        # Remove the bad archive immediately so it can never be extracted.
        try:
            os.remove(zip_path)
        except OSError as e:
            print(f"{log_prefix} 无法删除校验失败的ZIP文件: {e}")
        status = f"{log_prefix} Failed (Checksum mismatch)"
    elif unzip_file(zip_path, out_dir):
        print(f"{log_prefix} 成功下载并解压。")
        status = f"{log_prefix} Success"
    else:
        print(f"{log_prefix} 校验通过但解压失败。")
        status = f"{log_prefix} Failed (Unzip error)"

    # Best-effort cleanup of the temporary ZIP and CHECKSUM files.
    for tmp_file in (zip_path, checksum_path):
        if os.path.exists(tmp_file):
            try:
                os.remove(tmp_file)
            except OSError as e:
                print(f"{log_prefix} 无法删除临时文件 {os.path.basename(tmp_file)}: {e}")
    return status


# --- 合并CSV函数 (最终健壮版，可处理表头和列偏移) ---
def merge_daily_csvs_to_monthly(symbol, interval, year_str, month_str,
                                daily_root=None, monthly_root=None):
    """Merge all daily CSVs of one month into a single monthly CSV.

    Tolerates stray header rows and column-offset artifacts mixed into
    the daily files, writes one canonical header, and overwrites any
    existing monthly file.

    Parameters
    ----------
    symbol, interval : str
        Trading pair and kline interval, e.g. "ETHUSDT", "1h".
    year_str, month_str : str
        Year and zero-padded month, e.g. "2025", "08".
    daily_root, monthly_root : str, optional
        Override the input/output root directories; default to the
        module-level BASE_SAVE_PATH_DAILY_ROOT / BASE_SAVE_PATH_MONTHLY_ROOT.
    """
    if daily_root is None:
        daily_root = BASE_SAVE_PATH_DAILY_ROOT
    if monthly_root is None:
        monthly_root = BASE_SAVE_PATH_MONTHLY_ROOT

    log_prefix = f"[Merge {symbol}-{interval}-{year_str}-{month_str}]"

    daily_csv_dir = os.path.join(daily_root, symbol, interval)
    monthly_csv_dir = os.path.join(monthly_root, symbol, interval)
    monthly_csv_path = os.path.join(
        monthly_csv_dir, f"{symbol}-{interval}-{year_str}-{month_str}.csv")

    os.makedirs(monthly_csv_dir, exist_ok=True)

    search_pattern = os.path.join(
        daily_csv_dir, f"{symbol}-{interval}-{year_str}-{month_str}-*.csv")
    daily_files = sorted(glob.glob(search_pattern))
    if not daily_files:
        return

    print(f"{log_prefix} 找到 {len(daily_files)} 个日度文件, 开始合并并深度清理格式(覆盖)...")

    try:
        # 1. Read every file headerless so embedded header rows become
        #    ordinary rows we can detect and drop below.
        frames = [pd.read_csv(f, header=None, engine='python') for f in daily_files]
        messy_df = pd.concat(frames, ignore_index=True)

        # 2. Identify header rows: a row whose first non-null cell does
        #    not parse as a number (e.g. "open_time") is a header.
        first_values = messy_df.apply(
            lambda row: row.dropna().iloc[0] if not row.dropna().empty else None,
            axis=1)
        is_header_mask = pd.to_numeric(first_values, errors='coerce').isna()
        data_only_df = messy_df[~is_header_mask]

        # 3. Fix column offsets by left-packing each row's non-null cells.
        cleaned_df = data_only_df.apply(
            lambda row: pd.Series(row.dropna().values), axis=1)

        header_names = [
            'open_time', 'open', 'high', 'low', 'close', 'volume',
            'close_time', 'quote_volume', 'count', 'taker_buy_volume',
            'taker_buy_quote_volume', 'ignore'
        ]

        # FIX: the old code crashed with a column-length mismatch when the
        # cleanup left no rows, or rows with fewer than 12 fields. Bail out
        # gracefully instead.
        if cleaned_df.empty or cleaned_df.shape[1] < len(header_names):
            print(f"{log_prefix} 清理后没有有效数据, 跳过合并。")
            return
        if cleaned_df.shape[1] > len(header_names):
            cleaned_df = cleaned_df.iloc[:, :len(header_names)]
        cleaned_df.columns = header_names

        # 4. Normalize dtypes; int64 timestamps avoid scientific notation
        #    in the output file.
        int_cols = ['open_time', 'close_time', 'count', 'ignore']
        float_cols = ['open', 'high', 'low', 'close', 'volume', 'quote_volume',
                      'taker_buy_volume', 'taker_buy_quote_volume']
        for col in int_cols:
            cleaned_df[col] = pd.to_numeric(cleaned_df[col]).astype('int64')
        for col in float_cols:
            cleaned_df[col] = pd.to_numeric(cleaned_df[col]).astype('float')

        # 5. Write the final clean monthly file with a single header row.
        cleaned_df.to_csv(monthly_csv_path, index=False, header=True)

        print(f"{log_prefix} 成功合并、清理并保存到 {monthly_csv_path}")

    except Exception as e:
        print(f"{log_prefix} 合并时发生严重错误: {e}")

# --- 主逻辑 (无修改) ---
def _iter_months(start_year, start_month, end_year, end_month):
    """Yield (year, month) integer pairs from start to end, inclusive."""
    year, month = start_year, start_month
    while year < end_year or (year == end_year and month <= end_month):
        yield year, month
        month += 1
        if month > 12:
            month = 1
            year += 1


def main():
    """Run the two-phase pipeline: parallel daily downloads, then merge
    each month's daily CSVs into monthly files."""
    total_start_time = time.time()

    print("--- 阶段一: 开始下载日度K线数据 ---")
    download_start_time = time.time()

    start_year, start_month = map(int, START_DATE_STR.split('-'))
    end_year, end_month = map(int, END_DATE_STR.split('-'))

    # Expand the configured month range into one task per calendar day.
    daily_tasks_args = []
    for year, month in _iter_months(start_year, start_month, end_year, end_month):
        num_days = calendar.monthrange(year, month)[1]
        daily_tasks_args.extend(
            (str(year), f"{month:02d}", f"{day:02d}")
            for day in range(1, num_days + 1))

    tasks_to_run = list(product(SYMBOLS, TIME_INTERVALS, daily_tasks_args))
    total_tasks = len(tasks_to_run)
    print(f"总共需要下载 {total_tasks} 个日度文件任务。")
    print(f"使用 {MAX_WORKERS} 个并发线程。")

    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        future_to_task = {
            executor.submit(process_daily_download_task, s, i, d[0], d[1], d[2]): (s, i, d)
            for s, i, d in tasks_to_run
        }
        completed_count = 0
        for future in concurrent.futures.as_completed(future_to_task):
            completed_count += 1
            # FIX: consume each future's result so unexpected exceptions
            # raised inside a worker are reported instead of being
            # silently discarded.
            try:
                future.result()
            except Exception as e:
                s, i, d = future_to_task[future]
                print(f"\n[{s}-{i}-{'-'.join(d)}] 任务异常: {e}")
            print(f"下载进度: {completed_count}/{total_tasks} ({completed_count / total_tasks:.2%})", end='\r')

    download_end_time = time.time()
    print(f"\n\n--- 阶段一完成: 所有下载任务已处理，耗时: {download_end_time - download_start_time:.2f} 秒。---\n")

    print("--- 阶段二: 开始合并日度数据为月度文件 ---")
    merge_start_time = time.time()

    # One merge task per (symbol, interval, month); merging is sequential
    # because it is disk/CPU bound and order makes progress readable.
    monthly_tasks_args = [
        (str(year), f"{month:02d}")
        for year, month in _iter_months(start_year, start_month, end_year, end_month)
    ]
    merge_tasks = list(product(SYMBOLS, TIME_INTERVALS, monthly_tasks_args))
    for i, (symbol, interval, date) in enumerate(merge_tasks):
        print(f"合并进度: {i + 1}/{len(merge_tasks)}")
        merge_daily_csvs_to_monthly(symbol, interval, date[0], date[1])

    merge_end_time = time.time()
    print(f"\n--- 阶段二完成: 所有合并任务已处理，耗时: {merge_end_time - merge_start_time:.2f} 秒。---\n")

    total_end_time = time.time()
    print(f"所有任务已完成，总耗时: {total_end_time - total_start_time:.2f} 秒。")

# Standard script entry-point guard: run the pipeline only when executed
# directly, not when imported as a module.
if __name__ == "__main__":
    main()