import os
import time
import requests
import hashlib
import zipfile
import pandas as pd
import concurrent.futures
from itertools import product
from typing import List, Optional

# Project-internal dependencies
from src.data_access.timescale_repository import TimescaleRepository
from src.enums.timeframe import Timeframe


def _verify_checksum(zip_path: str, checksum_path: str) -> bool:
    """验证文件的SHA256校验和。"""
    if not os.path.exists(zip_path) or not os.path.exists(checksum_path):
        return False
    try:
        with open(checksum_path, 'r') as f:
            expected_hash = f.read().strip().split()[0]

        sha256 = hashlib.sha256()
        with open(zip_path, 'rb') as f:
            for chunk in iter(lambda: f.read(65536), b''):
                sha256.update(chunk)
        calculated_hash = sha256.hexdigest()
        return calculated_hash == expected_hash
    except Exception as e:
        print(f"校验和验证时出错: {e}")
        return False


def _unzip_file(zip_path: str, extract_dir: str) -> Optional[str]:
    """解压ZIP文件并返回CSV文件的路径。"""
    try:
        os.makedirs(extract_dir, exist_ok=True)
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            csv_filename = [name for name in zip_ref.namelist() if name.endswith('.csv')][0]
            zip_ref.extractall(extract_dir)
            return os.path.join(extract_dir, csv_filename)
    except (zipfile.BadZipFile, IndexError) as e:
        print(f"解压失败或未找到CSV文件: {e}")
    return None


def _download_file(url: str, save_path: str, timeout: int = 60) -> bool:
    """
    Download *url* to *save_path* in streamed chunks.

    Retrying is the CALLER's responsibility; this function performs a single
    attempt and returns False on any failure so the caller can retry.

    Note: on a 404 (and on connection errors) ``raise_for_status``/``get``
    fires before the target file is opened, so no file is left behind.

    Args:
        url: Source URL.
        save_path: Destination file path (parent directories are created).
        timeout: Connect/read timeout in seconds.

    Returns:
        True on a completed download, False otherwise (404 included).
    """
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    try:
        # Fix: use the Response as a context manager so the underlying
        # connection is always released — the original leaked connections on
        # the error paths, which matters with many worker threads.
        with requests.get(url, stream=True, timeout=timeout) as response:
            response.raise_for_status()
            with open(save_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
        return True
    except requests.exceptions.HTTPError as e:
        # Guard e.response before dereferencing; it can be None in edge cases.
        if e.response is not None and e.response.status_code == 404:
            return False  # 404不是错误，是文件不存在，不重试
        print(f"HTTP错误: {e}")
    except requests.exceptions.RequestException as e:
        print(f"下载错误: {e}")
    return False  # 返回False以触发重试


class KlineDownloader:
    """
    A robust kline downloader that fetches monthly archives from Binance,
    verifies, parses and stores them in TimescaleDB.

    - Multi-threaded downloads
    - Retry on failure
    - Automatic CSV header handling
    - Integrates with the project's Repository and Timeframe enum
    """
    # Monthly USD-M futures kline archive location on Binance's public data site.
    BASE_URL = "https://data.binance.vision/data/futures/um/monthly/klines/{symbol}/{interval}/"

    # Official Binance CSV column order (used when the file has no header row).
    BINANCE_KLINE_COLUMNS = [
        'open_time', 'open', 'high', 'low', 'close', 'volume',
        'close_time', 'quote_volume', 'count', 'taker_buy_volume',
        'taker_buy_quote_volume', 'ignore'
    ]

    def __init__(self, repository: TimescaleRepository, base_save_path: str, max_workers: int = 10,
                 max_retries: int = 3):
        """
        Initialize the downloader.

        Args:
            repository: TimescaleRepository instance used for persistence.
            base_save_path: Root directory for downloaded zip and csv files.
            max_workers: Maximum number of concurrent download threads.
            max_retries: Maximum retries per file on download/verification failure.
        """
        self.repo = repository
        self.base_save_path = base_save_path
        self.max_workers = max_workers
        self.max_retries = max_retries
        print(
            f"KlineDownloader 初始化完成。保存路径: {base_save_path}, 最大线程数: {max_workers}, 最大重试次数: {max_retries}")

    def _parse_csv_and_save_to_db(self, csv_path: str, symbol: str, timeframe: Timeframe):
        """
        Parse a kline CSV file and persist its contents to the database.

        Args:
            csv_path: Path to the extracted CSV file.
            symbol: Trading pair, e.g. "ETHUSDT".
            timeframe: Timeframe enum of the data.
        """
        try:
            # 1. Header detection: newer Binance archives ship a header row,
            #    older ones do not.
            with open(csv_path, 'r', encoding='utf-8') as f:
                first_line = f.readline()
                has_header = 'open_time' in first_line.lower()

            if has_header:
                df = pd.read_csv(csv_path)
            else:
                df = pd.read_csv(csv_path, header=None, names=self.BINANCE_KLINE_COLUMNS)

            if df.empty:
                print(f"CSV文件为空: {os.path.basename(csv_path)}")
                return

            # 2. Pre-processing: drop Binance's unused trailing column.
            if 'ignore' in df.columns:
                df = df.drop(columns=['ignore'])

            # Coerce price/volume columns to numeric in case any were read as
            # strings (bad rows become NaN rather than raising).
            numeric_cols = ['open', 'high', 'low', 'close', 'volume', 'quote_volume', 'count', 'taker_buy_volume',
                            'taker_buy_quote_volume']
            for col in numeric_cols:
                if col in df.columns:
                    df[col] = pd.to_numeric(df[col], errors='coerce')

            # Millisecond epoch -> timezone-aware (UTC) datetime.
            df['time'] = pd.to_datetime(df['open_time'], unit='ms', utc=True)

            # Tag every row with its symbol and timeframe.
            df['symbol'] = symbol
            df['timeframe'] = timeframe.value

            # Rename columns to match the database schema.
            if 'count' in df.columns:
                df.rename(columns={'count': 'trade_count'}, inplace=True)
            if 'close_time' in df.columns:
                df.rename(columns={'close_time': 'close_time_raw'}, inplace=True)

            # Columns written to the database; must mirror the table
            # definition exactly (no extra columns such as 'source').
            final_columns = [
                'time', 'symbol', 'timeframe', 'open', 'high', 'low', 'close',
                'volume', 'quote_volume', 'trade_count', 'taker_buy_volume',
                'taker_buy_quote_volume', 'close_time_raw'
            ]

            # save_klines expects a DataFrame indexed by 'time'.
            df_to_save = df[final_columns].set_index('time')
            self.repo.save_klines(df_to_save)

        except Exception as e:
            print(f"处理CSV并存入数据库时出错 ({os.path.basename(csv_path)}): {e}")

    def _process_single_task(self, symbol: str, timeframe: Timeframe, year: int, month: int) -> str:
        """
        Full pipeline for one monthly archive: probe, download, verify,
        unzip, ingest. Retries with a short back-off on failure.

        Returns:
            A human-readable status line prefixed with the task identity.
        """
        log_prefix = f"[{symbol}-{timeframe.value}-{year}-{month:02d}]"

        # Derive URLs and local paths for this (symbol, interval, month).
        interval_str = timeframe.value
        base_url = self.BASE_URL.format(symbol=symbol, interval=interval_str)
        zip_filename = f"{symbol}-{interval_str}-{year}-{month:02d}.zip"
        checksum_filename = f"{zip_filename}.CHECKSUM"

        symbol_base_path = os.path.join(self.base_save_path, symbol)
        zip_save_dir = os.path.join(symbol_base_path, "zip")
        extract_dir = os.path.join(symbol_base_path, interval_str)

        zip_url = base_url + zip_filename
        checksum_url = base_url + checksum_filename
        zip_save_path = os.path.join(zip_save_dir, zip_filename)
        checksum_save_path = os.path.join(zip_save_dir, checksum_filename)

        # Probe for existence up front with a cheap HEAD request. The old
        # heuristic ("download failed and no file on disk" => 404) also
        # skipped months whose download failed for transient reasons
        # (connection errors, 5xx) before any file was created; an explicit
        # 404 probe avoids that misclassification.
        try:
            if requests.head(zip_url, timeout=30).status_code == 404:
                return f"{log_prefix} Skipped (404 Not Found)"
        except requests.exceptions.RequestException:
            pass  # best effort; fall through to the download/retry loop

        for attempt in range(self.max_retries):
            # 1. Download the ZIP archive.
            if not _download_file(zip_url, zip_save_path):
                print(f"{log_prefix} ZIP下载失败，将在1秒后重试 (尝试 {attempt + 1}/{self.max_retries})...")
                time.sleep(1)
                continue

            # 2. Download the CHECKSUM file.
            if not _download_file(checksum_url, checksum_save_path):
                print(f"{log_prefix} Checksum下载失败，将在1秒后重试 (尝试 {attempt + 1}/{self.max_retries})...")
                time.sleep(1)
                continue

            # 3. Verify the checksum.
            if _verify_checksum(zip_save_path, checksum_save_path):
                # 4. Unzip.
                csv_file_path = _unzip_file(zip_save_path, extract_dir)
                if csv_file_path:
                    # 5. Parse and store in the database.
                    self._parse_csv_and_save_to_db(csv_file_path, symbol, timeframe)
                    # Clean up the archive files on success.
                    os.remove(zip_save_path)
                    os.remove(checksum_save_path)
                    return f"{log_prefix} Success"
                return f"{log_prefix} Failed (Unzip error)"

            print(f"{log_prefix} 校验和不匹配！将在1秒后重试 (尝试 {attempt + 1}/{self.max_retries})...")
            os.remove(zip_save_path)  # discard the corrupt archive
            time.sleep(1)

        return f"{log_prefix} Failed (After {self.max_retries} retries)"

    def run(self, symbols: List[str], timeframes: List[Timeframe], start_date_str: str, end_date_str: str):
        """
        Run all download tasks across a thread pool.

        Args:
            symbols: Trading pairs to download.
            timeframes: Timeframes to download.
            start_date_str: Inclusive start month, "YYYY-MM".
            end_date_str: Inclusive end month, "YYYY-MM".
        """
        start_time = time.time()

        # 1. Expand the inclusive month range into (year, month) pairs.
        dates = []
        start_year, start_month = map(int, start_date_str.split('-'))
        end_year, end_month = map(int, end_date_str.split('-'))
        year, month = start_year, start_month
        while year < end_year or (year == end_year and month <= end_month):
            dates.append((year, month))
            month += 1
            if month > 12:
                month = 1
                year += 1

        # 2. Cross product of symbols x timeframes x months.
        tasks_to_run = list(product(symbols, timeframes, dates))
        total_tasks = len(tasks_to_run)
        print(f"总共需要处理 {total_tasks} 个文件任务...")

        # 3. Fan out over a thread pool (downloads are I/O-bound).
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            future_to_task = {
                executor.submit(self._process_single_task, symbol, timeframe, date[0], date[1]): (
                    symbol, timeframe.value, date)
                for symbol, timeframe, date in tasks_to_run
            }

            completed_count = 0
            for future in concurrent.futures.as_completed(future_to_task):
                try:
                    result = future.result()
                    # print(result)  # uncomment to see each task's final status
                except Exception as exc:
                    print(f"任务生成了异常: {exc}")

                completed_count += 1
                print(f"进度: {completed_count}/{total_tasks} ({completed_count / total_tasks:.2%})", end='\r')

        print(f"\n\n所有任务已完成，总耗时: {time.time() - start_time:.2f} 秒。")


if __name__ == '__main__':
    # --- Configuration ---
    db_url = "postgresql://postgres:postgres@localhost:15432/crypto?client_encoding=utf8"
    save_root = r"F:\personal\binance_klines"

    # Symbols to fetch; uncomment entries to include them.
    symbols_to_download = [
        "ETHUSDT",
        # "BTCUSDT", "BNBUSDT", "XRPUSDT", "XLMUSDT", "TRXUSDT",
        # "HYPEUSDT", "SUIUSDT", "LTCUSDT", "ENAUSDT", "BCHUSDT",
        # "UNIUSDT", "AVAXUSDT", "MNTUSDT", "LINKUSDT", "PUMPUSDT",
        # "TONUSDT", "HBARUSDT", "AAVEUSDT",
    ]

    # Timeframes to fetch; uncomment entries to include them.
    timeframes_to_download = [
        Timeframe.TIMEFRAME_1H,
        # Timeframe.TIMEFRAME_1M, Timeframe.TIMEFRAME_5M, Timeframe.TIMEFRAME_15M,
        # Timeframe.TIMEFRAME_30M, Timeframe.TIMEFRAME_4H, Timeframe.TIMEFRAME_1D,
    ]

    # Inclusive month range, "YYYY-MM".
    start_month = "2024-06"
    end_month = "2024-06"

    # 1. Connect to the database (fail fast if it is unreachable).
    try:
        repo = TimescaleRepository(db_url=db_url)
    except Exception:
        print("无法连接到数据库，请检查 DB_URL 和数据库服务。")
        raise

    # 2. Build the downloader and 3. run the full download job.
    KlineDownloader(
        repository=repo,
        base_save_path=save_root,
        max_workers=10,
        max_retries=3,
    ).run(
        symbols=symbols_to_download,
        timeframes=timeframes_to_download,
        start_date_str=start_month,
        end_date_str=end_month,
    )
