#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
批量导入脚本：从本地 data 目录扫描文件，调用 model 目录中的导入方法。

用法示例：
  python BatchImport.py                              # 默认扫描 ./data 下所有支持类型
  python BatchImport.py --dir ../data/kline          # 指定目录
  python BatchImport.py --types hsnapshot,block      # 指定导入类型
  python BatchImport.py --dry-run                    # 仅解析与统计，不写入数据库
"""

import os
import sys
import argparse
from datetime import datetime
from typing import Dict, List, Tuple

import pandas as pd
from loguru import logger

# Change the working directory to the script's own directory so relative
# paths (e.g. the default ./data) resolve consistently no matter where the
# script is launched from.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
os.chdir(SCRIPT_DIR)

# 本地导入 Model 中的类
from model import (
    DatabaseManager,
    HSnapshot,
    Block,
    BlockInfo,
    FBoard,
)
from model.BaseModel import standardize_date_format


def configure_logging(verbose: bool = False) -> None:
    """Set up console logging.

    Removes any pre-existing loguru sinks, then installs a single stdout
    sink whose level is DEBUG when *verbose* is true, INFO otherwise.
    """
    logger.remove()
    logger.add(
        sys.stdout,
        level="DEBUG" if verbose else "INFO",
        format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}",
    )


def init_models() -> DatabaseManager:
    """Create the DatabaseManager and bind it to every model class.

    Returns:
        The initialized manager, so callers may keep a reference to it.
    """
    manager = DatabaseManager()
    manager.init_databases()
    # Every model class shares the same manager instance.
    for model_cls in (HSnapshot, Block, BlockInfo, FBoard):
        model_cls.set_db_manager(manager)
    return manager


def detect_file_type_by_name(filename: str) -> str:
    """Guess the import type from the file name's prefix.

    Matching is case-insensitive. Returns one of "hsnapshot", "blockinfo",
    "block", "fboard", or "unknown" when no prefix matches.
    """
    lowered = filename.lower()
    # "blockinfo" must be tested before "block" so the longer prefix wins.
    for prefix in ("hsnapshot", "blockinfo", "block", "fboard"):
        if lowered.startswith(prefix):
            return prefix
    return "unknown"


# 定义证券代码相关的列名，确保以字符串格式读取
SECURITY_COLUMNS = {
    'securityId': str,
    'security_id': str,
    'security_code': str,
    '证券代码': str,
    '代码': str,
    'tradeDate': int

}

def read_any_table(file_path: str) -> pd.DataFrame:
    """尽力读取表格文件，优先 csv，其次 xlsx。"""
    ext = os.path.splitext(file_path)[1].lower()
    if ext in [".xlsx", ".xls"]:
        df =  pd.read_excel(file_path)
        df.columns = df.columns.str.strip()
        return df
    if ext in [".csv", ".txt"]:    
        df = pd.read_csv(file_path, encoding="utf-8",  dtype=SECURITY_COLUMNS)
        df.columns = df.columns.str.strip()
        return df


def import_hsnapshot(file_path: str, dry_run: bool = False) -> Tuple[str, int]:
    """Import one HSnapshot file.

    Returns:
        (file basename, number of rows imported — or rows parsed in dry-run).
    """
    base_name = os.path.basename(file_path)
    frame = read_any_table(file_path).rename(columns=HSnapshot.aliases)
    if 'trade_date' in frame.columns:
        # Normalize dates, then drop rows whose date could not be parsed.
        frame['trade_date'] = frame['trade_date'].apply(standardize_date_format)
        frame = frame[frame['trade_date'].notna()]

    if dry_run:
        return (base_name, len(frame))

    imported = HSnapshot.import_from_dataframe(frame)
    return (base_name, int(imported or 0))


def import_block(file_path: str, dry_run: bool = False) -> Tuple[str, int]:
    """Import one Block file.

    Returns:
        (file basename, number of rows imported — or rows parsed in dry-run).
    """
    base_name = os.path.basename(file_path)
    frame = read_any_table(file_path).rename(columns=Block.aliases)

    if dry_run:
        return (base_name, len(frame))

    imported = Block.import_from_dataframe(frame)
    return (base_name, int(imported or 0))


def import_blockinfo(file_path: str, dry_run: bool = False) -> Tuple[str, int]:
    """Import one BlockInfo file.

    The file must have at least three columns; the first three are taken as
    block_code, security_id and market (any extra columns are dropped).

    Returns:
        (file basename, number of rows imported — or rows parsed in dry-run).

    Raises:
        ValueError: If the file has fewer than three columns.
    """
    df = read_any_table(file_path)

    df = df.rename(columns=BlockInfo.aliases)
    if len(df.columns) < 3:
        raise ValueError("blockinfo 文件列数不足，需至少3列")
    # Bug fix: the original assigned exactly 3 names to *all* columns, which
    # raised a pandas length-mismatch error whenever the file had more than
    # 3 columns even though the >= 3 guard accepted it. Keep only the first
    # three columns before renaming.
    df = df.iloc[:, :3]
    df.columns = ['block_code', 'security_id', 'market']

    if dry_run:
        return (os.path.basename(file_path), len(df))

    count = BlockInfo.import_from_dataframe(df)
    return (os.path.basename(file_path), int(count or 0))


def import_fboard(file_path: str, dry_run: bool = False) -> Tuple[str, int]:
    """Import one FBoard file.

    Returns:
        (file basename, number of rows imported — or rows parsed in dry-run).
    """
    base_name = os.path.basename(file_path)
    frame = read_any_table(file_path).rename(columns=FBoard.aliases)
    if 'trade_date' in frame.columns:
        # Normalize dates, then drop rows whose date could not be parsed.
        frame['trade_date'] = frame['trade_date'].apply(standardize_date_format)
        frame = frame[frame['trade_date'].notna()]

    if dry_run:
        return (base_name, len(frame))

    imported = FBoard.import_from_dataframe(frame)
    return (base_name, int(imported or 0))


def gather_files(target_dir: str, allow_types: List[str]) -> List[Tuple[str, str]]:
    """Recursively collect importable files under *target_dir*.

    Only files with a supported extension and a recognizable name prefix are
    kept; a non-empty *allow_types* further restricts the accepted types.

    Returns:
        (path, type) pairs sorted by path for a deterministic import order.
    """
    supported_exts = {".csv", ".txt", ".xlsx", ".xls"}
    found: List[Tuple[str, str]] = []
    for root, _dirs, names in os.walk(target_dir):
        for filename in names:
            if os.path.splitext(filename)[1].lower() not in supported_exts:
                continue
            file_type = detect_file_type_by_name(filename)
            if file_type == "unknown":
                continue
            if allow_types and file_type not in allow_types:
                continue
            found.append((os.path.join(root, filename), file_type))
    found.sort(key=lambda item: item[0])
    return found


def main():
    """CLI entry point: parse arguments, scan the data directory, run imports."""
    parser = argparse.ArgumentParser(description="批量导入本地数据到SQLite")
    parser.add_argument("--dir", dest="directory", default=os.path.join(SCRIPT_DIR, "data"), help="数据目录，默认 ./data")
    parser.add_argument("--types", dest="types", default="hsnapshot,block,blockinfo,fboard", help="导入类型，逗号分隔")
    parser.add_argument("--dry-run", dest="dry_run", action="store_true", help="仅解析与统计，不写入数据库")
    parser.add_argument("--verbose", dest="verbose", action="store_true", help="输出更多日志")
    args = parser.parse_args()

    configure_logging(verbose=args.verbose)

    directory = os.path.abspath(args.directory)
    allow_types = [t.strip() for t in args.types.split(',') if t.strip()]
    logger.info(f"导入目录: {directory}")
    logger.info(f"导入类型: {allow_types}")
    logger.info(f"DryRun: {args.dry_run}")

    if not os.path.isdir(directory):
        logger.error(f"目录不存在: {directory}")
        sys.exit(1)

    # Bind the database manager to the model classes before importing.
    init_models()

    files = gather_files(directory, allow_types)
    if not files:
        logger.warning("未发现待导入文件")
        return

    # Dispatch table: file type -> importer function.
    importers = {
        'hsnapshot': import_hsnapshot,
        'block': import_block,
        'blockinfo': import_blockinfo,
        'fboard': import_fboard,
    }

    total_imported = 0
    errors: List[str] = []
    details: List[Tuple[str, str, int]] = []  # (name, type, count)

    for file_path, ftype in files:
        name = os.path.basename(file_path)
        importer = importers.get(ftype)
        if importer is None:
            logger.debug(f"跳过未知类型文件: {name}")
            continue
        try:
            fname, count = importer(file_path, args.dry_run)
        except Exception as e:
            # Record the failure and keep processing the remaining files.
            msg = f"导入失败: [{ftype}] {name}: {e}"
            errors.append(msg)
            logger.error(msg)
            continue
        details.append((fname, ftype, count))
        total_imported += count
        logger.info(f"导入成功: [{ftype}] {fname} -> {count} 条")

    # Summary report.
    logger.info("================ 导入汇总 ================")
    for fname, ftype, count in details:
        logger.info(f"{fname:40s}  {ftype:10s}  {count:6d}")
    logger.info(f"总导入条数: {total_imported}")
    if errors:
        logger.warning("存在失败项：")
        for e in errors:
            logger.warning(e)


# Script entry point: run the batch import when executed directly.
if __name__ == "__main__":
    main()


