#!/usr/bin/env python3
"""合并数据（包装脚本）

此脚本保留 smoke 的开始/结束样式与日志配置，并调用 `scripts/merge1.py` 的功能来扫描 `data/` 下的 CSV 并比较字段集合。
"""
from pathlib import Path
import sys
import os

# Ensure project root is on sys.path
ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(ROOT))

try:
    from src.bootstrap_helpers import start, end, set_plot_style, get_logger, load_config_from_file
except Exception:
    # Minimal stand-ins so the script still runs when src.bootstrap_helpers
    # is unavailable (e.g. running the file outside the project layout).

    def get_logger(name: str = "合并数据", level: str = "INFO"):
        """Return a console logger, attaching a stream handler only once."""
        import logging
        lg = logging.getLogger(name)
        if not lg.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
            lg.addHandler(handler)
        lg.setLevel(level)
        return lg

    def start(label: str = None):
        """Mark a task start; defer to the real helper if it imports now."""
        import time
        stamp = time.time()
        try:
            from src.bootstrap_helpers import start as _s
            return _s(label)
        except Exception:
            print(f"开始 {label or ''}")
            return stamp

    def end(start_time: float, label: str = None):
        """Mark a task end; defer to the real helper if it imports now."""
        import time
        try:
            from src.bootstrap_helpers import end as _e
            return _e(start_time, label)
        except Exception:
            print(f"结束 {label or ''} 耗时 {int(time.time()-start_time)}s")

    def set_plot_style(*args, **kwargs):
        """No-op placeholder: this script produces no plots."""
        return

    def load_config_from_file(path: str = "config.yaml"):
        """No config support in fallback mode; behave as an empty config."""
        return {}

# Module-level logger shared by main() and its helpers; level is adjusted
# later from CLI flags / config / environment.
logger = get_logger('合并数据')


def _find_csv_files(data_dir: Path, recursive: bool) -> list:
    """Collect candidate CSV/TSV files under *data_dir*.

    A file qualifies when its suffix is .csv/.tsv, when it has no suffix,
    or when its first line looks delimited (contains a comma or a tab).
    `.DS_Store` is always skipped; unreadable files are silently ignored.
    """
    if recursive:
        files = [p for p in data_dir.rglob('*') if p.is_file()]
    else:
        files = [p for p in data_dir.iterdir() if p.is_file()]

    csv_files = []
    for p in sorted(files):
        if p.name == '.DS_Store':
            continue
        if p.suffix.lower() in ('.csv', '.tsv') or p.suffix == '':
            csv_files.append(p)
        else:
            # Sniff only the first 256 chars to avoid reading big binaries.
            try:
                with p.open('r', encoding='utf-8') as f:
                    first = f.readline(256)
                if first and (',' in first or '\t' in first):
                    csv_files.append(p)
            except Exception:
                continue
    return csv_files


def _report_field_differences(results) -> None:
    """Group inspected files by field set and report any mismatches.

    The most common field set is used as the baseline; every other set is
    printed with its missing/extra columns relative to that baseline.
    """
    from collections import defaultdict
    mapping = defaultdict(list)
    for r in results:
        if r.get('error'):
            continue
        mapping[frozenset(r.get('fields') or [])].append(r['path'])

    if not mapping:
        logger.info('No CSV files found to compare.')
        return
    if len(mapping) == 1:
        logger.info('All CSV files have identical field sets.')
        return

    logger.warning('CSV files have differing field sets. Detailed differences follow:')
    # Baseline = the field set shared by the largest number of files.
    baseline = max(mapping.items(), key=lambda kv: len(kv[1]))[0]
    baseline_fields = sorted(baseline)
    from rich.console import Console
    from rich.table import Table
    console = Console()
    diff_table = Table(title='CSV field-set differences')
    diff_table.add_column('file')
    diff_table.add_column('missing (vs baseline)', overflow='fold')
    diff_table.add_column('extra (vs baseline)', overflow='fold')
    for s, paths in mapping.items():
        if s == baseline:
            continue
        s_fields = sorted(s)
        missing = [f for f in baseline_fields if f not in s_fields]
        extra = [f for f in s_fields if f not in baseline_fields]
        for p in paths:
            diff_table.add_row(p, ', '.join(missing) or '-', ', '.join(extra) or '-')
    console.print(diff_table)


def _merge_and_write(results, out_path) -> None:
    """Concatenate every successfully inspected CSV and write *out_path*.

    Columns are aligned to the union (first-seen order); rows are stacked
    vertically. All cells are read as strings to avoid dtype surprises.
    """
    import pandas as pd
    csv_paths = [Path(r['path']) for r in results if not r.get('error')]
    # 按文件名排序，确保合并顺序稳定
    csv_paths = sorted(csv_paths, key=lambda p: str(p.name))
    dfs = []
    for p in csv_paths:
        try:
            dfs.append(pd.read_csv(p, dtype=str))
        except Exception:
            # fallback: some exports carry a UTF-8 BOM
            try:
                dfs.append(pd.read_csv(p, dtype=str, encoding='utf-8-sig'))
            except Exception:
                logger.exception(f'读取 CSV 失败: {p}')
    if not dfs:
        logger.info('没有可用于合并的 CSV 文件')
        return
    # Align columns by union (preserving first-seen order) and concat.
    all_cols = []
    for df in dfs:
        for c in df.columns:
            if c not in all_cols:
                all_cols.append(c)
    aligned = [df.reindex(columns=all_cols) for df in dfs]
    merged = pd.concat(aligned, axis=0, ignore_index=True)
    merged.to_csv(out_path, index=False)
    logger.info(f'已将 {len(dfs)} 个 CSV 合并为: {out_path} (rows={len(merged)})')


def main(argv=None):
    """CLI entry point.

    Scans CSVs under --dir (optionally recursive), reuses scripts/merge1.py
    to inspect each file and render a summary table, reports field-set
    differences, then concatenates all readable CSVs into one output file.
    Always returns 0; failures are logged rather than raised.
    """
    import argparse
    parser = argparse.ArgumentParser(description='合并数据：扫描 data 下 CSV 并比较字段集合（包装脚本）')
    parser.add_argument('--dir', '-d', default='data', help='data 目录')
    parser.add_argument('--recursive', '-r', action='store_true', help='递归扫描')
    parser.add_argument('--verbose', '-v', action='store_true', help='详细日志')
    parser.add_argument('--no-run', action='store_true', help='生成但不实际执行主要计算')
    parser.add_argument('--output', '-o', default=None, help='输出合并后 CSV 路径（默认 outputs/merged_YYYYmmdd_HHMMSS.csv）')
    args = parser.parse_args(argv)

    # config and logging (guard against a config loader that returns None)
    cfg = load_config_from_file('config.yaml') if callable(load_config_from_file) else {}
    cfg = cfg or {}
    log_level = 'DEBUG' if args.verbose else cfg.get('log_level', os.environ.get('LOG_LEVEL', 'INFO'))
    logger.setLevel(log_level)

    TASK_LABEL = '区县基本数据合并'
    start_time = None
    try:
        start_time = start(TASK_LABEL)
    except Exception:
        start_time = None

    if args.no_run:
        logger.info('no-run 模式，跳过主要计算')
        if callable(end) and start_time is not None:
            end(start_time, TASK_LABEL)
        return 0

    # apply style hooks if available (kept for consistency though no plotting here)
    try:
        set_plot_style()
    except Exception:
        pass

    # BUG FIX: initialize before the try so the merge step below cannot hit a
    # NameError when loading merge1 fails early.
    results = []
    try:
        # Load scripts/merge1.py by path so we can reuse its inspect/render
        # helpers while orchestrating the loop (and progress bar) ourselves.
        from importlib.util import spec_from_file_location, module_from_spec
        spec = spec_from_file_location('merge1_impl', str(ROOT / 'scripts' / 'merge1.py'))
        merge_mod = module_from_spec(spec)
        loader = spec.loader
        if loader is None:
            raise ImportError('无法加载 merge1 模块')
        loader.exec_module(merge_mod)

        inspect_file = getattr(merge_mod, 'inspect_file')
        render_table = getattr(merge_mod, 'render_table')

        csv_files = _find_csv_files(Path(args.dir), args.recursive)

        # process with a rich progress bar
        from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn, TimeElapsedColumn
        if csv_files:
            # use task.description to show current filename reliably
            with Progress(SpinnerColumn(), TextColumn('[cyan]{task.description}'), BarColumn(), TimeElapsedColumn(), transient=False, refresh_per_second=10) as prog:
                task = prog.add_task('', total=len(csv_files))
                for p in csv_files:
                    prog.update(task, description=str(p.name))
                    results.append(inspect_file(p))
                    prog.advance(task)
        else:
            logger.info('未发现 CSV 文件')

        # render table and perform consistency check (reuse merge1 logic)
        render_table(results)
        _report_field_differences(results)

        logger.info('merge1 返回：0')
    except Exception:
        logger.exception('调用 merge1 功能失败')

    # Resolve the output path (timestamped default under outputs/).
    out_path = args.output
    if out_path is None:
        from datetime import datetime
        ts = datetime.now().strftime('%Y%m%d_%H%M%S')
        out_dir = ROOT / 'outputs'
        out_dir.mkdir(parents=True, exist_ok=True)
        out_path = out_dir / f'merged_{ts}.csv'
    else:
        out_path = Path(out_path)
        out_path.parent.mkdir(parents=True, exist_ok=True)

    # If user requested an output, perform concatenation (append) of all CSVs
    try:
        _merge_and_write(results, out_path)
    except Exception:
        logger.exception('合并 CSV 失败')

    try:
        if callable(end) and start_time is not None:
            end(start_time, TASK_LABEL)
    except Exception:
        logger.debug('结束回调失败')

    return 0


# Script entry point: propagate main()'s return code as the process exit status.
if __name__ == '__main__':
    raise SystemExit(main())
