"""Import header used by generated scripts when not embedding the full snippet.
This sets project root on sys.path and imports the helper API from src.bootstrap_helpers
so generated scripts can remain minimal (one-line import equivalent).
"""
from pathlib import Path
"""scripts/merge1.py

扫描 `data` 目录下的文件（支持 csv, tsv, json, jsonl, yaml），收集每个文件的字段名并以 rich.Table 输出。

该脚本对缺失或无法解析的文件会记录错误并继续处理其它文件。
"""

from pathlib import Path
import sys
import os
import json
from typing import Iterable, Set, List, Dict, Any

# Ensure project root is on sys.path so relative imports (if any) work
ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(ROOT))

try:
    # Prefer the project-wide logging helper when the package is importable.
    from src.bootstrap_helpers import get_logger
except Exception:
    # Minimal stand-in used outside the project: a stream logger with a
    # timestamped format, configured only once per logger name.
    def get_logger(name: str = "merge1", level: str = "INFO"):
        """Return a configured stdlib logger (fallback implementation)."""
        import logging

        lg = logging.getLogger(name)
        if not lg.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(
                logging.Formatter("%(asctime)s %(levelname)s %(message)s")
            )
            lg.addHandler(handler)
        lg.setLevel(level)
        return lg

logger = get_logger("merge1")


def _collect_csv_fields(path: Path) -> List[str]:
    try:
        import pandas as pd
        df = pd.read_csv(path, nrows=0)
        return list(df.columns)
    except Exception:
        # fallback to naive header read
        try:
            with path.open('r', encoding='utf-8') as f:
                header = f.readline().strip()
            if header:
                sep = ','
                if '\t' in header:
                    sep = '\t'
                return [c.strip() for c in header.split(sep) if c.strip()]
        except Exception:
            pass
    return []


def _collect_json_fields(path: Path, max_items: int = 5) -> List[str]:
    try:
        with path.open('r', encoding='utf-8') as f:
            obj = json.load(f)
        if isinstance(obj, dict):
            return list(obj.keys())
        elif isinstance(obj, list):
            keys: Set[str] = set()
            for i, item in enumerate(obj):
                if isinstance(item, dict):
                    keys.update(item.keys())
                if i + 1 >= max_items:
                    break
            return sorted(keys)
    except Exception:
        return []


def _collect_jsonl_fields(path: Path, max_lines: int = 20) -> List[str]:
    keys: Set[str] = set()
    try:
        with path.open('r', encoding='utf-8') as f:
            for i, line in enumerate(f):
                if not line.strip():
                    continue
                try:
                    obj = json.loads(line)
                    if isinstance(obj, dict):
                        keys.update(obj.keys())
                except Exception:
                    continue
                if i + 1 >= max_lines:
                    break
    except Exception:
        return []
    return sorted(keys)


def _collect_yaml_fields(path: Path, max_docs: int = 5) -> List[str]:
    try:
        import yaml
        with path.open('r', encoding='utf-8') as f:
            docs = list(yaml.safe_load_all(f))
        keys: Set[str] = set()
        for i, doc in enumerate(docs):
            if isinstance(doc, dict):
                keys.update(doc.keys())
            elif isinstance(doc, list):
                for item in doc[:max_docs]:
                    if isinstance(item, dict):
                        keys.update(item.keys())
            if i + 1 >= max_docs:
                break
        return sorted(keys)
    except Exception:
        return []


def inspect_file(path: Path) -> Dict[str, Any]:
    """Inspect one file and return {path, type, fields, error}.

    Dispatches on the (lowercased) extension; files with an unknown or
    missing extension are sniffed from their first 2 KiB: JSON-looking
    content vs. a delimiter in the first line (CSV). Unexpected failures
    are captured in the 'error' slot instead of propagating.
    """
    ext = path.suffix.lower()
    report: Dict[str, Any] = {
        "path": str(path),
        "type": ext or "(no-ext)",
        "fields": [],
        "error": None,
    }
    try:
        if ext in ('.csv', '.tsv'):
            report['type'] = 'csv' if ext == '.csv' else 'tsv'
            report['fields'] = _collect_csv_fields(path)
        elif ext == '.json':
            report['fields'] = _collect_json_fields(path)
        elif ext in ('.jsonl', '.ndjson'):
            report['type'] = 'jsonl'
            report['fields'] = _collect_jsonl_fields(path)
        elif ext in ('.yml', '.yaml'):
            report['fields'] = _collect_yaml_fields(path)
        else:
            # Unknown extension: sniff a small prefix of the content.
            sample = ''
            try:
                with path.open('r', encoding='utf-8') as handle:
                    sample = handle.read(2048)
            except Exception:
                pass
            first_line = sample.split('\n', 1)[0]
            if sample.lstrip().startswith(('{', '[')):
                report['type'] = 'json'
                try:
                    report['fields'] = _collect_json_fields(path)
                except Exception:
                    report['fields'] = []
            elif '\n' in sample and (',' in first_line or '\t' in first_line):
                report['type'] = 'csv'
                report['fields'] = _collect_csv_fields(path)
            else:
                report['error'] = 'unsupported/unknown format'
    except Exception as exc:
        report['error'] = str(exc)
    return report


def render_table(results: Iterable[Dict[str, Any]]):
    """Print a rich table summarising field-inspection results.

    Error rows show the message in the fields column; normal rows show at
    most the first 20 field names, with an ellipsis marker beyond that.
    """
    from rich.console import Console
    from rich.table import Table

    table = Table(title="Data files fields summary")
    table.add_column("path", overflow='fold')
    table.add_column("type")
    table.add_column("#fields", justify='right')
    table.add_column("fields", overflow='fold')

    for entry in results:
        if entry.get('error'):
            table.add_row(entry['path'], entry.get('type', ''), "-", f"ERROR: {entry['error']}")
            continue
        names = entry.get('fields') or []
        shown = ', '.join(names[:20])
        if len(names) > 20:
            shown += ', ...'
        table.add_row(entry['path'], entry.get('type', ''), str(len(names)), shown)

    Console().print(table)


def _candidate_files(data_dir: Path, recursive: bool) -> List[Path]:
    """Return sorted files under *data_dir* that look CSV/TSV-like.

    Keeps .csv/.tsv/extension-less files, skips macOS '.DS_Store', and for
    other extensions sniffs the first line for a comma or tab.
    """
    if recursive:
        files = [p for p in data_dir.rglob('*') if p.is_file()]
    else:
        files = [p for p in data_dir.iterdir() if p.is_file()]

    selected: List[Path] = []
    for p in sorted(files):
        if p.name == '.DS_Store':
            logger.debug(f"skipping system file {p.name}")
            continue
        if p.suffix.lower() in ('.csv', '.tsv') or p.suffix == '':
            selected.append(p)
        else:
            # Sniff up to 256 chars of the first line: a comma or tab
            # suggests delimited text worth inspecting.
            try:
                with p.open('r', encoding='utf-8') as f:
                    first = f.readline(256)
                if first and (',' in first or '\t' in first):
                    selected.append(p)
            except Exception:
                logger.debug(f"ignoring non-text or unreadable file {p}")
    return selected


def _report_field_differences(results: List[Dict[str, Any]]) -> None:
    """Log field-set consistency across inspected files; print a diff table.

    Groups files by their (frozen) field set. With more than one distinct
    set, the most common one is the baseline and every other group is shown
    with its missing/extra fields.
    """
    from collections import defaultdict

    mapping: Dict[frozenset, List[str]] = defaultdict(list)  # field set -> paths
    for r in results:
        if r.get('error'):
            continue
        mapping[frozenset(r.get('fields') or [])].append(r['path'])

    if not mapping:
        logger.info('No CSV files found to compare.')
        return
    if len(mapping) == 1:
        logger.info('All CSV files have identical field sets.')
        return

    logger.warning('CSV files have differing field sets. Detailed differences follow:')
    # Baseline = the field set shared by the most files.
    baseline = max(mapping.items(), key=lambda kv: len(kv[1]))[0]
    baseline_fields = sorted(baseline)

    from rich.console import Console
    from rich.table import Table
    diff_table = Table(title='CSV field-set differences')
    diff_table.add_column('file')
    diff_table.add_column('missing (vs baseline)', overflow='fold')
    diff_table.add_column('extra (vs baseline)', overflow='fold')
    for field_set, paths in mapping.items():
        if field_set == baseline:
            continue
        missing = [f for f in baseline_fields if f not in field_set]
        extra = [f for f in sorted(field_set) if f not in baseline]
        for path in paths:
            diff_table.add_row(path, ', '.join(missing) or '-', ', '.join(extra) or '-')
    Console().print(diff_table)


def main(argv: List[str] | None = None):
    """CLI entry point: scan a directory and report CSV/TSV field sets.

    Returns 0 on success, 2 when the data directory does not exist.
    (Refactored: removed the unused `console_msg` variable and the
    redundant `sets` list; split file selection and difference reporting
    into private helpers.)
    """
    import argparse
    parser = argparse.ArgumentParser(description='Scan data directory and report fields for files')
    parser.add_argument('--dir', '-d', default='data', help='data directory to scan')
    parser.add_argument('--recursive', '-r', action='store_true', help='scan recursively')
    parser.add_argument('--verbose', '-v', action='store_true', help='verbose logging')
    args = parser.parse_args(argv)

    if args.verbose:
        logger.setLevel('DEBUG')

    data_dir = Path(args.dir)
    if not data_dir.exists() or not data_dir.is_dir():
        logger.error(f"data directory not found: {data_dir.resolve()}")
        return 2

    results = []
    for p in _candidate_files(data_dir, args.recursive):
        logger.debug(f"inspecting {p}")
        info = inspect_file(p)
        # Keep only csv/tsv-typed results for the table.
        if info.get('type') in ('csv', 'tsv') or p.suffix.lower() == '.csv':
            results.append(info)

    render_table(results)
    _report_field_differences(results)
    return 0


if __name__ == '__main__':
    raise SystemExit(main())
