import json
import os
import argparse
from collections import defaultdict
from typing import Dict, List, Any

import pandas as pd


def load_jsonl(file_path: str) -> List[Dict[str, Any]]:
    records: List[Dict[str, Any]] = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            try:
                records.append(json.loads(line))
            except json.JSONDecodeError:
                continue
    return records


def extract_core_fields(records: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Project raw records onto the fixed training schema.

    Label id/name are resolved with a fallback chain: top-level field,
    then the ``evaluation`` sub-dict, then the ``classification`` sub-dict.
    Records missing either a resolved label or a ``unit_id`` are dropped.
    """
    core: List[Dict[str, Any]] = []
    for rec in records:
        eval_block = rec.get("evaluation") or {}
        cls_block = rec.get("classification") or {}

        # First non-None value wins, in top-level -> evaluation -> classification order.
        label_value = next(
            (v for v in (rec.get("label"), eval_block.get("label_id"), cls_block.get("label_id")) if v is not None),
            None,
        )
        label_name = next(
            (v for v in (rec.get("label_name"), eval_block.get("label_name"), cls_block.get("label_name")) if v is not None),
            None,
        )

        entry = {
            "label": label_value,
            "label_name": label_name,
            "id": rec.get("unit_id"),
            "add_content": rec.get("add_content"),
            "remove_content": rec.get("remove_content"),
            "context_before": rec.get("context_before"),
            "context_after": rec.get("context_after"),
        }
        if entry["label"] is not None and entry["id"] is not None:
            core.append(entry)
    return core


def stratified_kfold_split(core: List[Dict[str, Any]], k: int = 5, seed: int = 42) -> List[List[int]]:
    """Split ``core`` into ``k`` stratified folds of indices.

    Items are grouped by their ``"label"`` value; each group is shuffled
    with a seeded RNG (deterministic for a given ``seed``) and dealt
    round-robin across the folds, so every fold receives floor or ceil of
    each label's share (stratification).

    Bug fix: the round-robin cursor now carries over between labels.
    Previously every label restarted at fold 0, so each label's remainder
    items always landed in the lowest-numbered folds, making fold 0
    systematically the largest. With the rolling cursor, fold sizes differ
    by at most the number of labels' rounding, and in practice by at most 1
    per label group.

    Returns a list of ``k`` lists of indices into ``core`` (a partition of
    ``range(len(core))``).
    """
    import random

    label_to_indices: Dict[Any, List[int]] = defaultdict(list)
    for idx, item in enumerate(core):
        label_to_indices[item["label"]].append(idx)

    rng = random.Random(seed)

    folds: List[List[int]] = [[] for _ in range(k)]
    cursor = 0  # rolls over between labels so remainders spread across folds
    for indices in label_to_indices.values():
        rng.shuffle(indices)
        for sample_idx in indices:
            folds[cursor % k].append(sample_idx)
            cursor += 1
    return folds


def save_jsonl(path: str, rows: List[Dict[str, Any]]) -> None:
    """Write ``rows`` to ``path`` as UTF-8 JSON Lines (one object per line)."""
    with open(path, "w", encoding="utf-8") as fh:
        # writelines with a generator streams one serialized row at a time.
        fh.writelines(json.dumps(row, ensure_ascii=False) + "\n" for row in rows)


def main() -> None:
    """CLI entry point.

    Pipeline: locate/load a labeled JSONL file, project records to the core
    schema, optionally drop records whose built text exceeds a token budget,
    produce a stratified 5-fold split, and write per-fold files (raw and
    model-ready) plus JSON metadata into the output directory.

    NOTE(review): argparse help strings are user-facing Chinese text and are
    intentionally left unchanged.
    """
    parser = argparse.ArgumentParser(description="准备训练数据并进行5折分层，可选token长度过滤")
    parser.add_argument(
        "--input",
        type=str,
        default=None,
        help="输入JSONL路径，默认使用项目内evaluation_result/auto_pass_*.jsonl",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="data",
        help="输出目录",
    )
    parser.add_argument(
        "--text_format",
        type=str,
        choices=["with_context", "bert_pair"],
        default="with_context",
        help="文本构造：with_context 或 bert_pair",
    )
    parser.add_argument(
        "--filter_by_tokens",
        action="store_true",
        help="是否按token长度过滤（默认关闭）",
    )
    parser.add_argument(
        "--no_filter_by_tokens",
        action="store_true",
        help="关闭token过滤（高优先级）",
    )
    parser.add_argument(
        "--max_tokens",
        type=int,
        default=512,
        help="最大token长度（含）",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="bert-base-chinese",
        help="用于统计token长度的分词器",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="随机种子",
    )
    args = parser.parse_args()

    if args.input:
        input_path = args.input
    else:
        # No explicit input: fall back to the most recently modified
        # auto_pass_*.jsonl under <project_root>/data/evaluation_result.
        project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
        eval_dir = os.path.join(project_root, "data", "evaluation_result")
        candidates = []
        if os.path.isdir(eval_dir):
            for name in os.listdir(eval_dir):
                if name.startswith("auto_pass_") and name.endswith(".jsonl"):
                    candidates.append(os.path.join(eval_dir, name))
        # Newest file by mtime wins; None when the directory had no match.
        input_path = max(candidates, key=lambda p: os.path.getmtime(p)) if candidates else None
        if input_path is None:
            raise SystemExit("未在 data/evaluation_result 下找到 auto_pass_*.jsonl，请使用 --input 指定输入文件")

    output_dir = args.output_dir
    os.makedirs(output_dir, exist_ok=True)

    records = load_jsonl(input_path)
    core = extract_core_fields(records)

    def build_text(context_before: str, add_content: str, remove_content: str, context_after: str):
        """Build the model input: a (text_a, text_b) tuple for bert_pair,
        otherwise a single string with 50-char context windows on each side.
        """
        if args.text_format == "bert_pair":
            return (remove_content or "", add_content or "")
        cb = context_before or ""
        ca = context_after or ""
        # Keep only the tail/head of the surrounding context to bound length.
        cb_tail = cb[-50:] if len(cb) > 50 else cb
        ca_head = ca[:50] if len(ca) > 50 else ca
        return f"{cb_tail}【原内容】{remove_content or ''}【修改为】{add_content or ''}【后续内容】{ca_head}"

    # --no_filter_by_tokens always wins over --filter_by_tokens.
    filter_by_tokens = False if getattr(args, "no_filter_by_tokens", False) else bool(getattr(args, "filter_by_tokens", False))

    filtered_core: List[Dict[str, Any]] = []
    filtered_out: List[Dict[str, Any]] = []
    tokenizer = None
    if filter_by_tokens:
        try:
            from transformers import AutoTokenizer
            tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
        except Exception as e:
            # Best-effort: without a tokenizer the length filter is skipped entirely.
            print(f"[WARN] 无法加载分词器 {args.tokenizer}: {e}，将跳过按token长度过滤")
            tokenizer = None

    for item in core:
        text_or_pair = build_text(
            item.get("context_before"),
            item.get("add_content"),
            item.get("remove_content"),
            item.get("context_after"),
        )
        token_len = None
        if tokenizer is not None:
            if args.text_format == "bert_pair" and isinstance(text_or_pair, tuple):
                # Pair encoding counts [CLS]/[SEP] separators via add_special_tokens.
                a, b = text_or_pair
                enc = tokenizer.encode_plus(a, b, add_special_tokens=True)
                ids = enc.get("input_ids", [])
            else:
                ids = tokenizer.encode(text_or_pair if isinstance(text_or_pair, str) else str(text_or_pair), add_special_tokens=True)
            token_len = len(ids)
            if token_len > args.max_tokens:
                # Over budget: record why it was dropped, then skip it.
                filtered_out.append({
                    "id": item.get("id"),
                    "label": item.get("label"),
                    "label_name": item.get("label_name"),
                    "token_len": token_len,
                    "max_tokens": args.max_tokens,
                })
                continue
        new_item = {
            "label": item.get("label"),
            "label_name": item.get("label_name"),
            "id": item.get("id"),
            "add_content": item.get("add_content"),
            "remove_content": item.get("remove_content"),
            "context_before": item.get("context_before"),
            "context_after": item.get("context_after"),
        }
        # Stash the built text under dunder keys; they are stripped from the
        # origin_fold_* files and renamed in the formatted fold_* files below.
        if args.text_format == "bert_pair" and isinstance(text_or_pair, tuple):
            a, b = text_or_pair
            new_item["__text_a__"] = a
            new_item["__text_b__"] = b
        else:
            new_item["__text__"] = text_or_pair if isinstance(text_or_pair, str) else str(text_or_pair)
        filtered_core.append(new_item)

    k = 5
    folds = stratified_kfold_split(filtered_core, k=k, seed=args.seed)
    if filtered_out:
        # Persist dropped records plus a summary of their label distribution.
        with open(os.path.join(output_dir, "filtered_out.jsonl"), "w", encoding="utf-8") as f:
            for r in filtered_out:
                f.write(json.dumps(r, ensure_ascii=False) + "\n")
        df_f = pd.DataFrame(filtered_out)
        filtered_meta = {
            "filtered_count": len(filtered_out),
            "filtered_label_distribution": df_f["label"].value_counts().to_dict() if not df_f.empty else {},
        }
        with open(os.path.join(output_dir, "filtered_meta.json"), "w", encoding="utf-8") as f:
            json.dump(filtered_meta, f, ensure_ascii=False, indent=2)
    # origin_fold_*.jsonl: raw schema, with the internal __text__* keys removed.
    for i in range(k):
        fold_rows = []
        for j in folds[i]:
            it = filtered_core[j].copy()
            it.pop("__text__", None)
            it.pop("__text_a__", None)
            it.pop("__text_b__", None)
            fold_rows.append(it)
        save_jsonl(os.path.join(output_dir, f"origin_fold_{i+1}.jsonl"), fold_rows)

    # split_meta.json: NOTE "total" is the pre-filter count (len(core)), while
    # the per-fold sizes reflect the post-filter data — presumably intentional.
    meta = {"total": len(core), "label_distribution": {}, "folds": []}
    df = pd.DataFrame(filtered_core)
    if not df.empty and "label" in df.columns:
        meta["label_distribution"] = df["label"].value_counts().to_dict()
    for i in range(k):
        labels = [filtered_core[j]["label"] for j in folds[i]]
        s = pd.Series(labels)
        meta["folds"].append({
            "fold": i + 1,
            "size": len(labels),
            "label_distribution": s.value_counts().to_dict() if not s.empty else {},
        })
    with open(os.path.join(output_dir, "split_meta.json"), "w", encoding="utf-8") as f:
        json.dump(meta, f, ensure_ascii=False, indent=2)

    # fold_*.jsonl: model-ready rows (text / text_a+text_b + label fields).
    for i in range(k):
        out_path = os.path.join(output_dir, f"fold_{i+1}.jsonl")
        formatted: List[Dict[str, Any]] = []
        for j in folds[i]:
            row = filtered_core[j]
            if args.text_format == "bert_pair" and ("__text_a__" in row and "__text_b__" in row):
                formatted.append({
                    "id": row.get("id"),
                    "text_a": row.get("__text_a__"),
                    "text_b": row.get("__text_b__"),
                    "label_name": row.get("label_name"),
                    "label_id": row.get("label"),
                })
            else:
                formatted.append({
                    "id": row.get("id"),
                    "text": row.get("__text__"),
                    "label_name": row.get("label_name"),
                    "label_id": row.get("label"),
                })
        save_jsonl(out_path, formatted)

    # Final summary to stdout for quick inspection / log scraping.
    print(json.dumps({
        "kept_total": len(filtered_core),
        "filtered_total": len(filtered_out),
        "label_distribution": meta.get("label_distribution", {}),
        "fold_sizes": {f"fold_{i+1}": len([idx for idx in folds[i]]) for i in range(k)},
    }, ensure_ascii=False, indent=2))
    print(f"Created {k} folds under: {output_dir}")


# Script entry guard: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()


