import argparse
import json
import os
import random
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

import numpy as np
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, classification_report
import torch
from torch.utils.data import Dataset


def set_seed(seed: int) -> None:
    """Seed every RNG used by this pipeline (Python, NumPy, PyTorch CPU and GPU)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)


def load_jsonl(path: str) -> List[Dict[str, Any]]:
    """Parse a JSON-Lines file and return its records, skipping blank lines."""
    with open(path, "r", encoding="utf-8") as handle:
        stripped = (raw.strip() for raw in handle)
        return [json.loads(raw) for raw in stripped if raw]


def ensure_dir(path: str) -> None:
    """Idempotently create directory *path*, including any missing parents."""
    if not os.path.isdir(path):
        os.makedirs(path, exist_ok=True)


@dataclass
class TextLabelItem:
    """A single classification example: one text (or a text pair) plus its label."""

    id: str  # unique example identifier (stringified from the source row)
    text: Optional[str]  # primary text; holds "text_a" for sentence-pair rows
    label_id: str  # raw label identifier string used to build the label mapping
    label_name: Optional[str]  # human-readable label name, if present in the data
    text_b: Optional[str] = None  # second segment; None for single-text examples


def load_folds(data_dir: str) -> Dict[str, List[TextLabelItem]]:
    """Read fold_1.jsonl .. fold_5.jsonl from *data_dir* into TextLabelItem lists.

    Rows missing "id" or "label_id" are dropped. A row with both "text_a" and
    "text_b" becomes a sentence-pair item; otherwise a "text" field is required.
    """
    folds: Dict[str, List[TextLabelItem]] = {}
    for fold_idx in range(1, 6):
        key = f"fold_{fold_idx}"
        parsed: List[TextLabelItem] = []
        for row in load_jsonl(os.path.join(data_dir, f"{key}.jsonl")):
            row_id = row.get("id")
            label = row.get("label_id")
            if row_id is None or label is None:
                continue  # unusable without both identifier and label
            text_a, text_b = row.get("text_a"), row.get("text_b")
            if text_a is not None and text_b is not None:
                parsed.append(
                    TextLabelItem(
                        id=str(row_id),
                        text=str(text_a),
                        text_b=str(text_b),
                        label_id=str(label),
                        label_name=row.get("label_name"),
                    )
                )
            elif row.get("text") is not None:
                parsed.append(
                    TextLabelItem(
                        id=str(row_id),
                        text=str(row.get("text")),
                        label_id=str(label),
                        label_name=row.get("label_name"),
                    )
                )
        folds[key] = parsed
    return folds


def build_label_mapping(train_items: List[TextLabelItem]) -> Dict[str, int]:
    """Map each distinct label_id to a dense index, ordered by (length, value).

    The (len, value) key keeps numeric-string labels in natural order
    ("2" before "10") while remaining deterministic for arbitrary strings.
    """
    distinct = {item.label_id for item in train_items}
    ordered = sorted(distinct, key=lambda lbl: (len(lbl), lbl))
    return {label: index for index, label in enumerate(ordered)}


class ClassificationDataset(Dataset):
    """Torch dataset that tokenizes TextLabelItem records lazily, per access.

    Handles single-text and sentence-pair items alike; every example is
    padded/truncated to ``max_length`` and carries its integer label and id.
    """

    def __init__(self, items: List[TextLabelItem], tokenizer, label2id: Dict[str, int], max_length: int = 512):
        self.items = items
        self.tokenizer = tokenizer
        self.label2id = label2id
        self.max_length = max_length

    def __len__(self) -> int:
        return len(self.items)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        record = self.items[idx]
        # One or two positional text segments, depending on pair vs. single input.
        segments = (record.text,) if record.text_b is None else (record.text, record.text_b)
        encoded = self.tokenizer(
            *segments,
            max_length=self.max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        # Drop the batch dimension the tokenizer adds with return_tensors="pt".
        sample = {key: tensor.squeeze(0) for key, tensor in encoded.items()}
        sample["labels"] = torch.tensor(self.label2id[record.label_id], dtype=torch.long)
        sample["id"] = record.id
        return sample


def compute_metrics_builder(id2label: Dict[int, str]):
    """Return an HF-Trainer-compatible metrics callback (accuracy + F1 variants).

    NOTE(review): *id2label* is currently unused by the callback; it is kept
    for interface stability with callers.
    """

    def compute_metrics(eval_pred):
        logits, labels = eval_pred
        preds = np.argmax(logits, axis=-1)
        metrics = {"accuracy": accuracy_score(labels, preds)}
        for average in ("macro", "micro", "weighted"):
            metrics[f"f1_{average}"] = f1_score(labels, preds, average=average)
        return metrics

    return compute_metrics


def save_eval_artifacts(output_dir: str, y_true: List[int], y_pred: List[int], id2label: Dict[int, str], test_items: List[TextLabelItem], probs: Optional[np.ndarray] = None) -> None:
    """Persist evaluation artifacts for the test fold under *output_dir*.

    Writes three files:
      - confusion_matrix.json: row-major matrix over all known label indices
      - classification_report.txt: sklearn per-class precision/recall/F1
      - test_predictions.jsonl: one record per test item (optional "probs")
    """
    ensure_dir(output_dir)

    all_label_ids = list(range(len(id2label)))
    label_names = [id2label[i] for i in all_label_ids]

    matrix = confusion_matrix(y_true, y_pred, labels=all_label_ids)
    with open(os.path.join(output_dir, "confusion_matrix.json"), "w", encoding="utf-8") as f:
        json.dump(matrix.tolist(), f, ensure_ascii=False, indent=2)

    with open(os.path.join(output_dir, "classification_report.txt"), "w", encoding="utf-8") as f:
        f.write(classification_report(y_true, y_pred, target_names=label_names))

    with open(os.path.join(output_dir, "test_predictions.jsonl"), "w", encoding="utf-8") as f:
        for idx, item in enumerate(test_items):
            record = {
                "id": item.id,
                "text": item.text,
                "label_id_true": item.label_id,
                "label_id_pred": id2label[y_pred[idx]],
                "label_name_true": item.label_name,
                # No id->label_name mapping is available here, so this stays None.
                "label_name_pred": None,
            }
            if probs is not None:
                record["probs"] = probs[idx].tolist()
            f.write(json.dumps(record, ensure_ascii=False) + "\n")


def main():
    """Train a sequence classifier on folds 1-4 and optionally evaluate on fold 5."""
    parser = argparse.ArgumentParser(description="训练分类器：4折训练 + 1折测试")
    parser.add_argument("--data_dir", type=str, default="data")
    parser.add_argument("--output_dir", type=str, default="models")
    parser.add_argument("--model_name", type=str, default="bert-base-chinese", help="HF model id (pass your TinyBERT id if available)")
    parser.add_argument("--epochs", type=int, default=3)
    parser.add_argument("--batch_size", type=int, default=16)
    parser.add_argument("--lr", type=float, default=5e-5)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--warmup_ratio", type=float, default=0.1)
    parser.add_argument("--max_length", type=int, default=256)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--do_eval", action="store_true", help="训练结束是否立即评测（默认不评测，建议用独立脚本评测）")
    args = parser.parse_args()

    set_seed(args.seed)
    ensure_dir(args.output_dir)

    # Folds 1-4 form the training split; fold 5 is held out as the test split.
    folds = load_folds(args.data_dir)
    train_items = folds["fold_1"] + folds["fold_2"] + folds["fold_3"] + folds["fold_4"]
    test_items = folds["fold_5"]
    if not train_items or not test_items:
        raise SystemExit("训练或测试数据为空，请先生成 fold_*.jsonl")

    # Label mapping is derived from the training split only.
    label2id = build_label_mapping(train_items)
    id2label_strid = {v: k for k, v in label2id.items()}

    # Imported lazily so the helpers above stay usable without transformers installed.
    from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification, Trainer, TrainingArguments

    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    num_labels = len(label2id)
    id2label = {i: id2label_strid[i] for i in range(num_labels)}
    config = AutoConfig.from_pretrained(
        args.model_name,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    model = AutoModelForSequenceClassification.from_pretrained(args.model_name, config=config)

    train_ds = ClassificationDataset(train_items, tokenizer, label2id, max_length=args.max_length)
    test_ds = ClassificationDataset(test_items, tokenizer, label2id, max_length=args.max_length)
    # Rough warmup estimate: (optimizer steps per epoch * epochs) * warmup_ratio.
    warmup_steps = int(len(train_ds) * args.epochs * args.warmup_ratio / args.batch_size)
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.epochs,
        learning_rate=args.lr,
        weight_decay=args.weight_decay,
        save_strategy="epoch",
        warmup_steps=warmup_steps,
        fp16=args.fp16,
        logging_steps=50,
        save_total_limit=2,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_ds,
        # BUGFIX: eval_dataset and compute_metrics were previously omitted, so
        # --do_eval made trainer.evaluate() raise ("evaluation requires an
        # eval_dataset") and metrics such as accuracy/F1 were never computed.
        eval_dataset=test_ds,
        compute_metrics=compute_metrics_builder(id2label),
        tokenizer=tokenizer,
    )

    trainer.train()

    if args.do_eval:
        eval_metrics = trainer.evaluate()
        with open(os.path.join(args.output_dir, "eval_metrics.json"), "w", encoding="utf-8") as f:
            json.dump({k: float(v) for k, v in eval_metrics.items()}, f, ensure_ascii=False, indent=2)

        preds_output = trainer.predict(test_ds)
        logits = preds_output.predictions
        y_true = preds_output.label_ids.tolist()
        y_pred = np.argmax(logits, axis=-1).tolist()
        probs = torch.softmax(torch.tensor(logits), dim=-1).numpy()

        save_eval_artifacts(args.output_dir, y_true, y_pred, id2label, test_items, probs)

    # Persist the label mapping next to the checkpoints so inference scripts
    # can recover the exact id<->label correspondence used at training time.
    with open(os.path.join(args.output_dir, "label_mapping.json"), "w", encoding="utf-8") as f:
        json.dump({"label2id": label2id, "id2label": id2label}, f, ensure_ascii=False, indent=2)
    print("Training completed. Models saved to:", args.output_dir)


if __name__ == "__main__":
    # Run the training pipeline only when executed as a script, not on import.
    main()


