import argparse
import json
import os
from typing import Any, Dict, List

import numpy as np
import torch
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, classification_report
from torch.utils.data import Dataset


def load_jsonl(path: str) -> List[Dict[str, Any]]:
    """Parse a JSON-Lines file into a list of dicts, skipping blank lines."""
    with open(path, "r", encoding="utf-8") as f:
        stripped = (raw.strip() for raw in f)
        return [json.loads(s) for s in stripped if s]


def ensure_dir(path: str) -> None:
    """Create directory *path* (including parents); no-op if it already exists."""
    if not os.path.isdir(path):
        os.makedirs(path, exist_ok=True)


class EvalDataset(Dataset):
    """Tokenizes evaluation examples on the fly for ``Trainer.predict``.

    Each item dict must carry ``label`` (int index) and ``id``, plus either a
    single ``text`` field or a ``text_a``/``text_b`` sentence pair.
    """

    def __init__(self, items: List[Dict[str, Any]], tokenizer, max_length: int = 256):
        self.items = items
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self) -> int:
        return len(self.items)

    def __getitem__(self, idx: int):
        record = self.items[idx]
        # Tokenizer options shared by the single-text and sentence-pair paths.
        tok_kwargs = dict(
            max_length=self.max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        if record.get("text_a") is not None and record.get("text_b") is not None:
            encoded = self.tokenizer(record["text_a"], record["text_b"], **tok_kwargs)
        else:
            encoded = self.tokenizer(record["text"], **tok_kwargs)
        # return_tensors="pt" yields a leading batch dim of 1; strip it.
        sample = {key: tensor.squeeze(0) for key, tensor in encoded.items()}
        sample["labels"] = torch.tensor(record["label"], dtype=torch.long)
        # NOTE(review): "id" is a non-tensor rider in the batch; the default
        # collator silently drops string values — confirm ids are strings.
        sample["id"] = record["id"]
        return sample


def _load_label_maps(model_dir: str):
    """Return ``(label2id, id2label)`` for the saved model.

    Prefers ``label_mapping.json`` written at training time; falls back to the
    mapping stored in the model config when that file is absent.
    """
    label_mapping_path = os.path.join(model_dir, "label_mapping.json")
    if os.path.exists(label_mapping_path):
        with open(label_mapping_path, "r", encoding="utf-8") as f:
            mapping = json.load(f)
        label2id = mapping["label2id"]
        # JSON object keys are always strings; restore the int index keys.
        id2label = {int(k): v for k, v in mapping["id2label"].items()}
    else:
        # No label_mapping.json in the model dir: read the mapping from the
        # model config instead.
        from transformers import AutoConfig

        config = AutoConfig.from_pretrained(model_dir)
        label2id = config.label2id
        id2label = config.id2label
    return label2id, id2label


def _build_test_items(rows: List[Dict[str, Any]], label2id) -> List[Dict[str, Any]]:
    """Map raw jsonl rows to eval items with the training-time label indices.

    Rows whose ``label_id`` was not seen at training time are skipped — they
    cannot be scored against the model's output space.
    """
    items: List[Dict[str, Any]] = []
    for r in rows:
        lid = r.get("label_id")
        if lid not in label2id:
            continue
        item: Dict[str, Any] = {
            "id": r["id"],
            "label": label2id[lid],
            "label_id": lid,
            "label_name": r.get("label_name"),
        }
        if r.get("text_a") is not None and r.get("text_b") is not None:
            item["text_a"] = r.get("text_a")
            item["text_b"] = r.get("text_b")
        else:
            item["text"] = r.get("text")
        items.append(item)
    return items


def main():
    """Evaluate the saved classifier on fold_5 and write all eval artifacts.

    Artifacts written to ``--result_dir``: eval_metrics.json,
    confusion_matrix.json, confusion_matrix_labeled.csv,
    classification_report.txt, test_predictions.jsonl, misclassified.jsonl.
    """
    parser = argparse.ArgumentParser(description="评测已保存模型（fold_5 作为测试集）")
    parser.add_argument("--data_dir", type=str, default="data")
    parser.add_argument("--model_dir", type=str, default="models")
    parser.add_argument("--result_dir", type=str, default="data/result")
    parser.add_argument("--max_length", type=int, default=256)
    args = parser.parse_args()

    # Make sure the result directory exists before any artifact is written.
    ensure_dir(args.result_dir)

    label2id, id2label = _load_label_maps(args.model_dir)

    # Heavy imports are deferred so `--help` stays fast.
    from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer

    tokenizer = AutoTokenizer.from_pretrained(args.model_dir)
    model = AutoModelForSequenceClassification.from_pretrained(args.model_dir)

    # fold_5 is the held-out test fold; map its labels to indices exactly as
    # at training time.
    test_rows = load_jsonl(os.path.join(args.data_dir, "fold_5.jsonl"))
    test_items = _build_test_items(test_rows, label2id)
    if not test_items:
        # Fail loudly here instead of letting Trainer.predict crash on an
        # empty dataset with an obscure error.
        raise SystemExit("No evaluable rows found in fold_5.jsonl (all labels unseen at training time?)")

    ds = EvalDataset(test_items, tokenizer, max_length=args.max_length)
    trainer = Trainer(model=model, tokenizer=tokenizer)
    output = trainer.predict(ds)
    logits = output.predictions
    y_true = output.label_ids.tolist()
    y_pred = np.argmax(logits, axis=-1).tolist()
    probs = torch.softmax(torch.tensor(logits), dim=-1).numpy()

    # Aggregate metrics.
    metrics = {
        "accuracy": float(accuracy_score(y_true, y_pred)),
        "f1_macro": float(f1_score(y_true, y_pred, average="macro")),
        "f1_micro": float(f1_score(y_true, y_pred, average="micro")),
        "f1_weighted": float(f1_score(y_true, y_pred, average="weighted")),
    }
    with open(os.path.join(args.result_dir, "eval_metrics.json"), "w", encoding="utf-8") as f:
        json.dump(metrics, f, ensure_ascii=False, indent=2)

    # Confusion matrix over the full label set in a fixed order, plus the
    # sklearn text report with human-readable target names.
    all_labels = list(range(len(id2label)))
    cm = confusion_matrix(y_true, y_pred, labels=all_labels)
    with open(os.path.join(args.result_dir, "confusion_matrix.json"), "w", encoding="utf-8") as f:
        json.dump(cm.tolist(), f, ensure_ascii=False, indent=2)
    report = classification_report(y_true, y_pred, target_names=[id2label[i] for i in all_labels])
    with open(os.path.join(args.result_dir, "classification_report.txt"), "w", encoding="utf-8") as f:
        f.write(report)

    # CSV variant of the confusion matrix with labeled rows/columns.
    import csv

    csv_path = os.path.join(args.result_dir, "confusion_matrix_labeled.csv")
    ordered_label_ids = [id2label[i] for i in all_labels]
    with open(csv_path, "w", encoding="utf-8", newline="") as fcsv:
        writer = csv.writer(fcsv)
        writer.writerow(["true\\pred"] + ordered_label_ids)
        for i, row in enumerate(cm.tolist()):
            writer.writerow([ordered_label_ids[i]] + row)

    # Per-example predictions and the misclassified subset.
    # BUGFIX: label_name_pred was previously hard-coded to None; resolve it
    # through a label_id -> label_name lookup built from the test rows.
    label_id2name = {it["label_id"]: it.get("label_name") for it in test_items}
    preds_path = os.path.join(args.result_dir, "test_predictions.jsonl")
    miscls_path = os.path.join(args.result_dir, "misclassified.jsonl")
    mis_count = 0
    with open(preds_path, "w", encoding="utf-8") as f_all, open(miscls_path, "w", encoding="utf-8") as f_bad:
        for i, it in enumerate(test_items):
            pred_label_id = id2label[y_pred[i]]
            entry = {
                "id": it["id"],
                "label_id_true": it["label_id"],
                "label_id_pred": pred_label_id,
                "label_name_true": it.get("label_name"),
                "label_name_pred": label_id2name.get(pred_label_id),
                "probs": probs[i].tolist(),
            }
            if it.get("text_a") is not None and it.get("text_b") is not None:
                entry["text_a"] = it.get("text_a")
                entry["text_b"] = it.get("text_b")
            else:
                entry["text"] = it.get("text")
            f_all.write(json.dumps(entry, ensure_ascii=False) + "\n")
            if y_true[i] != y_pred[i]:
                f_bad.write(json.dumps(entry, ensure_ascii=False) + "\n")
                mis_count += 1
    print(f"Misclassified samples: {mis_count} / {len(test_items)}")

    print("Evaluation completed. Artifacts saved to:", args.result_dir)


if __name__ == "__main__":
    # Run the evaluation only when executed as a script, not on import.
    main()
