#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
AGIEVAL (3-shot) evaluator for local Hugging Face models (e.g., Llama-2-7b).

Features
- Evaluate one or multiple AGIEVAL subsets (multiple-choice tasks)
- 3-shot prompting pulled from the dataset's dev/validation split
- Robust answer parsing (extracts a standalone option letter A-F from the generation)
- Works with base or chat variants (llama-2 vs llama-2-chat) via --chat flag
- Deterministic generation (seeded)
- JSONL result logs and a JSON summary, with per-task and overall accuracy

Usage
------
# 1) Install deps
pip install -U transformers accelerate datasets sentencepiece torch

# 2) Run (CPU or GPU)
python agieval_eval.py \
  --model hf_or_local_path \
  --tasks gaokao-english gaokao-chinese logiqa-en lsat-ar \
  --shots 3 \
  --max-samples 0 \
  --chat false \
  --bf16 true

Arguments
---------
--model           Hugging Face model id or local path (e.g., meta-llama/Llama-2-7b-hf or ./llama2-7b)
--tasks           Space-separated AGIEVAL subset names. If omitted, a sensible default list is used.
--shots           Number of few-shot examples (default: 3)
--max-samples     Max test samples per task (0 means all)
--chat            Use chat-style wrapper (for -chat models). Default: false
--bf16            Use bfloat16 if available. Default: true
--fp16            Use float16 if available. Default: false (ignored if bf16 true)
--device          torch device string (cuda, cuda:0, mps, cpu). Auto-detect if empty.
--batch-size      Parsed for forward compatibility but currently unused; prompts are generated one at a time
--outdir          Directory to write logs (default: ./runs/agieval_3shot)

Notes
-----
- Llama-2 base models require acceptance of license to download from HF.
- For better speed, ensure you have a GPU and install the right torch build.
- This script uses standard generate() for simplicity. For higher throughput, integrate vLLM separately.
"""

import argparse
import json
import os
import random
import re
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import torch
from datasets import load_dataset, Dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
)

# -------------------------------
# Utility: seeding & device
# -------------------------------
def set_seed(seed: int = 42):
    """Seed every RNG the script relies on (Python, torch CPU, torch CUDA)."""
    for seeder in (random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)


def pick_device(arg: Optional[str] = None) -> str:
    """Return the explicitly requested device, or auto-detect (cuda > mps > cpu)."""
    if arg:
        return arg
    if torch.cuda.is_available():
        return "cuda"
    mps_backend = getattr(torch.backends, "mps", None)
    if mps_backend and mps_backend.is_available():
        return "mps"
    return "cpu"


# -------------------------------
# AGIEVAL helpers
# -------------------------------
DEFAULT_TASKS = [
    # A common subset of multiple-choice tasks in AGIEVAL.
    # You can add/remove freely; names must match the HF dataset configs.
    # NOTE(review): these are passed as `name=` to load_agieval_split's
    # load_dataset("agieval", ...) call — verify they match the hub configs.
    "gaokao-english",
    "gaokao-chinese",
    "gaokao-physics",
    "gaokao-history",
    "logiqa-en",
    "logiqa-zh",
    "lsat-ar",
    "lsat-lr",
    "lsat-rc",
    "sat-en",
    "sat-math",
    "math",
]

# Option labels used when rendering choices and when mapping integer answer
# indices to letters; six entries cover subsets with up to six options.
CHOICE_LETTERS = ["A", "B", "C", "D", "E", "F"]  # allow >4 when needed


@dataclass
class Sample:
    """One normalized multiple-choice item (see extract_sample)."""
    question: str  # question stem text
    choices: List[str]  # option texts, rendered as A., B., ... in list order
    answer: str  # gold letter or text
    meta: Dict  # free-form per-sample metadata (currently always empty)


def load_agieval_split(task: str, split: str) -> Dataset:
    """Load a single AGIEVAL subset split.

    The HF dataset name is 'agieval' with config=<task>.

    Raises:
        RuntimeError: when the download/load fails, chained to the original
            error so HF's underlying cause (auth, missing config, offline
            mode, bad split name) stays visible in the traceback.
    """
    try:
        return load_dataset("agieval", name=task, split=split)
    except Exception as e:
        # `from e` preserves the cause chain; the original raise dropped it.
        raise RuntimeError(f"Failed to load agieval/{task}:{split} -> {e}") from e


def extract_sample(rec) -> Sample:
    """Normalize one raw AGIEVAL record into a Sample.

    AGIEVAL schemas vary slightly across subsets; this probes the common
    field names for the question, option list, and gold answer, then
    normalizes the answer to an option letter when possible.

    Raises:
        ValueError: when the record exposes none of the expected fields.
    """
    # Probe common field names across subsets.
    q = rec.get("question") or rec.get("prompt") or rec.get("query")
    options = rec.get("options") or rec.get("choices") or rec.get("candidate")
    # BUGFIX: the original used `rec.get("answer") or rec.get("label")`,
    # which treated a valid integer answer of 0 (first option) as missing
    # and fell through to "label" / the schema error below.
    ans = rec.get("answer")
    if ans is None:
        ans = rec.get("label")

    if q is None or options is None or ans is None:
        raise ValueError("Unexpected AGIEVAL schema; please inspect record keys: " + ", ".join(sorted(rec.keys())))

    # Normalize the gold answer to a letter when possible.
    letter = None
    if isinstance(ans, str):
        stripped = ans.strip()
        # Accept a single letter in either case (some dumps use lowercase);
        # the original only matched uppercase.
        if re.fullmatch(r"[A-Za-z]", stripped):
            letter = stripped.upper()
        else:
            # Full option text: map it back to its index.
            try:
                letter = CHOICE_LETTERS[options.index(ans)]
            except Exception:
                # Not an option (or >6 options): keep the raw text; run_task
                # falls back to its first character upper-cased.
                letter = stripped
    elif isinstance(ans, int):
        # Integer index into the option list; out-of-range stays stringified.
        letter = CHOICE_LETTERS[ans] if 0 <= ans < len(options) else str(ans)
    else:
        letter = str(ans)

    return Sample(question=q, choices=list(options), answer=letter, meta={})


# -------------------------------
# Prompting
# -------------------------------
# Instruction prepended to every prompt; few-shot examples follow it.
BASE_INSTRUCTION = (
    "You are a careful solver for multiple-choice exams.\n"
    "Choose the single best answer from the options.\n"
    "Respond with only the letter (e.g., A)."
)

# System message used only when --chat wraps the prompt in Llama chat tags.
CHAT_SYS = "You are a helpful, precise assistant for multiple-choice exams."


def format_example(sample: Sample) -> str:
    """Render one solved example: question, lettered options, gold answer."""
    lines = [f"Question: {sample.question}"]
    for idx, choice in enumerate(sample.choices):
        lines.append(f"{CHOICE_LETTERS[idx]}. {choice}")
    lines.append(f"Answer: {sample.answer}")
    return "\n".join(lines)


def format_query(sample: Sample) -> str:
    """Render the question to be answered: like format_example, but the
    trailing "Answer:" is left open for the model to complete."""
    lines = [f"Question: {sample.question}"]
    for idx, choice in enumerate(sample.choices):
        lines.append(f"{CHOICE_LETTERS[idx]}. {choice}")
    lines.append("Answer:")
    return "\n".join(lines)


def build_fewshot_prompt(shots: List[Sample], query: Sample, chat: bool = False) -> str:
    """Assemble the full prompt: instruction, few-shot examples, then the query.

    Args:
        shots: solved examples rendered with their gold answers.
        query: the question to answer; rendered with an open "Answer:".
        chat: when True, wrap the prompt in Llama-2 chat tags.

    Returns:
        The prompt string (plain for base models, tagged for -chat models).
    """
    examples = "\n\n".join(format_example(s) for s in shots)
    user = BASE_INSTRUCTION + "\n\n" + examples + "\n\n" + format_query(query)
    if not chat:
        return user
    # BUGFIX: Llama-2's chat template places the <<SYS>> block INSIDE the
    # [INST] ... [/INST] tags, before the user turn. The original emitted the
    # system block outside [INST], which -chat checkpoints were not trained on.
    return f"[INST] <<SYS>>\n{CHAT_SYS}\n<</SYS>>\n\n{user.strip()} [/INST]"


# -------------------------------
# Answer parsing
# -------------------------------
LETTER_RE = re.compile(r"\b([A-F])\b")


def parse_answer(text: str, choices_len: int) -> Optional[str]:
    # Try to find a standalone letter near the end
    tail = text.strip().splitlines()[-1]
    m = LETTER_RE.search(tail.upper())
    if m:
        letter = m.group(1)
        if CHOICE_LETTERS.index(letter) < choices_len:
            return letter
    # fallback: first letter anywhere
    m2 = LETTER_RE.search(text.upper())
    if m2 and CHOICE_LETTERS.index(m2.group(1)) < choices_len:
        return m2.group(1)
    return None


# -------------------------------
# Evaluation Loop
# -------------------------------
@dataclass
class Pred:
    """One scored prediction row, serialized as a JSONL log line."""
    task: str  # AGIEVAL subset name
    idx: int  # index within the (possibly truncated) test split
    gold: str  # gold answer letter
    pred: Optional[str]  # parsed predicted letter, or None if unparseable
    correct: int  # 1 if pred == gold else 0
    prompt_tokens: int  # tokenized prompt length
    completion_tokens: int  # number of generated tokens
    latency_ms: float  # wall-clock generate() time in milliseconds


def run_task(
    task: str,
    model,
    tokenizer,
    device: str,
    shots_n: int,
    max_samples: int,
    chat: bool,
    gen_config: GenerationConfig,
    outdir: Path,
) -> Tuple[float, List[Pred]]:
    """Evaluate one AGIEVAL subset with few-shot prompting.

    For each test question: sample `shots_n` examples from the first
    available dev/validation/train split, build a prompt, greedily generate
    a short completion, parse the predicted letter, and score it against
    the gold letter. Writes one JSONL line per prediction to
    <outdir>/<task>.jsonl.

    Args:
        task: AGIEVAL subset/config name.
        model: causal LM with a HF-style .generate() (type not pinned here).
        tokenizer: matching HF tokenizer.
        device: device string the tokenized inputs are moved to.
        shots_n: number of few-shot examples per question.
        max_samples: cap on test samples; <= 0 means the full test split.
        chat: wrap prompts in chat tags (see build_fewshot_prompt).
        gen_config: extra generation settings merged into generate().
        outdir: directory for the per-task JSONL log (created if missing).

    Returns:
        (accuracy, preds): accuracy over evaluated samples (0.0 if none)
        and the list of per-sample Pred records.

    Raises:
        RuntimeError: when no dev/validation/train split exists for `task`.
    """
    # Subsets expose different split names; fall back dev -> validation -> train.
    dev_split = None
    for split in ["dev", "validation", "train"]:
        try:
            dev_split = load_agieval_split(task, split)
            break
        except Exception:
            continue
    if dev_split is None:
        raise RuntimeError(f"No dev/validation/train split found for {task} to draw shots from.")

    test_split = load_agieval_split(task, "test")

    # Build a pool of samples for shots
    dev_samples: List[Sample] = [extract_sample(x) for x in dev_split]
    test_samples: List[Sample] = [extract_sample(x) for x in test_split]

    preds: List[Pred] = []

    # max_samples <= 0 means "evaluate the whole test split".
    total = len(test_samples) if max_samples <= 0 else min(max_samples, len(test_samples))
    for i in range(total):
        q = test_samples[i]
        # draw shots with replacement avoiding same question text (best-effort)
        pool = [s for s in dev_samples if s.question.strip() != q.question.strip()]
        if len(pool) < shots_n:
            # Filtered pool too small: fall back to the unfiltered dev set.
            shots = random.sample(dev_samples, min(shots_n, len(dev_samples)))
        else:
            shots = random.sample(pool, shots_n)

        prompt = build_fewshot_prompt(shots, q, chat=chat)

        # NOTE(review): with device_map="auto" the model may be sharded across
        # devices; moving inputs to `device` assumes the embedding layer lives
        # there — confirm on multi-GPU setups.
        inputs = tokenizer(prompt, return_tensors="pt").to(device)

        start = time.time()
        with torch.no_grad():
            out = model.generate(
                **inputs,
                max_new_tokens=8,  # only need the letter
                do_sample=False,
                pad_token_id=tokenizer.eos_token_id,
                generation_config=gen_config,
            )
        latency_ms = (time.time() - start) * 1000

        # decode only the added portion
        gen_text = tokenizer.decode(out[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
        letter = parse_answer(gen_text, len(q.choices))
        # Gold may still be free text if extract_sample could not map it to a
        # letter; fall back to its first character, upper-cased.
        gold_letter = q.answer if re.fullmatch(r"[A-F]", q.answer) else q.answer[:1].upper()
        correct = int(letter == gold_letter)

        preds.append(
            Pred(
                task=task,
                idx=i,
                gold=gold_letter,
                pred=letter,
                correct=correct,
                prompt_tokens=inputs["input_ids"].numel(),
                completion_tokens=(out[0].shape[-1] - inputs["input_ids"].shape[-1]),
                latency_ms=latency_ms,
            )
        )

    # max(1, ...) guards the division when zero samples were evaluated.
    acc = sum(p.correct for p in preds) / max(1, len(preds))

    # write logs
    outdir.mkdir(parents=True, exist_ok=True)
    with open(outdir / f"{task}.jsonl", "w", encoding="utf-8") as f:
        for p in preds:
            f.write(json.dumps(p.__dict__, ensure_ascii=False) + "\n")

    return acc, preds


# -------------------------------
# Main
# -------------------------------

def main():
    """CLI entry point: parse args, load model/tokenizer, evaluate each
    requested AGIEVAL task, print accuracies, and write logs to --outdir."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, required=True)
    parser.add_argument("--tasks", type=str, nargs="*", default=None)
    parser.add_argument("--shots", type=int, default=3)
    parser.add_argument("--max-samples", type=int, default=0)
    # Boolean flags accept true/false-like strings, e.g. `--chat true`.
    parser.add_argument("--chat", type=lambda x: str(x).lower() in {"1", "true", "yes"}, default=False)
    parser.add_argument("--bf16", type=lambda x: str(x).lower() in {"1", "true", "yes"}, default=True)
    parser.add_argument("--fp16", type=lambda x: str(x).lower() in {"1", "true", "yes"}, default=False)
    parser.add_argument("--device", type=str, default="")
    parser.add_argument("--batch-size", type=int, default=1)  # reserved; generation is per-sample
    parser.add_argument("--outdir", type=str, default="./runs/agieval_3shot")
    args = parser.parse_args()

    set_seed(42)  # fixed seed: deterministic shot selection across runs

    device = pick_device(args.device)

    # Half precision only on CUDA; bf16 takes precedence over fp16.
    torch_dtype = None
    if args.bf16 and torch.cuda.is_available():
        torch_dtype = torch.bfloat16
    elif args.fp16 and torch.cuda.is_available():
        torch_dtype = torch.float16

    print(f"Loading model: {args.model} on {device} (dtype={torch_dtype})")
    tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)
    if tokenizer.pad_token is None:
        # Llama tokenizers ship without a pad token; reuse EOS for padding.
        tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        args.model,
        torch_dtype=torch_dtype,
        device_map="auto" if device.startswith("cuda") else None,
    )
    if not device.startswith("cuda"):
        # BUGFIX: without device_map the model loads on CPU, but run_task moves
        # inputs to `device`; on mps this mismatched and crashed. No-op on cpu.
        model.to(device)
    model.eval()

    gen_config = GenerationConfig(
        temperature=0.0,
        top_p=1.0,
        repetition_penalty=1.0,
    )

    tasks = args.tasks or DEFAULT_TASKS

    outdir = Path(args.outdir)
    # BUGFIX: create the output directory up front. Previously only run_task()
    # created it, so if every task errored, writing summary.json below raised
    # FileNotFoundError.
    outdir.mkdir(parents=True, exist_ok=True)
    summary = {}
    all_preds = []

    for task in tasks:
        print(f"\n=== Running task: {task} ===")
        try:
            acc, preds = run_task(
                task=task,
                model=model,
                tokenizer=tokenizer,
                device=device,
                shots_n=args.shots,
                max_samples=args.max_samples,
                chat=args.chat,
                gen_config=gen_config,
                outdir=outdir,
            )
            summary[task] = acc
            all_preds.extend(preds)
            print(f"Task {task}: Accuracy = {acc:.4f} ({sum(p.correct for p in preds)}/{len(preds)})")
        except Exception as e:
            # Best-effort: keep evaluating the remaining tasks.
            print(f"Task {task}: ERROR -> {e}")

    # Overall = micro-average across all evaluated samples (not task-mean).
    totals = [p.correct for p in all_preds]
    overall = sum(totals) / max(1, len(totals)) if totals else 0.0
    print("\n=== Summary ===")
    for k, v in summary.items():
        print(f"{k:20s}: {v:.4f}")
    print(f"OVERALL             : {overall:.4f}")

    # Save summary
    with open(outdir / "summary.json", "w", encoding="utf-8") as f:
        json.dump({"per_task": summary, "overall": overall}, f, ensure_ascii=False, indent=2)


if __name__ == "__main__":
    main()
