from __future__ import annotations
from dataclasses import dataclass
from typing import Any, List, Tuple
from .utils import run_and_measure, linear_regression, score_from_time, score_from_memory, safe_exec
from .tasks import TaskSpec, TASK_SPECS

@dataclass
class MetricResult:
    """One scored metric (e.g. correctness, robustness) for a single problem."""
    name: str     # metric label shown in reports, e.g. "Correct", "Robust", "Eff."
    value: Any    # raw displayed value (count string, ratio, seconds, ...)
    score: float  # normalized score on a 0-100 scale
    note: str     # human-readable explanation of how the score was obtained

@dataclass
class ProblemResult:
    """Aggregated evaluation result for one submitted solution."""
    problem_id: int              # key used to look up the spec in TASK_SPECS
    func_name: str               # expected entry-point name ("N/A" when the spec is unknown)
    code: str                    # the submitted source code that was evaluated
    metrics: List[MetricResult]  # per-metric breakdown (correctness, robustness, ...)
    total_score: float           # weighted combination of the metric scores
    missing_func: bool = False   # set when the spec is unknown or the code failed to execute

def compute_readability_score(code: str) -> MetricResult:
    """Score readability from comment density plus a docstring bonus.

    The comment ratio is scaled against a 60% target; a 10-point bonus is
    added when at least one triple-quote pair appears. Capped at 100.
    """
    nonblank = [line for line in code.splitlines() if line.strip()]
    if not nonblank:
        return MetricResult("Readable", 0, 0.0, "空文件")
    total = len(nonblank)
    comment_lines = len([line for line in nonblank if line.strip().startswith("#")])
    # Count triple-quote markers across all non-blank lines; a complete
    # docstring needs at least one opening/closing pair.
    triple_quotes = sum(line.count('"""') + line.count("'''") for line in nonblank)
    has_doc = triple_quotes >= 2
    ratio = comment_lines / total
    bonus = 10.0 if has_doc else 0.0
    score = min(100.0, (ratio / 0.6) * 100.0 + bonus)
    note = f"注释行 {comment_lines}/{total}（{ratio:.1%}），{'含' if has_doc else '不含'}文档字符串"
    return MetricResult("Readable", f"{ratio:.2f}", score, note)

def check_two_sum_return(nums, target, ret) -> bool:
    """Validate a two-sum answer: two distinct, in-range int indices whose
    elements sum to `target`."""
    # The answer must unpack into exactly two items.
    try:
        first, second = ret
    except Exception:
        return False
    # Both indices must be plain ints.
    for idx in (first, second):
        if not isinstance(idx, int):
            return False
    # Indices must be distinct and within bounds.
    if first == second:
        return False
    n = len(nums)
    if 0 <= first < n and 0 <= second < n:
        return nums[first] + nums[second] == target
    return False

def run_correctness(ns: dict, spec: TaskSpec) -> Tuple[float, str, int, int]:
    """Run spec.tests against the candidate function.

    Returns (score 0-100, joined per-test note, passed count, total count).
    Problem 1 (two-sum) is validated structurally instead of by exact value.
    """
    candidate = ns.get(spec.func_name)
    total = len(spec.tests)
    if not callable(candidate):
        return 0.0, f"未找到函数 {spec.func_name}", 0, total
    passed = 0
    marks = []
    for args, expected, tag in spec.tests:
        measured = run_and_measure(candidate, *args)
        if not measured.ok:
            ok = False
        elif spec.problem_id == 1:
            # Two-sum admits multiple valid index pairs; check the structure.
            ok = check_two_sum_return(args[0], args[1], measured.value)
        else:
            ok = (measured.value == expected)
        marks.append(f"{tag}: {'✔' if ok else '✘'}")
        if ok:
            passed += 1
    return (passed / max(1, total)) * 100.0, "；".join(marks), passed, total

def run_robustness(ns: dict, spec: TaskSpec) -> Tuple[float, str, int, int]:
    """Run spec.robust_tests (edge/degenerate inputs).

    An entry whose expected value is None only requires the call to succeed.
    Returns (score 0-100, joined per-test note, passed count, total count).
    """
    candidate = ns.get(spec.func_name)
    total = len(spec.robust_tests)
    if not callable(candidate):
        return 0.0, f"未找到函数 {spec.func_name}", 0, total
    passed = 0
    marks = []
    for args, expected, tag in spec.robust_tests:
        measured = run_and_measure(candidate, *args)
        if measured.ok:
            # None means "any result is acceptable as long as it didn't raise".
            ok = True if expected is None else (measured.value == expected)
        else:
            ok = False
        suffix = "" if measured.ok else f"（{measured.error}）"
        marks.append(f"{tag}: {'✔' if ok else '✘'}" + suffix)
        if ok:
            passed += 1
    return (passed / max(1, total)) * 100.0, "；".join(marks), passed, total

def run_efficiency(ns: dict, spec: TaskSpec, quick: bool = False) -> Tuple[float, float, float, str]:
    """Estimate empirical complexity by fitting a log-log slope over spec.eff_sizes.

    Runs the candidate on inputs of increasing size, fits log(time) against
    log(size), and scores by how close the slope is to spec.expected_slope.

    Returns (efficiency score 0-100, wall time of the largest run,
    peak memory MB of the last successful run, joined per-size note).
    """
    import math

    fn = ns.get(spec.func_name)
    if fn is None or not callable(fn):
        return 0.0, 0.0, 0.0, f"未找到函数 {spec.func_name}"
    sizes = spec.eff_sizes[:2] if quick else spec.eff_sizes
    times = []
    note_parts = []
    peak_mb_last = 0.0
    for sz in sizes:
        args = spec.eff_gen(sz)
        m = run_and_measure(fn, *args)
        if not m.ok:
            note_parts.append(f"n={sz}: ERROR {m.error}")
            # Penalty point: double the previous time (at least 1s) so the
            # regression still has a data point for every size.
            times.append(max(times[-1] * 2 if times else 1.0, 1.0))
        else:
            times.append(max(m.seconds, 1e-9))  # clamp to avoid log(0)
            note_parts.append(f"n={sz}: {m.seconds:.4f}s")
            peak_mb_last = m.peak_mb
    xs = [math.log(max(1.0, s)) for s in sizes]
    ys = [math.log(t) for t in times]
    slope, _ = linear_regression(xs, ys)
    # Score falls off linearly with distance from the expected slope.
    diff = abs(slope - spec.expected_slope)
    eff_score = max(0.0, 100.0 - (diff * 80.0))
    # BUG FIX: a leftover `eff_score = 0` debug override immediately discarded
    # the computed score, so the Eff. metric was always 0 even though the note
    # reported real timings. Removed so the metric reflects the measured slope.
    return eff_score, times[-1], peak_mb_last, "；".join(note_parts)

def evaluate_problem(problem_id: int, code: str, quick: bool=False) -> ProblemResult:
    """Evaluate one code submission across all metrics and combine scores.

    Executes `code` via safe_exec, runs the correctness / robustness /
    efficiency suites from its TaskSpec, and returns a ProblemResult with a
    weighted total. Unknown problem ids are scored on readability only; code
    that fails to execute gets zeroed runtime metrics with the error in every
    note.

    Args:
        problem_id: key into TASK_SPECS.
        code: submitted source text.
        quick: forwarded to run_efficiency to shrink the size sweep.
    """
    spec = TASK_SPECS.get(problem_id)
    if spec is None:
        # Unknown problem: readability is the only judgeable metric.
        mr = compute_readability_score(code)
        return ProblemResult(problem_id, "N/A", code, [mr], mr.score, missing_func=True)
    try:
        ns = safe_exec(code)
    except Exception as e:
        # Submission failed to execute at all: zero every runtime metric but
        # still report readability, carrying the error text in each note.
        read = compute_readability_score(code)
        err_note = f"代码执行错误: {type(e).__name__}: {e}"
        metrics = [
            MetricResult("Correct", "0/NA", 0.0, err_note),
            MetricResult("Robust", "0/NA", 0.0, err_note),
            MetricResult("Eff.", "0", 0.0, err_note),
            MetricResult("Peak MB", "NA", 0.0, err_note),
            MetricResult("Time(last)", "NA", 0.0, err_note),
            read,
        ]
        return ProblemResult(problem_id, spec.func_name, code, metrics, 0.0, missing_func=True)
    read = compute_readability_score(code)
    correct_score, correct_note, pc, pt = run_correctness(ns, spec)
    robust_score, robust_note, rc, rt = run_robustness(ns, spec)
    eff_score, time_last, peak_mb, eff_note = run_efficiency(ns, spec, quick=quick)
    # Metric weights (sum to 1.0 as written). NOTE(review): the efficiency
    # weight is currently zeroed with the original 0.20 left commented out —
    # the Eff. metric is displayed but does not contribute to the total.
    w_correct = 0.40
    w_robust  = 0.20
    #    w_eff     = 0.20
    w_eff     = 0.00
    w_time    = 0.20
    w_mem     = 0.15
    w_read    = 0.05
    time_score = score_from_time(time_last, spec.tref)
    mem_score = score_from_memory(peak_mb, spec.mref)
    metrics = [
        MetricResult("Correct", f"{pc}/{pt}", correct_score, correct_note),
        MetricResult("Robust", f"{rc}/{rt}", robust_score, robust_note),
        MetricResult("Eff.", f"{eff_score:.1f}", eff_score, eff_note),
        MetricResult("Peak MB", f"{peak_mb:.2f}", mem_score, f"基于 tracemalloc 峰值；mref={spec.mref}MB"),
        MetricResult("Time(last)", f"{time_last:.4f}s", time_score, f"参考 tref={spec.tref}s"),
        read,
    ]
    # Weighted aggregate of all metric scores.
    total = (
        w_correct*correct_score +
        w_robust*robust_score +
        w_eff*eff_score +
        w_time*time_score +
        w_mem*mem_score +
        w_read*read.score
    )
    return ProblemResult(problem_id, spec.func_name, code, metrics, total, missing_func=False)