import argparse
import datetime
import json
import os
import re
from collections import Counter, defaultdict

# ================== Disease names and constants ==================
# Load the canonical disease-name vocabulary used for substring matching.
# Presumably a JSON list of Chinese disease names — TODO confirm against the config file.
with open('./configs/all_disease_names.json', 'r', encoding='utf-8') as f:
    all_disease_names = set(json.load(f))
# "正常眼底" (normal fundus) is not part of the config file, so add it explicitly.
all_disease_names.add("正常眼底")

# Keywords that mark an answer as "normal fundus" even when no disease name matched.
normal_keywords = [
    "正常眼底", 
    "正常", 
]

# ================== Evaluation main logic ==================
def get_question_type(prompt):
    """Classify a prompt into one of three question types.

    Returns:
        "judge"     -- yes/no question (explicit "answer 是 or 否" phrasing,
                       or "是否/能否/可以/显示的是…吗" forms),
        "compare"   -- pick-one-of-several question ("更可能/还是/哪种疾病"…),
        "diagnosis" -- open diagnosis question (the default).
    """
    # Fixed the dead local `prompt_lc = prompt.lower()`: it was never used,
    # since all patterns below match Chinese text directly.
    if re.search(r"请.*(是|否).*回答", prompt) or re.search(r"请直接回答是或否", prompt) or "请明确回答是或否" in prompt or "请严格使用“是”或“否”回答" in prompt:
        return "judge"
    if re.search(r"(是否|能否|可以|显示的是.*吗)", prompt):
        return "judge"
    # "是或否" in the prompt overrides the comparison keywords: it is a judge cue.
    if re.search(r"(更可能|更接近|还是|或|哪种疾病)", prompt) and not re.search(r"是或否", prompt):
        return "compare"
    return "diagnosis"

def normalize_bool(text):
    """Normalize a free-form yes/no answer to "是" / "否".

    Strips punctuation/whitespace, recognizes common Chinese and English
    affirmative/negative forms (case-insensitively), and falls back to a
    prefix check ("是…" / "否…" / "不…"). Anything unrecognized is returned
    as the cleaned text; empty input yields "".
    """
    text = text.strip().replace("。", "").replace(".", "").replace("\n", "").replace(" ", "")
    # Fix: English answers like "Yes"/"NO" previously fell through because the
    # membership tuples only contain lowercase forms — fold case for matching.
    folded = text.lower()
    if folded in ("是", "是的", "yes", "对", "正确"):
        return "是"
    if folded in ("否", "不是", "不对", "no", "错误"):
        return "否"
    if text == "":
        return ""
    if text.startswith("是"):
        return "是"
    if text.startswith("否") or text.startswith("不"):
        return "否"
    return text

def extract_diseases(text, all_names):
    """Return every known disease name mentioned in *text*.

    Punctuation/whitespace and the boilerplate prefix "疾病诊断为" are stripped
    first. Names are tried longest-first, and each match is removed from the
    working text so a shorter name that is a substring of a longer one is not
    double-counted.
    """
    cleaned = re.sub(r"[，。、；：,.!！\n\s]", "", text).replace("疾病诊断为", "")
    found = []
    for candidate in sorted(all_names, key=len, reverse=True):
        if candidate not in cleaned:
            continue
        found.append(candidate)
        cleaned = cleaned.replace(candidate, "")
    return found

def is_garbled(text):
    """Heuristically detect degenerate (collapsed/repetitive) model output.

    Only texts longer than 60 characters are ever flagged. A text is garbled
    when a single character makes up more than half of it, or when some
    substring of length 4..14 occurs more than 4 times.
    """
    # Fix: dropped the redundant function-local `from collections import Counter`
    # (Counter is already imported at module level) and hoisted len(text).
    length = len(text)
    if length <= 60:
        return False
    counts = Counter(text)
    if any(cnt > length * 0.5 for cnt in counts.values()):
        return True
    # Brute-force repeated-substring scan; .count() is non-overlapping, as before.
    for n in range(4, 15):
        for i in range(length - n):
            if text.count(text[i:i + n]) > 4:
                return True
    return False

def _score_judge(predict, label, qtype):
    """Score a yes/no answer; returns (correct, qtype, reason)."""
    pred_ans = normalize_bool(predict)
    label_ans = normalize_bool(label)
    if not pred_ans:
        return False, qtype, "空预测"
    correct = pred_ans == label_ans
    reason = "" if correct else f"标准答案:{label_ans},模型输出:{pred_ans}"
    return correct, qtype, reason

def _collect_diseases(predict, label, all_names):
    """Extract disease lists from prediction and label, mapping normal-fundus keywords to "正常眼底"."""
    pred_diseases = extract_diseases(predict, all_names)
    label_diseases = extract_diseases(label, all_names)
    for kw in normal_keywords:
        if kw in predict:
            pred_diseases.append("正常眼底")
        if kw in label:
            label_diseases.append("正常眼底")
    return pred_diseases, label_diseases

def evaluate_one(example, all_names):
    """Score one example dict (keys: 'prompt', 'predict', 'label').

    Returns (correct: bool, qtype: str, reason: str) where reason explains a
    failure ("" on success). Garbled output is always wrong. Judge questions
    compare normalized yes/no answers; diagnosis questions compare the full
    disease sets; compare questions match only the first extracted disease.
    """
    prompt = example['prompt']
    predict = example['predict'].strip()
    label = example['label'].strip()
    qtype = get_question_type(prompt)

    if is_garbled(predict):
        return False, qtype, '输出崩坏'

    if qtype == "judge":
        # Deduplicated: this path and the compare-as-judge path below shared
        # identical scoring code; both now delegate to _score_judge.
        return _score_judge(predict, label, qtype)

    elif qtype == "diagnosis":
        pred_diseases, label_diseases = _collect_diseases(predict, label, all_names)
        pred_set = set(pred_diseases)
        label_set = set(label_diseases)
        if not predict.strip():
            return False, qtype, "空预测"
        if not pred_diseases:
            return False, qtype, f"标准答案:{'、'.join(label_set)},模型输出:{predict.strip()}"
        correct = pred_set == label_set
        reason = "" if correct else f"标准答案:{'、'.join(label_set)},模型输出:{'、'.join(pred_set)}"
        return correct, qtype, reason

    elif qtype == "compare":
        # Some "compare" prompts are really yes/no questions ("…吗?" without
        # "哪种"); score those as judge questions, reporting qtype "judge".
        if re.search(r"吗[？?]", prompt) and not re.search(r"哪种", prompt):
            return _score_judge(predict, label, "judge")
        pred_diseases, label_diseases = _collect_diseases(predict, label, all_names)
        if not predict.strip():
            return False, qtype, "空预测"
        if not pred_diseases or not label_diseases:
            return False, qtype, f"标准答案:{'、'.join(label_diseases)},模型输出:{predict.strip()}"
        # Only the first (longest-name-first) extracted disease is compared.
        correct = (pred_diseases[0] == label_diseases[0])
        reason = "" if correct else f"标准答案:{label_diseases[0]},模型输出:{pred_diseases[0]}"
        return correct, qtype, reason
    else:
        return False, qtype, "未知题型"

def print_error_examples(errors, qtype, n=2):
    """Print the error count for one question type plus up to *n* sample cases."""
    print(f"题型[{qtype}]错误case数: {len(errors)}")
    for sample in errors[:n]:
        rendered = json.dumps(sample, ensure_ascii=False, indent=2)
        print(rendered)

def append_global_log(log_path, summary, src_result_path=None):
    """Merge *summary* into the JSON log file at *log_path*.

    The log maps a run name to its summary dict; existing entries are kept.
    A corrupt/unparsable log file is treated as empty rather than aborting.
    The record key is *src_result_path* when given, otherwise today's date.
    """
    logs = {}
    if os.path.exists(log_path):
        with open(log_path, 'r', encoding='utf-8') as f:
            try:
                logs = json.load(f)
            # Fix: was a bare `except:` that also swallowed KeyboardInterrupt
            # and SystemExit — only a malformed log should be reset.
            except json.JSONDecodeError:
                logs = {}
    if src_result_path:
        save_record_name = src_result_path
    else:
        # No explicit name: key the record by today's date (import moved to module top).
        save_record_name = datetime.datetime.now().strftime("%Y-%m-%d")
    logs[save_record_name] = summary
    with open(log_path, 'w', encoding='utf-8') as f:
        json.dump(logs, f, ensure_ascii=False, indent=2)

def main_eval(result_json_path, wrong_case_path, global_logger_path):
    """Score a JSONL file of model predictions.

    Prints overall and per-question-type accuracy plus sample error cases,
    writes every wrong case to *wrong_case_path* (JSONL), and appends the
    summary to the global logger keyed by *result_json_path*.
    """
    count = Counter()
    errors_by_type = defaultdict(list)
    total = 0
    with open(result_json_path, 'r', encoding='utf-8') as f:
        for line in f:
            example = json.loads(line)
            correct, qtype, reason = evaluate_one(example, all_disease_names)
            count[qtype] += 1
            total += 1
            if not correct:
                example['reason'] = reason
                errors_by_type[qtype].append(example)
            else:
                count[qtype + "_right"] += 1
                count["right"] += 1
    # Build the summary in the same key order as before (it is serialized to JSON).
    summary = {"acc": count["right"] / total if total else 0}
    for t in ("judge", "diagnosis", "compare"):
        summary[t + "_acc"] = count[t + "_right"] / count[t] if count[t] else 0
    summary["total"] = total
    summary["right"] = count["right"]
    for t in ("judge", "diagnosis", "compare"):
        summary[t] = count[t]
    divider = "=" * 20
    print(divider)
    print("总准确率:", summary["acc"])
    for t in ["judge", "diagnosis", "compare"]:
        print(f"{t}题型准确率: {summary[t + '_acc']}")
    print(divider)
    for qtype, errs in errors_by_type.items():
        print_error_examples(errs, qtype, n=2)
    # Dump every wrong case as one JSON object per line.
    all_errors = [e for errs in errors_by_type.values() for e in errs]
    if all_errors:
        os.makedirs(os.path.dirname(wrong_case_path), exist_ok=True)
        with open(wrong_case_path, "w", encoding="utf-8") as f:
            for e in all_errors:
                f.write(json.dumps(e, ensure_ascii=False) + "\n")
        print(f"全部错误样本已写入 {wrong_case_path}")
    # Append this run's summary to the cumulative log.
    append_global_log(global_logger_path, summary, src_result_path=result_json_path)
    print(f"评测结果已追加写入 {global_logger_path}")

if __name__ == "__main__":
    # CLI entry point: score one result file and append the summary to the global log.
    cli = argparse.ArgumentParser()
    cli.add_argument("--result_json_path", type=str, default="experiments/eval_med_pub/full_checkpoint-1200.jsonl")
    cli.add_argument("--wrong_case_path", type=str, default="experiments/eval_med_pub/full_checkpoint-1200-wrong_case.jsonl")
    cli.add_argument("--global_logger", type=str, default="experiments/global_logger.json")
    opts = cli.parse_args()
    main_eval(opts.result_json_path, opts.wrong_case_path, opts.global_logger)
