# Qwen3-8B_RQ2-HRE.py
# -*- coding: utf-8 -*-
import json
import os
import re
import subprocess
import sys
from typing import List, Dict, Any, Tuple

import pandas as pd
import torch
from tqdm import tqdm
from unsloth import FastLanguageModel, is_bfloat16_supported, get_chat_template


def RHE_search_subprocess(RHE_index_path, RHE_meta_path, model_dir, query, top_k, operation, experiences, RQ):
    """Invoke the external HRE_RAG.py helper as a subprocess.

    For operation == "search" the helper's stdout is parsed as JSON and
    the resulting list of retrieved experiences is returned; for any
    other operation (e.g. "update") raw stdout is returned as a string.
    On any failure (missing script, non-zero exit, bad JSON) an empty
    list is returned so the caller can continue with the next sample.
    """
    # Bug fix: resolve the helper script relative to this file, as the
    # original comment intended — a bare relative path depends on CWD.
    script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "HRE_RAG.py")
    cmd = [
        sys.executable, script_path,
        "--index", RHE_index_path,
        "--meta", RHE_meta_path,
        "--model", model_dir,
        "--query", query,
        "--topk", str(top_k),
        "--operation", operation,
        "--experiences", experiences,
        "--RQ", RQ
    ]
    try:
        p = subprocess.run(cmd, capture_output=True, text=True)
        if p.returncode != 0:
            raise RuntimeError(p.stderr.strip() or "RHE subprocess failed")
        if operation == "search":
            try:
                parsed = json.loads(p.stdout.strip() or "[]")
            except Exception:
                # If the subprocess printed diagnostics to stdout, surface them
                raise RuntimeError(f"     RHE returned non‑JSON output: {p.stdout.strip()}")
            if not parsed:
                print("     RHE returned non 无匹配结果")
            return parsed
        else:
            parsed = p.stdout.strip()
            # Bug fix: the old `parsed == []` check could never be true
            # for a string; treat empty stdout as "no result".
            if not parsed:
                print("     RHE returned non 无匹配结果")
            return parsed
    except Exception as e:
        print(f"     RHE subprocess failed: {e}")
        return []


# =========================
# Utils
# =========================
def ensure_dir(p: str):
    """Create the directory needed for *p* if it does not already exist.

    A path with a file extension is treated as a file, so its parent
    directory is created; otherwise *p* itself is treated as a directory.
    """
    target = os.path.dirname(p) if os.path.splitext(p)[1] else p
    if not target:
        return
    if not os.path.exists(target):
        os.makedirs(target, exist_ok=True)


def save_xlsx_append(path: str, df_row: Dict[str, Any]):
    """Append one record to an xlsx file, creating the file if absent.

    The whole sheet is re-read and rewritten on every call, which keeps
    the output consistent even if the run is interrupted.
    """
    ensure_dir(path)
    frame = pd.DataFrame([df_row])
    if os.path.exists(path):
        existing = pd.read_excel(path)
        frame = pd.concat([existing, frame], ignore_index=True)
    frame.to_excel(path, index=False, engine="openpyxl")


def read_jsonl(path: str) -> List[dict]:
    """Load a JSON-Lines file, silently skipping blank or malformed lines.

    Returns an empty list when the file does not exist.
    """
    if not os.path.exists(path):
        return []
    records: List[dict] = []
    with open(path, "r", encoding="utf-8") as fh:
        for raw in fh:
            raw = raw.strip()
            if not raw:
                continue
            try:
                records.append(json.loads(raw))
            except Exception:
                # Best-effort parsing: a bad line must not abort the run.
                continue
    return records


# =========================
# LLM I/O
# =========================
def load_model():
    """Load the Unsloth language model and its chat-template tokenizer.

    Relies on the module-level configuration globals MODEL_NAME, DTYPE,
    LOAD_IN_4BIT and MAX_SEQ_LENGTH defined in the __main__ section.
    """
    print("    加载模型中 ...")
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name=MODEL_NAME,
        max_seq_length=MAX_SEQ_LENGTH,
        dtype=DTYPE,
        load_in_4bit=LOAD_IN_4BIT,
    )
    # Wrap the tokenizer so it renders the Qwen-2.5 chat template.
    tokenizer = get_chat_template(tokenizer, chat_template="qwen-2.5")
    print("    模型就绪")
    return model, tokenizer


def gen_with_messages(messages, model, tokenizer, leave_token_len) -> Tuple[str, str]:
    """Run one chat completion and split the reply into its parts.

    Returns a (think_content, output_content) pair, where think_content
    is the text inside a <think>...</think> span (empty when absent) and
    output_content is the answer between </think> and <|im_end|> (the
    whole decoded text when that span is absent). A stray ```json fence
    wrapping the entire answer is removed.
    """
    FastLanguageModel.for_inference(model)
    prompt_ids = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        max_seq_length=MAX_SEQ_LENGTH,
        return_tensors="pt",
    ).to("cuda")
    generated = model.generate(prompt_ids, max_new_tokens=leave_token_len, use_cache=True)
    decoded = tokenizer.batch_decode(generated)[-1]

    think_match = re.search(r"<think>(.*?)</think>", decoded, re.DOTALL)
    think_part = think_match.group(1).strip() if think_match else ""

    answer_match = re.search(r"</think>(.*?)<\|im_end\|>", decoded, re.DOTALL)
    answer_part = answer_match.group(1).strip() if answer_match else decoded.strip()

    # Strip an erroneous ```json ... ``` fence around the whole answer.
    rows = answer_part.splitlines()
    if len(rows) >= 2 and rows[0].strip().lower() == "```json" and rows[-1].strip() == "```":
        answer_part = "\n".join(rows[1:-1])

    return think_part, answer_part


def trim_text_to_tokens_use_before_code(tokenizer, text: str, before_code: str):
    """Check *text* against the remaining token budget.

    The budget is MAX_SEQ_LENGTH minus the token count of *before_code*
    (the prompt part that must be preserved intact).

    Returns:
        (text, budget)  when *text* fits within the budget,
        (text, None)    when *text* is empty/falsy,
        (None, None)    when *text* is over budget — callers treat this
                        as "skip the sample" rather than truncating.

    Note: despite the name, over-budget text is rejected, not trimmed;
    truncation was deliberately disabled (the fixed wrong `-> str`
    annotation and docstring previously claimed otherwise).
    """
    if not text:
        return text, None
    enc = tokenizer(text, add_special_tokens=False, return_attention_mask=False, return_tensors="pt")
    enc_before_code = tokenizer(before_code, add_special_tokens=False, return_attention_mask=False, return_tensors="pt")
    ids_before_code = enc_before_code["input_ids"][0]
    ids = enc["input_ids"][0]
    budget = MAX_SEQ_LENGTH - ids_before_code.shape[0]
    if ids.shape[0] <= budget:
        return text, budget
    # Over budget: log the lengths for diagnostics, then reject.
    trimmed = ids[:budget]
    print(f"     原始长度 {ids.shape[0]} -> 过长删除后的长度 {trimmed.shape[0]}")
    return None, None


def generate_diff_check(diff_snippet, model, tokenizer):
    """Ask the LLM whether *diff_snippet* requires human code review.

    Retrieved historical review experiences (via the RHE subprocess) are
    appended to the prompt. Returns a 4-tuple
    (think_text, model_output, retrieved_experiences, messages);
    returns (None, None, None, None) when the prompt exceeds the token
    budget.
    """
    history_review_experiences = RHE_search_subprocess(RHE_index_path, RHE_meta_path, RHE_model_dir,
                                                       diff_snippet, TOPK_HRE, "search", "", "RQ1")
    # The experience block goes at the end of the prompt; omitted when empty.
    hre_block = ""

    if history_review_experiences:
        experiences = [
            entry["meta"]["experience"]
            for entry in history_review_experiences
            if "experience" in entry["meta"]
        ]
        txt = "\n".join(f"- {s}" for s in experiences)
        print(f"  generate_diff_check 检索到的历史经验数量：{len(experiences)} \n, 内容如下：\n{txt}")
        hre_block = f"\n\nHISTORICAL_REVIEW_EXPERIENCE:\n{txt}"

    content = f"""
TARGET_DIFF:
    {diff_snippet}

    {hre_block}
    """
    content, leave_token_len = trim_text_to_tokens_use_before_code(tokenizer, content, diff_snippet)

    if content is None:
        print("  generate_diff_check 输入文本过长，无法生成代码")
        return None, None, None, None

    # With-HRE prompt: the retrieved experiences are injected above.
    messages_hre = [
        {"role": "system",
         "content": (
             "You are an assistant that decides whether a given OpenHarmony diff requires human code review.\n"
             "Inputs:\n"
             "- TARGET_DIFF: a unified diff snippet to evaluate.\n"
             "- HISTORICAL_REVIEW_EXPERIENCE (optional): bullet points from past reviews.\n"
             "Your task:\n"
             "decide whether the TARGET change requires a human code review."
             "If experiences conflict with the diff, rely on the diff.\n"
             "Output format: return exactly one lowercase token: true or false. "
             "Do not output anything else (no explanation, no punctuation, no line breaks, no code blocks). "
             "No explanations, punctuation, spaces, or newlines. No code blocks.\n"
             "Repeat: the final output must be only true or false."
         )},
        {"role": "user",
         "content": content.strip() + "\n /no_think"},
    ]
    think_text, model_output = gen_with_messages(messages_hre, model, tokenizer, leave_token_len)
    return think_text, model_output, history_review_experiences, messages_hre


def summarize_experience(pr_number, repo_name, diff_snippet: str,
                         model_output: str, true_need_check, history_review_experiences, model, tokenizer):
    """Distill one reusable review guideline from this sample and store it.

    The model is shown the diff, its own decision, the ground-truth label
    and any retrieved experiences; the resulting guideline is pushed to
    the experience store through the RHE subprocess ("update" operation).
    Returns None; aborts early when the prompt exceeds the token budget.
    """
    experiences = []
    if history_review_experiences:
        experiences = [
            entry["meta"]["experience"]
            for entry in history_review_experiences
            if "experience" in entry["meta"]
        ]
    experience_txt = "\n".join(f"- {s}" for s in experiences)
    print(f"  summarize_experience 检索到的历史经验数量：\n{len(experiences)}")

    hre_block = f"\n\nHISTORICAL_REVIEW_EXPERIENCE:\n{experience_txt}"

    # Prompt the model for a single, generalizable guideline.
    sys_prompt = (
        "You are an assistant that writes a reusable Historical Review Experience guideline for OpenHarmony code-review decisions.\n"
        "Inputs:\n"
        "- TARGET_DIFF: a unified diff snippet.\n"
        "- MODEL_REVIEW_DECISION: model's true/false output for TARGET_DIFF.\n"
        "- TRUE_REVIEW_DECISION: the human-verified true/false label.\n"
        "- HISTORICAL_REVIEW_EXPERIENCE (optional): bullet points from past reviews.\n"
        "Your task:\n"
        "Write one concise, generalizable guideline that helps decide whether similar diffs require human review; "
        "prefer cues that explain why TRUE_REVIEW_DECISION is correct, especially when it differs from MODEL_REVIEW_DECISION.\n"
        "Output format: a single paragraph, plain text only (no Markdown, quotes, code blocks, or newlines)."
    )
    user_prompt = (
            "TARGET_DIFF:\n" + (diff_snippet) + "\n\n"
              "MODEL_REVIEW_DECISION:\n" + (str(model_output)) + "\n\n"
              "TRUE_REVIEW_DECISION:\n" + (str(true_need_check)) + "\n\n"
              "HISTORICAL_REVIEW_EXPERIENCE:\n" + hre_block + "\n"
    )
    user_prompt, leave_token_len = trim_text_to_tokens_use_before_code(tokenizer, user_prompt, diff_snippet)
    if user_prompt is None:
        print("  summarize_experience 输入文本过长，无法生成经验")
        return

    messages = [
        {"role": "system", "content": sys_prompt},
        {"role": "user", "content": user_prompt + "\n /no_think"}
    ]
    _, summary_experiences = gen_with_messages(messages, model, tokenizer, leave_token_len)

    update_experience_list = [{
        "pr_number": pr_number,
        "repo_name": repo_name,
        "diff_snippet": diff_snippet,
        "experience": summary_experiences
    }]

    # Serialize the payload and verify it round-trips as JSON before sending.
    try:
        update_experience_str = json.dumps(update_experience_list, ensure_ascii=False)
        json.loads(update_experience_str)
    except (TypeError, ValueError) as e:
        print(f"     update_experience无法被json化: {e}")
        update_experience_str = "[]"

    result = RHE_search_subprocess(RHE_index_path, RHE_meta_path, RHE_model_dir,
                                   diff_snippet, TOPK_HRE, "update", update_experience_str, "RQ1")
    print(f"  该数据第一次总结经验的结果：{result}，获得的经验是：{summary_experiences}")


def update_experience(pr_number, repo_name, diff_snippet: str,
                      model_output: str, true_need_check: str, history_review_experiences, model, tokenizer):
    """Revise each retrieved historical experience against this sample.

    For every retrieved experience, the model is asked to keep or rewrite
    the guideline so that it leads to the ground-truth decision; the
    revised guidelines are written back to the store via the RHE
    subprocess ("update" operation). Returns None.
    """
    experiences_dict = {}
    if history_review_experiences:
        for meta_experience in history_review_experiences:
            meta_data = meta_experience["meta"]
            if "experience" in meta_data:
                # Bug fix: use a local key tuple instead of reassigning
                # the function's own pr_number/repo_name parameters
                # (the originals were shadowed inside this loop).
                key = (meta_data["pr_number"], meta_data["repo_name"], meta_data["diff_snippet"])
                experiences_dict[key] = meta_data["experience"]
    if not experiences_dict:
        print(f"  仓库{repo_name},中{pr_number}没有找到历史经验，跳过更新")
        return
    print(f"  update_experience 检索到的历史经验数量：{len(experiences_dict)}")
    update_experiences_list = []
    for key, candidate_experience in experiences_dict.items():
        print(f"     正在处理第{key[0]}的历史经验 更新")
        # Ask the model to keep or revise this candidate guideline.
        sys_prompt = (
            "You are an assistant that optimizes a Historical Review Experience guideline used for OpenHarmony code-review decisions.\n"
            "Inputs:\n"
            "- HISTORICAL_REVIEW_EXPERIENCE: the candidate guideline.\n"
            "- TARGET_DIFF: a unified diff snippet.\n"
            "- MODEL_REVIEW_DECISION: model's true/false output for TARGET_DIFF.\n"
            "- TRUE_REVIEW_DECISION: the human-verified true/false label.\n"
            "Your task:\n"
            "Keep the candidate if it already leads to the TRUE_REVIEW_DECISION; "
            "otherwise, revise it so future models predict the TRUE_REVIEW_DECISION on similar diffs. "
            "Prefer cues that explain the ground-truth signal; remain project-agnostic and actionable\n"
            "Output format: a single paragraph, plain text only (no Markdown, quotes, code blocks, or newlines)."
        )
        user_prompt = (
                "HISTORICAL_REVIEW_EXPERIENCE:\n" + candidate_experience + "\n\n"
                "TARGET_DIFF:\n" + (diff_snippet) + "\n\n"
               "MODEL_REVIEW_DECISION:\n" + (str(model_output)) + "\n\n"
                "TRUE_REVIEW_DECISION:\n" + (str(true_need_check)) + "\n\n"
        )
        user_prompt, leave_token_len = trim_text_to_tokens_use_before_code(tokenizer, user_prompt, diff_snippet)

        if user_prompt is None:
            print("  update_experience 输入文本过长，无法生成经验")
            continue

        messages = [
            {"role": "system", "content": sys_prompt},
            {"role": "user", "content": user_prompt + "\n /no_think"}
        ]
        _, update_experiences = gen_with_messages(messages, model, tokenizer, leave_token_len)

        update_experiences_list.append({
            "pr_number": key[0],
            "repo_name": key[1],
            "diff_snippet": key[2],
            "experience": update_experiences
        })
    # Serialize the payload and verify it round-trips as JSON before sending.
    try:
        update_experience_str = json.dumps(update_experiences_list, ensure_ascii=False)
        json.loads(update_experience_str)
    except (TypeError, ValueError) as e:
        print(f"     update_experience无法被json化: {e}")
        update_experience_str = "[]"
    # (redundant str() wrapper around the already-str payload removed)
    result = RHE_search_subprocess(RHE_index_path, RHE_meta_path, RHE_model_dir,
                                   diff_snippet, TOPK_HRE, "update", update_experience_str, "RQ1")
    print(f"  更新经验的结果：{result}")


if __name__ == "__main__":
    # =========================
    # Config
    # =========================
    # Data and model paths
    DATA_PATH = "../../data/research_data/RQ2/test_data.jsonl"  # can also switch to train_data.jsonl
    MODEL_NAME = "/root/autodl-tmp/model/Qwen3-8B"

    # Experience store (append-only; never trimmed or merged)
    RHE_index_path = "./history_review_experience/need_check_hre_index.faiss"
    RHE_meta_path = "./history_review_experience/need_check_refinement_hre_meta.jsonl"
    RHE_model_dir = "/root/autodl-tmp/model/Qwen3-Embedding-0.6B"

    # Evaluation outputs
    RESULT_XLSX = "./result_RQ1/qwen3_8B_test_HRE_outputs.xlsx"
    METRIC_FILE = "./result_RQ1/qwen3_8B_test_HRE_metrics.txt"

    # Runtime parameters
    MAX_SEQ_LENGTH = 40960
    LOAD_IN_4BIT = False
    DTYPE = torch.bfloat16 if is_bfloat16_supported() else torch.float16
    TOPK_HRE = 6  # max number of experiences injected per sample

    # 0) Preparation
    ensure_dir(RESULT_XLSX)
    data = read_jsonl(DATA_PATH)

    # Checkpoint/resume: skip samples already written to the output xlsx
    processed_repo_pr_code = set()
    if os.path.exists(RESULT_XLSX):
        existing_df = pd.read_excel(RESULT_XLSX)
        # Build the set of already-processed (repo, pr_number, diff) keys
        for _, row in existing_df.iterrows():
            repo_name = row['repo']
            pr_number = row['pr_number']
            temp_code_diff = row['code_diff']
            processed_repo_pr_code.add((repo_name, pr_number, temp_code_diff))
    # Load the generation model and tokenizer
    gen_model, gen_tokenizer = load_model()

    # Aggregated output rows (also persisted incrementally via save_xlsx_append)
    results = []

    for item in tqdm(data, desc="RQ2-HRE processing", unit="item"):

        pr_number = item.get("pr_number")
        repo = item.get("repo_name")
        code_diff = item.get("diff_snippet") or ""
        true_review_decision = item.get("need_check")

        check_done_key = (repo, pr_number, code_diff)
        if check_done_key in processed_repo_pr_code:
            print(f"      跳过已处理样本：仓库 {repo}，PR号 {pr_number}。")
            continue
        after_code = item.get("after_code") or ""
        review_comment = item.get("comment") or ""
        print(f"      仓库 {repo}，PR号 {pr_number}，修复开始=======================")

        # 1) Ask the model whether the diff needs human review (with HRE)
        think1, out1, history_review_experiences, messages_hre = generate_diff_check(code_diff,
                                                                                     gen_model,
                                                                                     gen_tokenizer)
        # generate_diff_check returns all-None when the prompt was over budget
        if not think1 and not out1 and not history_review_experiences and not messages_hre:
            print(f"      仓库 {repo}，PR号 {pr_number}，判断异常")
            continue
        print(f"      仓库 {repo}，PR号 {pr_number}，判断结束, 真实值是 {true_review_decision}，模型输出是 {out1}")
        # Per-row correctness, computed before the result row is built
        row_is_correct = str(out1).strip().lower() == str(true_review_decision).strip().lower()

        # # 5) Summarize a new experience (disabled for this run)
        # print(f"      仓库 {repo}，PR号 {pr_number}，Summary_experience 开始")
        # summarize_experience(pr_number, repo, code_diff,
        #                      out1, true_review_decision,
        #                      history_review_experiences,
        #                      gen_model, gen_tokenizer)
        # # 6) Update existing experiences (disabled for this run)
        # print(f"      仓库 {repo}，PR号 {pr_number}，update_experience 开始")
        # update_experience(pr_number, repo, code_diff,
        #                   out1, true_review_decision,
        #                   history_review_experiences,
        #                   gen_model, gen_tokenizer)

        # 8) Append the record to the xlsx output
        row = {
            "pr_number": pr_number,
            "repo": repo,
            "code_diff": code_diff,
            "prompt_with_hre": messages_hre,
            "think_with_hre": think1,
            "output_with_hre": out1,
            "true_review_decision": true_review_decision,
            "is_correct": row_is_correct,  # per-row agreement with the label
        }
        save_xlsx_append(RESULT_XLSX, row)
        results.append(row)

    # 9) Aggregate evaluation
    # Compute accuracy and related metrics over everything saved so far
    df = pd.read_excel(RESULT_XLSX)

    # Per-row agreement between model output and ground truth.
    # NOTE: outputs that are neither 'true' nor 'false' still contribute
    # to is_match_flags but are excluded from the confusion-matrix counts.
    is_match_flags = []
    true_positive = 0
    false_positive = 0
    false_negative = 0
    true_negative = 0

    for _, row in df.iterrows():
        model_output_val = str(row['output_with_hre']).strip().lower()
        need_check_val = str(row['true_review_decision']).strip().lower()
        is_match = (model_output_val == need_check_val)
        is_match_flags.append(is_match)
        if model_output_val == 'true':
            if need_check_val == 'true':
                true_positive += 1
            else:
                false_positive += 1
        elif model_output_val == 'false':
            if need_check_val == 'true':
                false_negative += 1
            else:
                true_negative += 1

    # Combine the four counts into Accuracy / Precision / Recall / F1
    # (each guarded against division by zero)
    total = true_positive + false_positive + false_negative + true_negative
    accuracy = (true_positive + true_negative) / total if total > 0 else 0
    precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
    recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
    f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0

    print(f"Accuracy: {accuracy:.4f}")
    print(f"Precision: {precision:.4f}")
    print(f"Recall: {recall:.4f}")
    print(f"F1: {f1:.4f}")

    with open(METRIC_FILE, "w") as f:
        f.write(f"Accuracy: {accuracy:.4f}\n")
        f.write(f"Precision: {precision:.4f}\n")
        f.write(f"Recall: {recall:.4f}\n")
        f.write(f"F1: {f1:.4f}\n")

    print("Metric computation done; results saved.")