# Qwen3-8B_RQ2-HRE.py
# -*- coding: utf-8 -*-
import json
import os
import re
import subprocess
import sys
from typing import List, Dict, Any, Tuple

import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from unsloth import FastLanguageModel, is_bfloat16_supported, get_chat_template

from eval_code_sim import calculate_exact_match, calculate_bleu_score, calculate_codebleu_score


def RHE_search_subprocess(RHE_index_path, RHE_meta_path, model_dir, query, top_k, operation, experiences, RQ):
    """Invoke the HRE_RAG.py helper in a subprocess.

    For operation == "search", returns the JSON-decoded retrieval result
    (a list, possibly empty).  For any other operation (e.g. "update"),
    returns the helper's raw stdout as a string.  On any failure the
    error is logged and an empty list is returned, so callers can treat
    "no result" and "failed" uniformly.
    """
    # Resolve absolute paths relative to this file to avoid CWD issues
    # (the original joined just the bare filename, which silently depended
    # on the current working directory).
    script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "HRE_RAG.py")
    cmd = [
        sys.executable, script_path,
        "--index", RHE_index_path,
        "--meta", RHE_meta_path,
        "--model", model_dir,
        "--query", query,
        "--topk", str(top_k),
        "--operation", operation,
        "--experiences", experiences,
        "--RQ", RQ,
    ]
    try:
        p = subprocess.run(cmd, capture_output=True, text=True)
        if p.returncode != 0:
            raise RuntimeError(p.stderr.strip() or "RHE subprocess failed")
        if operation == "search":
            try:
                parsed = json.loads(p.stdout.strip() or "[]")
            except Exception:
                # If the subprocess printed diagnostics to stdout, surface them
                raise RuntimeError(f"     RHE returned non‑JSON output: {p.stdout.strip()}")
            if not parsed:
                print("     RHE returned non 无匹配结果")
            return parsed
        else:
            parsed = p.stdout.strip()
            # parsed is a str here; the old `parsed == []` check could never
            # be true, so test emptiness via truthiness instead.
            if not parsed:
                print("     RHE returned non 无匹配结果")
            return parsed
    except Exception as e:
        print(f"     RHE subprocess failed: {e}")
        return []


# =========================
# Utils
# =========================
def ensure_dir(p: str):
    """Make sure the directory for *p* exists.

    If *p* carries a file extension, its parent directory is created;
    otherwise *p* itself is treated as the directory to create.
    """
    target = os.path.dirname(p) if os.path.splitext(p)[1] else p
    if target and not os.path.exists(target):
        os.makedirs(target, exist_ok=True)


def save_xlsx_append(path: str, df_row: Dict[str, Any]):
    """Append one row (given as a dict) to an xlsx file, creating it if absent."""
    ensure_dir(path)
    new_frame = pd.DataFrame([df_row])
    if os.path.exists(path):
        existing = pd.read_excel(path)
        combined = pd.concat([existing, new_frame], ignore_index=True)
    else:
        combined = new_frame
    # Rewrite the whole sheet each call; openpyxl has no true append mode.
    combined.to_excel(path, index=False, engine="openpyxl")


def read_jsonl(path: str) -> List[dict]:
    """Load a JSON-Lines file, skipping blank and unparseable lines.

    Returns an empty list when the file does not exist.
    """
    if not os.path.exists(path):
        return []
    records = []
    with open(path, "r", encoding="utf-8") as fh:
        for raw in fh:
            stripped = raw.strip()
            if not stripped:
                continue
            try:
                records.append(json.loads(stripped))
            except Exception:
                # Tolerate corrupt lines rather than aborting the whole read.
                continue
    return records


# =========================
# LLM I/O
# =========================
def load_model():
    """Load the base model/tokenizer and attach the qwen-2.5 chat template.

    Reads MODEL_NAME, DTYPE, LOAD_IN_4BIT and MAX_SEQ_LENGTH from module
    globals (set in the __main__ block).  Returns (model, tokenizer).
    """
    print("    加载模型中 ...")
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name=MODEL_NAME,
        max_seq_length=MAX_SEQ_LENGTH,
        dtype=DTYPE,
        load_in_4bit=LOAD_IN_4BIT,
    )
    # The template controls how chat messages are serialized for generation.
    tokenizer = get_chat_template(tokenizer, chat_template="qwen-2.5")
    print("    模型就绪")
    return model, tokenizer


def gen_with_messages(messages, model, tokenizer, leave_token_len) -> Tuple[str, str]:
    """Run one chat completion and split the decoded text into (think, answer).

    *leave_token_len* caps the number of newly generated tokens.  The
    reasoning trace is whatever sits inside <think>...</think>; the answer
    is the text between </think> and the <|im_end|> end-of-turn marker
    (falling back to the whole decoded string when no marker matches).
    """
    FastLanguageModel.for_inference(model)
    input_ids = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        max_seq_length=MAX_SEQ_LENGTH,
        return_tensors="pt",
    ).to("cuda")
    generated = model.generate(input_ids, max_new_tokens=leave_token_len, use_cache=True)
    decoded = tokenizer.batch_decode(generated)[-1]

    # Extract the reasoning trace, if the model emitted one.
    think_match = re.search(r"<think>(.*?)</think>", decoded, re.DOTALL)
    think_content = think_match.group(1).strip() if think_match else ""

    # Extract the answer portion after the reasoning trace.
    output_match = re.search(r"</think>(.*?)<\|im_end\|>", decoded, re.DOTALL)
    output_content = output_match.group(1).strip() if output_match else decoded.strip()

    # Strip an erroneous ```json fence if the model wrapped its answer anyway.
    body_lines = output_content.splitlines()
    if len(body_lines) >= 2 and body_lines[0].strip().lower() == "```json" and body_lines[-1].strip() == "```":
        output_content = "\n".join(body_lines[1:-1])

    return think_content, output_content


def trim_text_to_tokens_use_before_code(tokenizer, text: str, before_code: str) -> "Tuple[str | None, int | None]":
    """Check *text* against the token budget left over after *before_code*.

    Returns (text, remaining_budget) when *text* fits within
    MAX_SEQ_LENGTH minus the token count of *before_code*; an empty
    *text* is returned as-is with a None budget.  When *text* is too
    long, the overflow is logged and (None, None) is returned so callers
    skip the sample — actual trimming is intentionally disabled (see the
    commented-out return below).
    """
    if not text:
        return text, None
    enc = tokenizer(text, add_special_tokens=False, return_attention_mask=False, return_tensors="pt")
    enc_before_code = tokenizer(before_code, add_special_tokens=False, return_attention_mask=False, return_tensors="pt")
    ids_before_code = enc_before_code["input_ids"][0]
    ids = enc["input_ids"][0]
    # Budget = global context window minus the tokens before_code occupies.
    if ids.shape[0] <= MAX_SEQ_LENGTH - ids_before_code.shape[0]:
        return text,(MAX_SEQ_LENGTH - ids_before_code.shape[0])
    before_token_len=ids.shape[0]
    trimmed = ids[:MAX_SEQ_LENGTH - ids_before_code.shape[0]]
    after_token_len=trimmed.shape[0]
    print(f"     原始长度 {before_token_len} -> 过长删除后的长度 {after_token_len}")
    # return tokenizer.decode(trimmed, skip_special_tokens=True),(MAX_SEQ_LENGTH - ids_before_code.shape[0])
    return None,None


def generate_refinement_code(before_code, review_comment, model, tokenizer):
    """Repair *before_code* guided by the review comment plus retrieved HRE.

    Searches the experience store for entries similar to the input code,
    injects them into the prompt, and asks the model for the repaired
    code.  Returns (think, output, retrieved_experiences, messages), or
    four Nones when the prompt exceeds the token budget.
    """
    retrieved = RHE_search_subprocess(RHE_index_path, RHE_meta_path, RHE_model_dir,
                                      before_code, TOPK_HRE, "search", "", "RQ2")
    # The experience section goes at the end of the prompt; omit when empty.
    experience_section = ""

    if retrieved:
        experiences = [hit["meta"]["experience"] for hit in retrieved if "experience" in hit["meta"]]
        bullet_text = "\n".join(f"- {exp}" for exp in experiences)
        print(f"  generate_refinement_code 检索到的历史经验数量：{len(experiences)} \n, 内容如下：\n{bullet_text}")
        experience_section = f"\n\nHISTORICAL_REVIEW_EXPERIENCE:\n{bullet_text}"

    content = f"""
ORIGINAL_CODE:
    {before_code}

REVIEW_COMMENT:
    {review_comment}
    {experience_section}
    """
    content, leave_token_len = trim_text_to_tokens_use_before_code(tokenizer, content, before_code)

    if content is None:
        print("  generate_refinement_code 输入文本过长，无法生成代码")
        return None, None, None, None

    # Repair prompt with the (possibly noisy) experiences injected.
    system_text = (
        "You are an OpenHarmony code repair assistant.\n"
        "Input: ORIGINAL_CODE (the original code snippet), REVIEW_COMMENT and HISTORICAL_REVIEW_EXPERIENCE that may be noisy or unreliable.\n"
        "Your task:\n"
        "• Produce the repaired code only. Do not include any explanations, comments, diffs, or surrounding text.\n"
        "• Do NOT wrap the output in Markdown fences, XML/HTML tags, or any other markers.\n"
        "• Weigh the REVIEW_COMMENT and HISTORICAL_REVIEW_EXPERIENCE carefully; "
        " follow it only when it clearly improves correctness/security/clarity. "
        " If it conflicts or seems wrong, ignore it.\n"
        "• Match the input scope (file→file, snippet→snippet).\n"
        "• When things conflict, REVIEW_COMMENT and HISTORICAL_REVIEW_EXPERIENCE are weak signals.\n"
        "• If no fix is needed or a safe fix cannot be derived, return ORIGINAL_CODE unchanged.\n"
        "• Start with the first character of code and end with the last — nothing else.\n"
    )
    prompt_messages = [
        {"role": "system", "content": system_text},
        {"role": "user", "content": content.strip() + "\n /no_think"},
    ]
    think_part, answer = gen_with_messages(prompt_messages, model, tokenizer, leave_token_len)
    return think_part, answer, retrieved, prompt_messages


def summarize_experience(pr_number, repo_name, before_code: str, review_comment: str,
                         model_output: str, after_code: str, history_review_experiences, model, tokenizer):
    """Distill one reusable repair experience from this sample and store it.

    Compares the model's repair against the ground truth, asks the LLM
    for a one-paragraph experience note, and appends it to the HRE store
    via the subprocess helper.  Returns None; failures are logged and the
    sample is simply skipped.
    """
    experiences = []
    if history_review_experiences:
        for meta_experience in history_review_experiences:
            meta_data = meta_experience["meta"]
            if "experience" in meta_data:
                experiences.append(meta_data["experience"])
    experience_txt = "\n".join([f"- {s}" for s in experiences])
    print(f"  summarize_experience 检索到的历史经验数量：\n{len(experiences)}")

    # Prompt for generating one experience note.
    # NOTE(review): "why is the ground truth" reads as broken English in the
    # original prompt; kept verbatim to avoid altering model behavior.
    sys_prompt = (
        "You are the Historical Review Experience summary expert for OpenHarmony code repair.\n"
        "Your role is to synthesize reusable, generalizable guidance for future automated repairs.\n"
        "You will receive ORIGINAL_CODE, MODEL_REPAIRED_CODE, GROUND_TRUTH_CODE, REVIEW_COMMENT, "
        "and HISTORICAL_REVIEW_EXPERIENCE.\n"
        "Requirements:\n"
        "- Be language- and project-agnostic (avoid repo or file names).\n"
        "- Be actionable and specific.\n"
        "- When MODEL_REPAIRED_CODE differs from GROUND_TRUTH_CODE, explain the key differences and why is the ground truth.\n"
        "- Highlight common pitfalls and failure modes; include minimal self-contained code or pseudocode snippets when clarifying.\n"
        "- Do not ask questions or include meta commentary.\n"
        "- Output only one paragraph."
    )
    user_prompt = (
        "Using the following artifacts, write experience notes suitable for reuse in future code repairs.\n\n"
        "ORIGINAL_CODE:\n" + (before_code or "") + "\n\n"
        "MODEL_REPAIRED_CODE:\n" + (model_output or "") + "\n\n"
        "GROUND_TRUTH_CODE:\n" + (after_code or "") + "\n\n"
        "REVIEW_COMMENT:\n" + (review_comment or "") + "\n"
        # BUG FIX: the old code concatenated hre_block — which already began
        # with "\n\nHISTORICAL_REVIEW_EXPERIENCE:\n" — after a second
        # "HISTORICAL_REVIEW_EXPERIENCE:" header, duplicating the section
        # header in the prompt.  Join the plain bullet text instead.
        "HISTORICAL_REVIEW_EXPERIENCE:\n" + experience_txt + "\n"
    )
    user_prompt, leave_token_len = trim_text_to_tokens_use_before_code(tokenizer, user_prompt, before_code)
    if user_prompt is None:
        print("  summarize_experience 输入文本过长，无法生成经验")
        return

    messages = [
        {"role": "system", "content": sys_prompt},
        {"role": "user", "content": user_prompt + "\n /no_think"}
    ]
    _, summary_experiences = gen_with_messages(messages, model, tokenizer, leave_token_len)

    update_experience_list = [{
        "pr_number": pr_number,
        "repo_name": repo_name,
        "before_code": before_code,
        "experience": summary_experiences
    }]

    # Serialize and verify the payload round-trips as JSON before sending.
    try:
        update_experience_str = json.dumps(update_experience_list, ensure_ascii=False)
        json.loads(update_experience_str)
    except (TypeError, ValueError) as e:
        print(f"     update_experience无法被json化: {e}")
        update_experience_str = "[]"

    result = RHE_search_subprocess(RHE_index_path, RHE_meta_path, RHE_model_dir,
                                   before_code, TOPK_HRE, "update", update_experience_str, "RQ2")
    print(f"  保存经验的结果：{result}")


def update_experience(pr_number, repo_name, before_code: str, review_comment: str,
                      model_output: str, after_code: str, history_review_experiences, model, tokenizer):
    """Refine each retrieved historical experience in light of this sample.

    For every retrieved experience, asks the LLM whether the experience
    should be optimized given the current sample's original code, the
    model's repair, and the ground truth, then writes the (possibly
    unchanged) experiences back to the store.

    BUG FIX: the original loop re-bound the *pr_number*, *repo_name* and
    *before_code* parameters to the metadata fields of the retrieved
    records, so the later prompts (ORIGINAL_CODE) and the final store
    update used the LAST retrieved record's code instead of the current
    sample's — while model_output/after_code still belonged to the
    current sample.  Metadata now stays in loop-local names.
    """
    experiences_dict = {}
    if history_review_experiences:
        for meta_experience in history_review_experiences:
            meta_data = meta_experience["meta"]
            if "experience" in meta_data:
                # Keep metadata local; do not shadow the function parameters.
                key = (meta_data["pr_number"], meta_data["repo_name"], meta_data["before_code"])
                experiences_dict[key] = meta_data["experience"]
    if not experiences_dict:
        print(f"  仓库{repo_name},中{pr_number}没有找到历史经验，跳过更新")
        return
    print(f"  update_experience 检索到的历史经验数量：{len(experiences_dict)}")
    update_experiences_list = []
    for key, candidate_experience in experiences_dict.items():
        print(f"     正在处理第{key[0]}的历史经验 更新")
        # Prompt asking whether this one experience needs optimization.
        # (The missing "\n" after "...should be optimized." in the original
        # glued two sentences together; fixed here.)
        sys_prompt = (
            "You are a Historical Review Experience Optimizer for automated code repair on OpenHarmony.\n"
            "The HISTORICAL_REVIEW_EXPERIENCE was used to repair the ORIGINAL_CODE, resulting in the MODEL_REPAIRED_CODE.\n"
            "The actual correct repair is represented by the GROUND_TRUTH_CODE.\n"
            "Analyze these three elements to determine if the historical experience should be optimized.\n"
            "• If NO optimization is needed, output the ORIGINAL HISTORICAL_REVIEW_EXPERIENCE content.\n"
            "• If optimization is needed, output the improved experience content.\n"
            "Rules:\n"
            "- Output ONLY the final experience text (no explanations, no analysis, no metadata).\n"
            "- Keep it language- and project-agnostic; be actionable and specific; you may include minimal pseudo-code if clarifying.\n"
            "- Do not ask questions or include meta commentary.\n"
            "- Output only one paragraph."
        )
        user_prompt = (
            "HISTORICAL_REVIEW_EXPERIENCE:\n" + candidate_experience + "\n\n"
            "ORIGINAL_CODE:\n" + (before_code or "") + "\n\n"
            "MODEL_REPAIRED_CODE:\n" + (model_output or "") + "\n\n"
            "GROUND_TRUTH_CODE:\n" + (after_code or "") + "\n\n"
            "Decide and output the FINAL EXPERIENCE TEXT now."
        )
        user_prompt, leave_token_len = trim_text_to_tokens_use_before_code(tokenizer, user_prompt, before_code)

        if user_prompt is None:
            print("  update_experience 输入文本过长，无法生成经验")
            continue

        messages = [
            {"role": "system", "content": sys_prompt},
            {"role": "user", "content": user_prompt + "\n /no_think"}
        ]
        _, update_experiences = gen_with_messages(messages, model, tokenizer, leave_token_len)

        # The original (pr_number, repo_name, before_code) key identifies the
        # stored record being updated.
        update_experiences_list.append({
            "pr_number": key[0],
            "repo_name": key[1],
            "before_code": key[2],
            "experience": update_experiences
        })
    # Serialize and verify the payload round-trips as JSON before sending.
    try:
        update_experience_str = json.dumps(update_experiences_list, ensure_ascii=False)
        json.loads(update_experience_str)
    except (TypeError, ValueError) as e:
        print(f"     update_experience无法被json化: {e}")
        update_experience_str = "[]"
    # json.dumps already returns str, so the redundant str() wrapper is gone.
    result = RHE_search_subprocess(RHE_index_path, RHE_meta_path, RHE_model_dir,
                                   before_code, TOPK_HRE, "update", update_experience_str, "RQ2")
    print(f"  更新经验的结果：{result}")


if __name__ == "__main__":
    # =========================
    # Config
    # =========================
    # Data and model paths
    DATA_PATH = "../../data/research_data/RQ2/train_data.jsonl"  # can be switched to another split
    MODEL_NAME = "/root/autodl-tmp/model/Qwen3-8B"

    # Experience store (append-only; entries are never pruned or merged)
    RHE_index_path = "./history_review_experience/code_refinement_hre_index.faiss"
    RHE_meta_path = "./history_review_experience/code_refinement_hre_meta.jsonl"
    RHE_model_dir = "/root/autodl-tmp/model/Qwen3-Embedding-0.6B"

    # Evaluation outputs
    RESULT_XLSX = "./result_RQ2/qwen3_8B_HRE_outputs.xlsx"
    METRIC_FILE = "./result_RQ2/qwen3_8B_HRE_metrics.txt"

    # Runtime parameters
    MAX_SEQ_LENGTH = 40960
    LOAD_IN_4BIT = False
    DTYPE = torch.bfloat16 if is_bfloat16_supported() else torch.float16
    TOPK_HRE = 6  # upper bound on experiences injected per sample

    # 0) Preparation
    ensure_dir(RESULT_XLSX)
    data = read_jsonl(DATA_PATH)

    # Resume support: skip samples already recorded in the output workbook.
    processed_repo_pr_code = set()
    if os.path.exists(RESULT_XLSX):
        existing_df = pd.read_excel(RESULT_XLSX)
        # Build the set of already-processed (repo, pr_number, before_code) keys
        for _, row in existing_df.iterrows():
            repo_name = row['repo']
            pr_number = row['pr_number']
            before_code=row['before_code']
            processed_repo_pr_code.add((repo_name, pr_number, before_code))
    # Load the model
    model, tokenizer = load_model()

    # Aggregated per-sample results
    results = []

    for item in tqdm(data, desc="RQ2-HRE processing", unit="item"):


        pr_number = item.get("pr_number")
        repo = item.get("repo_name")
        before_code = item.get("before_code") or ""



        if not item.get("need_check", True):
            print(f"      跳过不需要检查的样本：仓库 {repo}，PR号 {pr_number}。========================")
            # Only evaluate samples that need repair (consistent with existing logic)
            continue

        # Hard-coded skip list: samples known to run excessively long.
        if ((repo=="multimedia_audio_framework" and pr_number==4374) or (repo=="multimedia_audio_framework" and pr_number==2076) or
                (repo == "multimedia_audio_framework" and pr_number == 3513) or
                (repo=="xts_acts" and pr_number==20708) or (repo=="xts_acts" and pr_number==9294) or (repo=="xts_acts" and pr_number==22144) or  (repo=="xts_acts" and pr_number==20018)):
            print("运行过长的样本，跳过")
            continue

        # Resume check against the previously processed keys.
        check_done_key = (repo, pr_number, before_code)
        if check_done_key in processed_repo_pr_code:
            print(f"      跳过已处理样本：仓库 {repo}，PR号 {pr_number}。")
            continue
        after_code = item.get("after_code") or ""
        review_comment = item.get("comment") or ""
        print(f"      仓库 {repo}，PR号 {pr_number}，修复开始=======================")

        # 1) Repair the code (with retrieved historical experience injected)
        think1, out1, history_review_experiences, messages_hre = generate_refinement_code(before_code, review_comment,
                                                                                          model,
                                                                                          tokenizer)
        # All four values None means the prompt exceeded the token budget.
        if not think1 and not out1 and not history_review_experiences and not messages_hre:
            print(f"      仓库 {repo}，PR号 {pr_number}，修复异常")
            continue
        print(f"      仓库 {repo}，PR号 {pr_number}，修复结果结束")
        # 4) Compute metrics against the ground-truth code
        em1 = calculate_exact_match(out1, after_code)
        bleu1= calculate_bleu_score(out1,after_code)
        cbleu1 =calculate_codebleu_score(out1, after_code)

        # 5) Summarize a new experience from this sample
        print(f"      仓库 {repo}，PR号 {pr_number}，Summary_experience 开始")
        summarize_experience(pr_number, repo, before_code, review_comment, out1, after_code, history_review_experiences,
                             model, tokenizer)
        # 6) Update the experiences that were retrieved for this sample
        print(f"      仓库 {repo}，PR号 {pr_number}，update_experience 开始")
        update_experience(pr_number, repo, before_code, review_comment, out1, after_code, history_review_experiences,
                             model, tokenizer)


        # 8) Append the record to the xlsx workbook
        row = {
            "pr_number": pr_number,
            "repo": repo,
            "before_code": before_code,
            "review_comment": review_comment,
            "prompt_with_hre": messages_hre,
            "think_with_hre": think1,
            "output_with_hre": out1,
            "after_code": after_code,
            "EM_hre": em1,
            "BLEU_hre": bleu1,
            "CodeBLEU_hre": cbleu1,
        }
        save_xlsx_append(RESULT_XLSX, row)
        results.append(row)

    # 9) Aggregate evaluation over this run's samples only
    # (NOTE(review): rows restored from a previous run via the resume logic
    # are not included in these means — confirm this is intended.)
    if results:
        em_h = np.mean([r["EM_hre"] for r in results])
        bleu_h = np.mean([r["BLEU_hre"] for r in results])
        cbleu_h = np.mean([r["CodeBLEU_hre"] for r in results])

        ensure_dir(METRIC_FILE)
        with open(METRIC_FILE, "w", encoding="utf-8") as f:
            f.write("=== RQ2 HRE Evaluation ===\n")
            f.write(f"With HRE   EM={em_h:.4f} BLEU={bleu_h:.4f} CodeBLEU={cbleu_h:.4f}\n")
        print("      评估完成，结果写入：", METRIC_FILE)
    else:
        print("      无可评估样本。")