import json
import os
import re
import subprocess
import sys

import pandas as pd
import torch
from tqdm import tqdm
from unsloth import FastLanguageModel, is_bfloat16_supported, get_chat_template

# Configuration parameters
max_seq_length = 40960  # model context window in tokens — TODO confirm matches the deployed Qwen3-8B config
load_in_4bit = False  # 4-bit quantization must stay disabled (original note: 必须关闭4bit量化)
dtype = torch.bfloat16 if is_bfloat16_supported() else torch.float16  # prefer bf16 when the GPU supports it
data_path = "../data/research_data/RQ1/test_data.jsonl"  # JSONL test set, one record per line
model_name = "/root/autodl-tmp/model/Qwen3-8B"  # local path to the model weights
eval_result_file = "result/qwen3_8B_thinking_RAG_metrics.txt"  # final metrics are written here
result_file_name = "result/qwen3_8B_model_thinking_rag_outputs.xlsx"  # per-sample model outputs (resume file)


# RAG retrieval resources: embedding model dir, FAISS index, and JSONL metadata
RAG_EMB_MODEL_DIR = "/root/autodl-tmp/model/Qwen3-Embedding-0.6B"
rag_index_path = "../data/research_data/RQ1_rag/combined_index.faiss"
rag_meta_path = "../data/research_data/RQ1_rag/combined_meta.jsonl"


def rag_search_subprocess(index_path, meta_path, model_dir, query, top_k=5):
    """Run the external RAG retrieval script and return its parsed JSON result.

    Parameters:
        index_path: path to the FAISS index file.
        meta_path: path to the JSONL metadata file.
        model_dir: directory of the embedding model used by the script.
        query: query text (here, a diff snippet).
        top_k: number of nearest neighbours to request.

    Returns:
        The parsed JSON output of the subprocess (a list; empty when no match).

    Raises:
        RuntimeError: if the subprocess exits non-zero or prints non-JSON output.
    """
    # Bug fix: resolve the helper script relative to THIS file, as the original
    # comment claimed but did not do — a plain relative path breaks whenever
    # the script is launched from a different working directory.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    script_path = os.path.join(base_dir, "..", "data", "research_data", "RQ1_RAG.py")
    cmd = [
        sys.executable, script_path,
        # Bug fix: use the function's own parameters; the original passed the
        # module-level globals, silently ignoring index_path/meta_path.
        "--index", index_path,
        "--meta", meta_path,
        "--model", model_dir,
        "--query", query,
        "--topk", str(top_k),
    ]
    p = subprocess.run(cmd, capture_output=True, text=True)
    if p.returncode != 0:
        raise RuntimeError(p.stderr.strip() or "RAG subprocess failed")
    try:
        parsed = json.loads(p.stdout.strip() or "[]")
    except Exception:
        # If the subprocess printed diagnostics to stdout, surface them.
        raise RuntimeError(f"RAG returned non‑JSON output: {p.stdout.strip()}")
    if not parsed:
        # Bug fix: the original message was garbled ("RAG returned non 无匹配结果").
        print("RAG returned no matching results")
    return parsed


def trim_text_to_tokens(tokenizer, text: str, max_tokens: int) -> str:
    """Clamp ``text`` to at most ``max_tokens`` tokens, decoding back to a string."""
    # Empty (or None) input: nothing to trim.
    if not text:
        return text
    encoded = tokenizer(
        text,
        add_special_tokens=False,
        return_attention_mask=False,
        return_tensors="pt",
    )
    token_ids = encoded["input_ids"][0]
    # Already within budget — return the original string untouched so we do
    # not lose information through an encode/decode round-trip.
    if token_ids.shape[0] <= max_tokens:
        return text
    # Keep only the leading max_tokens ids and turn them back into text.
    return tokenizer.decode(token_ids[:max_tokens], skip_special_tokens=True)


# Load the local JSONL test set: each line is one JSON record.
with open(data_path, 'r', encoding='utf-8') as fp:
    test_data = [json.loads(raw_line) for raw_line in fp]


# Model-output post-processing function
def model_output(messages, model, tokenizer):
    """Run one chat completion and split the reply into thinking and answer parts.

    Parameters:
        messages: chat messages in role/content dict format.
        model: the loaded unsloth model (inference happens on CUDA).
        tokenizer: the chat-template tokenizer matching the model.

    Returns:
        (think_content, output_content) — the text inside <think>...</think>
        and the text between </think> and <|im_end|>.  When a section cannot
        be located, the original Chinese placeholder strings are returned.
    """
    FastLanguageModel.for_inference(model)  # enable unsloth's 2x faster native inference path
    # Bug fix: `apply_chat_template` has no `max_seq_length` parameter, so the
    # original kwarg was silently ignored and overlong prompts were never
    # truncated.  Use the documented truncation/max_length arguments, leaving
    # headroom for the 4096 tokens generated below.
    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,  # must be set for generation
        truncation=True,
        max_length=max_seq_length - 4096,
        return_tensors="pt",
    ).to("cuda")
    outputs = model.generate(inputs, max_new_tokens=4096, use_cache=True)
    final_answer = tokenizer.batch_decode(outputs)

    # Extract the <think>...</think> reasoning section, if present.
    think_pattern = r"<think>(.*?)</think>"
    think_match = re.search(think_pattern, final_answer[-1], re.DOTALL)
    think_content = think_match.group(1).strip() if think_match else "无思考过程"

    # Everything between </think> and the end-of-turn token is the answer.
    output_pattern = r"</think>(.*?)<\|im_end\|>"
    output_match = re.search(output_pattern, final_answer[-1], re.DOTALL)
    output_content = output_match.group(1).strip() if output_match else "模型无输出"

    # Strip a ```json fenced block if the model wrapped its answer in one.
    output_lines = output_content.splitlines()
    if len(output_lines) >= 2 and output_lines[0].strip() == "```json" and output_lines[-1].strip() == "```":
        output_content = "\n".join(output_lines[1:-1])

    print("think_content=======================")
    print(think_content)
    print("output_content=======================")
    print(output_content)
    print("==============================================")
    return think_content, output_content


# Load the (fine-tuned) model and its tokenizer for inference.
print("模型加载开始=======================")
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_name,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
    max_seq_length=max_seq_length,
)
# Attach the Qwen-2.5 chat template so apply_chat_template formats prompts
# the way this model family expects.
tokenizer = get_chat_template(
    tokenizer,
    chat_template="qwen-2.5",
)
print("模型加载完成=======================")

# Resume support: if the output workbook already exists, record which
# (repo, pr_number) pairs were processed so they can be skipped below.
processed_repo_PR_number = {}
if os.path.exists(result_file_name):
    existing_df = pd.read_excel(result_file_name)
    # Map each repo name to the set of PR numbers already handled.
    for _, row in existing_df.iterrows():
        processed_repo_PR_number.setdefault(row['repo'], set()).add(row['pr_number'])
else:
    # First run: create an empty workbook with the expected column layout.
    existing_df = pd.DataFrame(
        columns=["pr_number", "repo", 'diff_snippet', "prompt", "模型思考", "模型输出", "need_check"])
    existing_df.to_excel(result_file_name, index=False, engine="openpyxl")

# Main inference loop with a tqdm progress bar; results are appended to the
# Excel workbook after every sample so progress survives interruption.
with pd.ExcelWriter(result_file_name, engine="openpyxl", mode="a", if_sheet_exists="replace") as writer:
    for item in tqdm(test_data, desc="处理进度", unit="条"):
        pr_number = item["pr_number"]
        repo = item['repo_name']
        need_check = item["need_check"]
        diff_snippet = item["diff_snippet"]

        # Skip samples already present in the workbook (resume support).
        if repo in processed_repo_PR_number and pr_number in processed_repo_PR_number[repo]:
            print(f"{repo}中的第{pr_number}号 已存在，跳过处理=======================")
            continue

        # === Retrieve similar diffs via RAG ===
        retrievals = []
        if rag_index_path and rag_meta_path:
            try:
                rag_results = rag_search_subprocess(rag_index_path, rag_meta_path, RAG_EMB_MODEL_DIR, diff_snippet,
                                                    top_k=5)
                for i, r in enumerate(rag_results, 1):
                    meta = (r.get("meta") or {})
                    original = (meta.get("original_item") or {})
                    candidate_snippet = (original.get("diff_snippet") or "")
                    need_check_candidate = original.get("need_check")
                    fname = ((original.get("before_file") or original.get("after_file") or {}) or {}).get("filename")
                    repo_name_candidate = original.get("repo_name")
                    score = r.get("score", 0.0)
                    # Bug fix: the original used "\\n", which put a literal
                    # backslash-n in the prompt instead of a real newline
                    # between the header line and the diff snippet.
                    retrievals.append(
                        f"[R{i} score={score:.4f} need_check={need_check_candidate} repo_name={repo_name_candidate} file={fname}]\n{candidate_snippet}")
            except Exception as e:
                # Retrieval is best-effort: surface the failure in the prompt
                # rather than aborting the whole evaluation run.
                retrievals.append(f"[RAG unavailable: {e}]")

        # Bug fix: join retrieval blocks with real blank lines (the system
        # prompt promises "Blocks are separated by ONE blank line"), not a
        # literal "\\n\\n" sequence.
        retrieval_text = "\n\n".join(retrievals)
        user_content = f"""TARGET_DIFF:
            {diff_snippet}


SIMILAR_DIFFS:
            {retrieval_text}
                """
        # Leave ~1024 tokens of headroom below the model's context window.
        user_content = trim_text_to_tokens(tokenizer, user_content, max_seq_length - 1024)
        # === Messages with retrieved context ===
        messages = [
            {
                "role": "system",
                "content": (
                    "You are a OpenHarmony code review decision assistant for the OpenHarmony project.\n"
                    "You will be given a TARGET diff snippet(TARGET_DIFF) and, when available, several SIMILAR diff snippets(SIMILAR_DIFFS). "
                    "SIMILAR_DIFFS block format:\n"
                    "  Header line: [R{k} score=<float> need_check=<true|false> repo_name=<str> file=<path>]\n"
                    "  Immediately followed by a unified diff snippet (lines like  '@@', '+', '-').\n"
                    "  Blocks are separated by ONE blank line.\n"
                    "  Notes: score ∈ [0,1] (higher = SIMILAR_DIFFS is more similar with TARGET_DIFF);Treat these only as weak signals.\n"
                    "Your task: decide whether the TARGET change requires a human code review.\n"
                    "You must output only the lowercase word true or false. Do not output anything else (no explanation, no punctuation, no line breaks, no code blocks). "
                    "If the retrieved context conflicts with the TARGET diff, base the decision on the TARGET diff.\n"
                    "Repeat: the final output must be only true or false."
                ),
            },
            {
                "role": "user",
                "content": user_content,
            },
        ]

        print(f"{repo}中的第{pr_number}号 模型输出开始=======================")
        think_content, output_content = model_output(messages, model, tokenizer)

        # Append the new record to the in-memory DataFrame.
        new_row = {
            "pr_number": pr_number,
            "repo": repo,
            'diff_snippet': diff_snippet,
            "prompt": messages,
            "模型思考": think_content,
            "模型输出": output_content,
            "need_check": need_check
        }
        existing_df = pd.concat([existing_df, pd.DataFrame([new_row])], ignore_index=True)

        # Persist to the Excel file immediately.
        existing_df.to_excel(writer, index=False)
        writer.book.save(result_file_name)  # force a flush to disk
        print(f"{repo}中的第{pr_number}号 模型输出结束=======================")

print(f"输出结果已保存到 `{result_file_name}`")
# After inference, re-read the workbook and compute classification metrics
# from the model output vs. the need_check ground-truth label.
print("开始计算模型准确率等指标=======================")
df = pd.read_excel(result_file_name)

# Confusion-matrix counters ("true" means the change needs human review).
true_positive = 0   # predicted true, label true
false_positive = 0  # predicted true, label false
false_negative = 0  # predicted false, label true
true_negative = 0   # predicted false, label false

for index, row in df.iterrows():
    # Bug fix: the originals were named `model_output`/`need_check`, shadowing
    # the model_output() function and the earlier loop variable.
    prediction = str(row['模型输出']).strip().lower()
    label = str(row['need_check']).strip().lower()

    # Only rows whose model output is a clean true/false token are counted.
    if prediction == 'true':
        if label == 'true':
            true_positive += 1
        else:
            false_positive += 1
    elif prediction == 'false':
        if label == 'true':
            false_negative += 1
        else:
            true_negative += 1

# Compute accuracy, precision, recall and F1.
total = true_positive + false_positive + false_negative + true_negative
# Bug fix: accuracy was unguarded and raised ZeroDivisionError on an empty
# result set, while precision/recall were guarded — guard all four uniformly.
accuracy = (true_positive + true_negative) / total if total > 0 else 0
precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) > 0 else 0
recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) > 0 else 0
f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0

print(f"准确率 (Accuracy): {accuracy:.4f}")
print(f"精确率 (Precision): {precision:.4f}")
print(f"召回率 (Recall): {recall:.4f}")
print(f"F1分数 (F1-Score): {f1_score:.4f}")

# Persist the metrics; explicit UTF-8 since the labels contain Chinese text
# and the platform default encoding may differ.
with open(eval_result_file, "w", encoding="utf-8") as f:
    f.write(f"准确率 (Accuracy): {accuracy:.4f}\n")
    f.write(f"精确率 (Precision): {precision:.4f}\n")
    f.write(f"召回率 (Recall): {recall:.4f}\n")
    f.write(f"F1分数 (F1-Score): {f1_score:.4f}\n")

# Bug fix: the original message hard-coded "result/metrics.txt" although the
# metrics are actually written to eval_result_file.
print(f"指标计算完成，结果已保存到 {eval_result_file} =======================")
