import json
import os
import re

import numpy as np
import pandas as pd
import torch
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from tqdm import tqdm
from unsloth import FastLanguageModel, is_bfloat16_supported, get_chat_template


# 模型输出处理函数
def model_output(messages, model, tokenizer):
    FastLanguageModel.for_inference(model)  # 启用原生推理速度快2倍
    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,  # Must add for generation
        return_tensors="pt",
    ).to("cuda")
    outputs = model.generate(inputs, max_new_tokens=max_seq_length, use_cache=True)
    final_answer = tokenizer.batch_decode(outputs)

    # 提取 think 部分和最终输出
    think_pattern = r"<think>(.*?)</think>"
    think_match = re.search(think_pattern, final_answer[-1], re.DOTALL)
    think_content = think_match.group(1).strip() if think_match else "无思考过程"

    output_pattern = r"</think>(.*?)<\|im_end\|>"
    output_match = re.search(output_pattern, final_answer[-1], re.DOTALL)
    output_content = output_match.group(1).strip() if output_match else "模型无输出"

    # 处理 JSON 代码块
    output_lines = output_content.splitlines()
    if len(output_lines) >= 2 and output_lines[0].strip() == "```json" and output_lines[-1].strip() == "```":
        output_content = "\n".join(output_lines[1:-1])

    print(f"think_content=======================")
    print(think_content)
    print(f"output_content=======================")
    print(output_content)
    print("==============================================")
    return think_content, output_content


def calculate_exact_match(predicted, actual):
    """Return 1.0 when both values match after str() + strip(), else 0.0."""
    left = str(predicted).strip()
    right = str(actual).strip()
    return float(left == right)


def calculate_bleu_score(predicted, actual):
    """Compute smoothed BLEU-4 between two whitespace-tokenized code strings."""
    hypothesis = str(predicted).strip().split()
    reference = str(actual).strip().split()

    # An empty hypothesis or reference scores zero by convention.
    if not hypothesis or not reference:
        return 0.0

    # Uniform 4-gram weights; method1 smoothing avoids zero n-gram counts
    # on short sequences.
    return sentence_bleu(
        [reference],
        hypothesis,
        weights=(0.25, 0.25, 0.25, 0.25),
        smoothing_function=SmoothingFunction().method1,
    )


def calculate_codebleu_score(predicted, actual):
    """Simplified CodeBLEU approximation.

    Weighted combination of three components:
      * n-gram match    — smoothed BLEU-4                 (weight 0.3)
      * AST match       — overlap of structural keywords  (weight 0.3)
      * data-flow match — overlap of identifier names     (weight 0.4)

    Note: this is a rough stand-in; use a dedicated CodeBLEU library for
    rigorous results. Falls back to plain BLEU on any error.
    """
    try:
        pred_str = str(predicted)
        actual_str = str(actual)

        # 1. n-gram component (standard BLEU).
        bleu_component = calculate_bleu_score(predicted, actual)

        # 2. AST component: structural keywords approximate syntax nodes.
        #    \b word boundaries prevent substring false positives such as
        #    matching 'for' inside 'format' (the original used plain `in`).
        keywords = ['if', 'else', 'for', 'while', 'class', 'def', 'return', 'import', 'try', 'except']
        pred_keywords = {kw for kw in keywords if re.search(r'\b' + kw + r'\b', pred_str)}
        actual_keywords = {kw for kw in keywords if re.search(r'\b' + kw + r'\b', actual_str)}

        if actual_keywords:
            ast_match = len(pred_keywords & actual_keywords) / len(actual_keywords)
        else:
            # No keywords expected: perfect score only if none were predicted.
            ast_match = 1.0 if not pred_keywords else 0.0

        # 3. Data-flow component: identifier-name overlap as a cheap proxy
        #    (uses the module-level `re`; the redundant inner import is gone).
        pred_vars = set(re.findall(r'[a-zA-Z_][a-zA-Z0-9_]*', pred_str))
        actual_vars = set(re.findall(r'[a-zA-Z_][a-zA-Z0-9_]*', actual_str))

        if actual_vars:
            dataflow_match = len(pred_vars & actual_vars) / len(actual_vars)
        else:
            dataflow_match = 1.0 if not pred_vars else 0.0

        # Weighted combination (weights are tunable).
        return 0.3 * bleu_component + 0.3 * ast_match + 0.4 * dataflow_match

    except Exception:
        # Best-effort metric: degrade gracefully to plain BLEU rather than
        # aborting the whole evaluation run.
        return calculate_bleu_score(predicted, actual)


# 替换原有评估代码
def evaluate_code_generation(df, eval_result_file):
    """评估代码生成质量"""

    # 初始化统计变量
    total_em = 0
    total_bleu = 0
    total_codebleu = 0
    count = 0

    em_scores = []
    bleu_scores = []
    codebleu_scores = []

    # 遍历数据计算各项指标
    for index, row in df.iterrows():
        model_output = row['模型输出']
        after_code = row['after_code']

        # 计算各项得分
        em_score = calculate_exact_match(model_output, after_code)
        bleu_score = calculate_bleu_score(model_output, after_code)
        codebleu_score = calculate_codebleu_score(model_output, after_code)

        # 累积统计
        total_em += em_score
        total_bleu += bleu_score
        total_codebleu += codebleu_score
        count += 1

        # 保存单个得分
        em_scores.append(em_score)
        bleu_scores.append(bleu_score)
        codebleu_scores.append(codebleu_score)

    # 计算平均得分
    avg_em = total_em / count if count > 0 else 0
    avg_bleu = total_bleu / count if count > 0 else 0
    avg_codebleu = total_codebleu / count if count > 0 else 0

    # 计算标准差
    std_em = np.std(em_scores) if em_scores else 0
    std_bleu = np.std(bleu_scores) if bleu_scores else 0
    std_codebleu = np.std(codebleu_scores) if codebleu_scores else 0

    # 打印结果
    print("=== 代码生成质量评估结果 ===")
    print(f"Exact Match (EM): {avg_em:.4f} ± {std_em:.4f}")
    print(f"BLEU Score: {avg_bleu:.4f} ± {std_bleu:.4f}")
    print(f"CodeBLEU Score: {avg_codebleu:.4f} ± {std_codebleu:.4f}")
    print(f"样本数量: {count}")

    # 保存结果到文件
    with open(eval_result_file, "w") as f:
        f.write("=== 代码生成质量评估结果 ===\n")
        f.write(f"Exact Match (EM): {avg_em:.4f} ± {std_em:.4f}\n")
        f.write(f"BLEU Score: {avg_bleu:.4f} ± {std_bleu:.4f}\n")
        f.write(f"CodeBLEU Score: {avg_codebleu:.4f} ± {std_codebleu:.4f}\n")
        f.write(f"样本数量: {count}\n")

    return {
        'em': avg_em,
        'bleu': avg_bleu,
        'codebleu': avg_codebleu,
        'count': count
    }


def do_test_work(result_file_name, processed_repo_PR_number, model, tokenizer, existing_df):
    """Run the repair model over every test item, checkpointing results to Excel.

    Args:
        result_file_name: path of the .xlsx results file (must already exist;
            created by re_load_checked), opened here in append mode.
        processed_repo_PR_number: {repo_name: set(pr_number)} of rows already
            saved, used to skip finished work when resuming.
        model: generation model forwarded to model_output().
        tokenizer: tokenizer forwarded to model_output().
        existing_df: DataFrame of previously saved rows; each new row is
            appended and the full frame is rewritten to the sheet.

    NOTE(review): reads the module-level global ``test_data`` populated in
    ``__main__`` instead of taking it as a parameter — confirm when refactoring.
    """
    # tqdm renders a progress bar; if_sheet_exists="replace" lets the growing
    # DataFrame overwrite the sheet after every processed item.
    with pd.ExcelWriter(result_file_name, engine="openpyxl", mode="a", if_sheet_exists="replace") as writer:
        for item in tqdm(test_data, desc="处理进度", unit="条"):
            pr_number = item["pr_number"]
            repo = item['repo_name']
            need_check = item["need_check"]

            # Items explicitly flagged need_check == False are skipped.
            if need_check is False:
                print(f"{repo}中的第{pr_number}号 不需要check，跳过处理=======================")
                continue

            before_code = item["before_code"]
            after_code = item["after_code"]
            # A missing or falsy comment becomes an empty string for the prompt.
            review_comment = item.get("comment") or ""

            # Resume support: skip (repo, pr_number) pairs already in the sheet.
            if repo in processed_repo_PR_number and pr_number in processed_repo_PR_number[repo]:
                print(f"{repo}中的第{pr_number}号 已存在，跳过处理=======================")
                continue

            messages = [
                {"role": "system",
                 "content": (
                     "You are an OpenHarmony code repair assistant.\n"
                     "Input: the original code snippet and a user review comment that may be noisy or unreliable.\n\n"
                     "Your task:\n"
                     "• Produce the repaired code only. Do not include any explanations, comments, diffs, or surrounding text.\n"
                     "• Do NOT wrap the output in Markdown fences (no ```), XML/HTML tags, or any other markers.\n"
                     "• Weigh the review comment carefully; follow it only when it clearly improves correctness/security/clarity. If it conflicts or seems wrong, ignore it.\n"
                     "• Match the input scope (file→file, snippet→snippet).\n"
                     "• Start with the first character of code and end with the last — nothing else.\n"), },
                {"role": "user",
                 "content": "Original code:\n"
                            "<code>\n" + before_code + "\n</code>\n\n"
                                                       "Review comment:\n"
                            + review_comment
                            + "\n/no_think"},
            ]

            print(f"{repo}中的第{pr_number}号 模型输出开始=======================")
            think_content, output_content = model_output(messages, model, tokenizer)

            # Append the new record to the accumulated DataFrame.
            new_row = {
                "pr_number": pr_number,
                "repo": repo,
                'before_code': before_code,
                "review_comment": review_comment,
                "prompt": messages,
                "模型思考": think_content,
                "模型输出": output_content,
                "after_code": after_code
            }
            existing_df = pd.concat([existing_df, pd.DataFrame([new_row])], ignore_index=True)

            # Rewrite the whole sheet and force a save so partial progress
            # is on disk even if the run dies mid-way.
            existing_df.to_excel(writer, index=False)
            writer.book.save(result_file_name)  # force an immediate write to file
            print(f"{repo}中的第{pr_number}号 模型输出结束=======================")


def re_load_checked(result_file_name):
    """Load previously saved results, or create an empty results workbook.

    Returns:
        ({repo_name: set(pr_number)}, DataFrame) — the already-processed
        index and the existing (possibly empty) results frame.
    """
    processed_repo_PR_number = {}

    if os.path.exists(result_file_name):
        # Resume path: index every (repo, pr_number) pair already on disk.
        existing_df = pd.read_excel(result_file_name)
        for _, record in existing_df.iterrows():
            done = processed_repo_PR_number.setdefault(record['repo'], set())
            done.add(record['pr_number'])
        return processed_repo_PR_number, existing_df

    # First run: create the output directory and an empty workbook that
    # carries the expected column schema.
    columns = ["pr_number", "repo", 'before_code', "review_comment", "prompt",
               "模型思考", "模型输出", "after_code"]
    existing_df = pd.DataFrame(columns=columns)
    os.makedirs(os.path.dirname(result_file_name), exist_ok=True)
    existing_df.to_excel(result_file_name, index=False, engine="openpyxl")
    return processed_repo_PR_number, existing_df


if __name__ == "__main__":
    # ----- configuration -----
    # NOTE: max_seq_length is also read as the generation budget inside
    # model_output(), and test_data (set per repo below) is read by
    # do_test_work() — both are module-level globals.
    max_seq_length = 40960
    load_in_4bit = False  # 4-bit quantization must stay disabled for this run
    dtype = torch.bfloat16 if is_bfloat16_supported() else torch.float16
    # Repos under evaluation; commented entries are toggled off for this run.
    REPO_List = [
        # "account_os_account",
        "arkui_ace_engine",
        # "build",
        # "communication_wifi",
        # "developtools_ace_ets2bundle",
        "multimedia_audio_framework",
        # "web_webview",
        # "xts_acts",
        # "kernel_linux_5.10"
    ]
    OWNER = "openharmony"
    model_name = "/root/autodl-tmp/model/Qwen3-8B"
    # Load the model (path suggests a fine-tuned/local checkpoint — confirm).
    print("模型加载开始=======================")
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name=model_name,
        dtype=dtype,
        load_in_4bit=load_in_4bit,
        max_seq_length=max_seq_length,
    )
    tokenizer = get_chat_template(
        tokenizer,
        chat_template="qwen-2.5",
    )
    print("模型加载完成=======================")

    for repo in REPO_List:
        data_path = f"../data/research_data/RQ2/{repo}/{OWNER}_{repo}_test_data.jsonl"
        result_file_name = f"./result/rq2/{repo}/{OWNER}_{repo}_qwen3_8B_model_no_thinking_outputs.xlsx"
        eval_result_file = f"./result/rq2/{repo}/{OWNER}_{repo}_qwen3_8B_no_thinking_metrics.txt"

        # Load the local test set for this repo.
        test_data = []
        # One JSON object per line (jsonl).
        with open(data_path, 'r', encoding='utf-8') as file:
            for line in file:
                data = json.loads(line)
                test_data.append(data)

        processed_repo_PR_number, existing_df = re_load_checked(result_file_name)

        do_test_work(result_file_name, processed_repo_PR_number, model, tokenizer, existing_df)

        print(f"输出结果已保存到 `{result_file_name}`")
        # After generation, re-read the saved outputs and compute metrics.
        print("开始计算模型准确率等指标=======================")
        # Read back the checkpointed Excel file.
        df = pd.read_excel(result_file_name)

        evaluate_code_generation(df, eval_result_file)

        print(f"指标计算完成，结果已保存到 {eval_result_file} =======================")