import os
import random
import re

import javalang
import torch
from app.logic.train.file_recommand import get_recommend_filelist
from app.logic.utils.filepath import PATCH_DIR
from app.logic.utils.filepath import REPORT_DIR
from app.logic.utils.patch_analysis import PatchAnalysis
from sklearn.metrics.pairwise import cosine_similarity
from transformers import BertTokenizer, BertForMaskedLM
from transformers import RobertaTokenizer, RobertaModel
from zss import simple_distance, Node


def wrap_statement(statement):
    """
    Wrap a single Java statement into a minimal compilable Java program.

    Accepts either a string or a list of tokens (joined with spaces).
    """
    body = " ".join(statement) if isinstance(statement, list) else statement
    return (
        "\n"
        "    public class Wrapper {\n"
        "        public void wrappedMethod() {\n"
        f"            {body}\n"
        "        }\n"
        "    }\n"
        "    "
    )


def validate_java_code(code):
    """
    Best-effort repair of a Java snippet before AST parsing.

    - Closes an unclosed block comment.
    - Closes unbalanced (unescaped) double-quote string literals.
    - Closes unbalanced (unescaped) single-quote char literals.

    Returns the possibly-repaired code string.
    """
    # Unclosed block comment: append the closing marker.
    if code.count("/*") > code.count("*/"):
        print(f"Unclosed block comment found:[{code}], try to repair...")
        code += " */"  # append the closing comment token
        print(f"After Repair:[{code}]")

    # Unclosed double-quote string literal.
    # BUGFIX: the original labelled this check "single quote" (and the next
    # one "double quote") -- the variable names and log messages were swapped.
    # Quotes preceded by a backslash are escaped and therefore not counted.
    pattern = r"(?<!\\)" + re.escape('"')
    double_quote_count = len(re.findall(pattern, code))
    if double_quote_count % 2 != 0:
        print(f"unclosed double quote found:[{code}], try to repair...")
        code += '"'
        print(f"After Repair:[{code}]")

    # Unclosed single-quote (char) literal.
    pattern = r"(?<!\\)" + re.escape("'")
    single_quote_count = len(re.findall(pattern, code))
    if single_quote_count % 2 != 0:
        print(f"unclosed single quote found:[{code}], try to repair...")
        code += "'"
        print(f"After Repair:[{code}]")

    return code


def parse_java_to_ast(code):
    """
    Parse Java source code into a zss-compatible tree.

    Returns a tuple ``(root, count)`` where ``root`` is a ``zss.Node`` tree
    mirroring the javalang AST and ``count`` is the number of AST/list nodes
    created (leaf values are not counted).  On a syntax error, returns
    ``(Node("Empty"), 0)``.
    """
    try:
        tree = javalang.parse.parse(code)
    except javalang.parser.JavaSyntaxError as e:
        print(f"Failed to parse Java code{code} : {e}")
        return Node("Empty"), 0

    # Recursively convert a javalang node (or list / leaf value) to zss Nodes,
    # threading a running node count through the recursion.
    def build_tree(node, visited=None, count=0):
        if visited is None:
            visited = set()

        # Skip nodes that were already visited, to guard against cycles.
        # NOTE(review): the visited set is keyed on id(); interned primitives
        # (small ints, identical short strings) can share an id, so a later
        # occurrence of an equal leaf value may be dropped -- confirm intent.
        if id(node) in visited:
            return None, count
        visited.add(id(node))

        # Proper AST node: a zss node named after the javalang node class.
        if isinstance(node, (javalang.ast.Node,)):
            root = Node(type(node).__name__)  # create the current node
            count += 1  # count this node
            for child in node.children:
                child_tree, count = build_tree(child, visited, count)  # recurse into subtree
                if child_tree:
                    root.addkid(child_tree)
            return root, count
        elif isinstance(node, list):  # a list of child nodes
            root = Node("List")
            count += 1  # count this node
            for child in node:
                child_tree, count = build_tree(child, visited, count)
                if child_tree:
                    root.addkid(child_tree)
            return root, count
        else:
            # Leaf value (identifier, literal, ...).  NOTE(review): count is
            # NOT incremented here, although the original comment claimed it
            # was -- confirm whether leaves should contribute to the size.
            return Node(str(node)), count

    root, count = build_tree(tree)

    return root, count


def calculate_similarity(ast1, ast2, max_len):
    """
    Similarity of two ASTs based on tree edit distance.

    Returns ``1 - distance / max_len`` (0 when ``max_len`` is 0), or 0.0
    when either tree is missing (i.e. parsing failed).
    """
    if ast1 and ast2:
        # Zhang-Shasha tree edit distance, normalized by the larger tree size.
        edit_distance = simple_distance(ast1, ast2)
        return 1 - edit_distance / max_len if max_len > 0 else 0
    return 0.0  # a failed parse yields the lowest similarity


# Structural similarity of two code statements, implemented via AST comparison.
def calculate_ast_similarity(sentence1, sentence2):
    """Repair, wrap and parse both statements, then score their AST similarity."""
    if not (sentence1 and sentence2):
        return 0

    repaired_a = validate_java_code(sentence1)
    repaired_b = validate_java_code(sentence2)

    tree_a, size_a = parse_java_to_ast(wrap_statement(repaired_a))
    tree_b, size_b = parse_java_to_ast(wrap_statement(repaired_b))

    # Normalize the edit distance by the larger of the two tree sizes.
    return calculate_similarity(tree_a, tree_b, max(size_a, size_b))


# English stop words stripped from bug-report text before vectorization.
stop_words = set(
    "a an the is in at of on and or if to it this that these those for "
    "with as was were be by not but you he she they we do does did has "
    "have had will would".split()
)

# Common Java keywords (reserved words, not meaningful identifiers).
code_keywords = set(
    "public void private class static int float for while if else".split()
)


class SentenceTrain:
    """
    Statement-level defect recommendation for one project.

    For every statement in the top-k suspicious files it combines:
    - CodeBERT cosine similarity to historical patch statements and reports,
    - a BERT masked-LM self-supervised "difference" score,
    - the file-level suspiciousness score,
    and recommends the highest-scoring statements.
    """

    def __init__(self, work_path_dir, project_path_dir, embedding_dim=300):
        self.work_path_dir = work_path_dir
        self.project_path_dir = project_path_dir
        self.workspace = os.path.join(work_path_dir, str(project_path_dir))

        # Directory holding the defect (bug) reports.
        self.report_dir = os.path.join(self.workspace, REPORT_DIR)
        print("report_dir:", self.report_dir)

        # Patch directory, used to extract historical defect statements.
        self.patch_dir = os.path.join(self.workspace, PATCH_DIR)
        print("patch_dir:", self.patch_dir)

        # Top-k files with the highest suspiciousness; their statements are
        # the candidates scored by this class.
        result = get_recommend_filelist(work_path_dir, project_path_dir)
        self.file_paths = [entry["file_path"] for entry in result]
        self.scores = [float(entry["score"]) for entry in result]
        print("file_paths:", self.file_paths)

        # Embedding dimensionality (kept for compatibility; not used below).
        self.embedding_dim = embedding_dim

        # Pre-trained CodeBERT model/tokenizer for code-statement embeddings.
        self.code_bert_tokenizer = RobertaTokenizer.from_pretrained(
            "app/model/codebert-base"
        )
        self.code_bert_model = RobertaModel.from_pretrained("app/model/codebert-base")

        # BERT masked-LM used for the self-supervised difference step.
        self.bert_tokenizer = BertTokenizer.from_pretrained(
            "app/model/bert-base-uncased"
        )
        self.bert_model = BertForMaskedLM.from_pretrained("app/model/bert-base-uncased")

        self.report_sentences = self.get_reports_sentences()
        self.patch_sentences = self.get_patch_sentences()
        self.suspicious_sentences = self.get_suspicious_sentences()

        # Flattened view of suspicious_sentences: statement text only.
        self.suspicious_flat = [
            line[0] for statements in self.suspicious_sentences for line in statements
        ]

        self.total_sim = []
        self.differences = []

    def get_reports_sentences(self):
        """Load every *.json defect report as a lowercased, punctuation-free,
        stop-word-filtered list of words (one word list per report)."""
        reports_files = [
            os.path.join(self.report_dir, f)
            for f in os.listdir(self.report_dir)
            if f.endswith(".json")
        ]
        report_data = []
        for report_file in reports_files:
            print(f"Loading defect report file: {report_file}")
            with open(report_file, "r", encoding="utf-8") as file:
                content = file.read()
                # Lowercase, strip punctuation, then drop stop words.
                data = content.lower()
                data = re.sub(r"[^\w\s]", "", data)
                words = data.split()
                words = [word for word in words if word not in stop_words]
                report_data.append(words)

        return report_data

    def get_patch_sentences(self):
        """Return the non-empty statements removed by historical patches,
        one sentence per list element."""
        print("读取所有补丁的历史语句...")
        patch_analysis = PatchAnalysis(self.work_path_dir, self.project_path_dir)
        history_contents = patch_analysis.get_removed_statements()
        # Keep only non-empty statements.
        return [content for content in history_contents if content]

    def get_suspicious_sentences(self):
        """Split every suspicious file into statements.

        Stripped lines are accumulated until one ends with ';', '{' or '}'.
        Returns one list per file of tuples
        ``(statement, file_path, end_line, file_score)``.
        """
        print("Getting suspicious sentences...")
        suspicious_sentences = []
        for i, file_path in enumerate(self.file_paths):
            score = self.scores[i]
            print(f"Loading {i}th suspicious file: {file_path}")
            with open(file_path, "r", encoding="utf-8") as file:
                lines = file.readlines()
            source_code = "".join(lines)
            statements = []  # (statement, path, line, score) tuples
            buffer = []  # fragments of the statement being accumulated

            # BUGFIX: inner loop variable renamed (was ``i``) so it no longer
            # shadows the file-index variable of the enclosing loop.
            for line_idx, line in enumerate(source_code.split("\n")):
                current_line = line_idx + 1  # 1-based line number
                line = line.strip()
                if not line:  # skip blank lines
                    continue

                buffer.append(line)
                # A ';', '{' or '}' suffix terminates the current statement.
                if line.endswith((";", "{", "}")):
                    statement = " ".join(buffer)
                    statements.append((statement, file_path, current_line, score))
                    buffer = []  # reset for the next statement
            suspicious_sentences.append(statements)

        return suspicious_sentences

    def get_sentence_vector(self, sentence):
        """Embed one sentence with CodeBERT; returns the [CLS] vector
        (a (1, hidden) tensor)."""
        # Tokenize (truncated/padded to the model's 512-token limit).
        inputs = self.code_bert_tokenizer(
            sentence, return_tensors="pt", truncation=True, padding=True, max_length=512
        )

        # Forward pass without gradients (inference only).
        with torch.no_grad():
            outputs = self.code_bert_model(**inputs)

        # The [CLS] token representation serves as the sentence embedding.
        embedding = outputs.last_hidden_state[:, 0, :]
        return embedding

    def calculate_similarity(self, embedding1, embedding2):
        """Cosine similarity between two sentence embeddings (scalar)."""
        return cosine_similarity(
            embedding1.detach().numpy(), embedding2.detach().numpy()
        )[0][0]

    def get_sentences_vector(self):
        """Vectorize patch, suspicious and report sentences with CodeBERT.

        Returns ``(history_vector, report_vector, suspicious_vector)``.
        """
        print("Convert historical sentences and suspicious sentences into vectors...")

        history_vector = []
        suspicious_vector = []
        report_vector = []

        print("patch_sentences is vectorizing now...")
        for i, sentence in enumerate(self.patch_sentences):
            print(f"Processing the {i}th historical defect statements: {sentence}")
            history_vector.append(self.get_sentence_vector(sentence))

        print("suspicious_flat is vectorizing now...")
        for i, sentence in enumerate(self.suspicious_flat):
            print(f"Processing the {i}th suspicious statements: {sentence}")
            suspicious_vector.append(self.get_sentence_vector(sentence))

        print("report_sentences is vectorizing now...")
        for i, sentence in enumerate(self.report_sentences):
            print(f"Processing the {i}th report statements: {sentence}")
            report_vector.append(self.get_sentence_vector(sentence))

        return history_vector, report_vector, suspicious_vector

    def calculate_total_similarity(self):
        """Score every suspicious statement as the mean of its average
        similarity to patch statements and to reports.

        Appends one score per statement to ``self.total_sim`` and returns it.
        """
        history_vector, report_vector, suspicious_vector = self.get_sentences_vector()
        for i, sentence_embedding in enumerate(suspicious_vector):
            print(
                f"Processing the {i}th suspicious statements: {self.suspicious_flat[i]}"
            )
            average_sim1 = 0.0
            for j, history_embedding in enumerate(history_vector):
                print(
                    f"Processing the {j}th historical similarity statements: {self.patch_sentences[j]}"
                )
                average_sim1 += self.calculate_similarity(
                    sentence_embedding, history_embedding
                )
            # TODO structural (AST) similarity is not integrated here yet
            average_sim2 = 0.0
            for j, report_embedding in enumerate(report_vector):
                print(
                    f"Processing the {j}th report similarity statements: {self.report_sentences[j]}"
                )
                average_sim2 += self.calculate_similarity(
                    sentence_embedding, report_embedding
                )
            # BUGFIX: guard against empty patch/report sets, which previously
            # raised ZeroDivisionError; an empty set now contributes 0.
            mean1 = average_sim1 / len(history_vector) if history_vector else 0.0
            mean2 = average_sim2 / len(report_vector) if report_vector else 0.0
            self.total_sim.append((mean1 + mean2) / 2)
        return self.total_sim

    def ssl_train_difference(self):
        """Self-supervised difference: mask ~1/3 of each non-zero-similarity
        statement, let BERT reconstruct it, and record the cosine distance
        between the original and reconstructed embeddings in
        ``self.differences`` (0 for zero-similarity statements).
        """
        for i, sim in enumerate(self.total_sim):
            if sim == 0:
                print(f"Processing the {i}th SSL difference: {self.suspicious_flat[i]}")
                self.differences.append(0)
                continue

            # Step 1: embed the original sentence.
            original_sentence = self.suspicious_flat[i]
            print(f"Processing sentence for SSL training: {original_sentence}")
            original_vector = self.get_sentence_vector(original_sentence)

            # Step 2: build a masked variant (BERT-style: replace roughly
            # one third of the tokens with [MASK]).
            tokens = self.bert_tokenizer.tokenize(original_sentence)
            # BUGFIX: an empty token list previously made random.sample raise
            # ValueError (sample size 1 from an empty population).
            if not tokens:
                self.differences.append(0)
                continue
            masked_indices = random.sample(
                range(len(tokens)), max(1, len(tokens) // 3)  # mask ~30% of tokens
            )
            for idx in masked_indices:
                tokens[idx] = "[MASK]"
            masked_sentence = self.bert_tokenizer.convert_tokens_to_string(tokens)
            print(f"Masked sentence: {masked_sentence}")

            # Step 3: let BERT predict the masked tokens.
            inputs = self.bert_tokenizer(masked_sentence, return_tensors="pt")
            with torch.no_grad():
                outputs = self.bert_model(**inputs)
            predicted_ids = torch.argmax(outputs.logits, dim=-1)
            predicted_sentence = self.bert_tokenizer.decode(predicted_ids[0])
            print(f"Predicted sentence: {predicted_sentence}")

            # Step 4: embed the predicted sentence.
            predicted_vector = self.get_sentence_vector(predicted_sentence)

            # Step 5: difference = cosine distance between both embeddings.
            difference = 1 - self.calculate_similarity(
                original_vector, predicted_vector
            )
            print(f"Difference for sentence {i}: {difference}")

            self.differences.append(difference)

    def total_similarity_and_recommend(self, k=10, threshold=0.5):
        """Combine similarity + difference + file score per statement, rank
        them, and return ``(top_k_sentences, evaluation_metrics)``.

        ``top_k_sentences`` holds ``(statement, file_path, line, score)``
        tuples; the metrics are Top-1/5/10 hit ratios plus MRR and MAP
        computed against ``threshold``.
        """
        # Fold the per-file suspiciousness score and SSL difference into the
        # similarity score of each statement.
        print("Start calculating total similarity...")
        scores = []
        sentence_position = []
        file_paths = []
        for sentence in self.suspicious_sentences:
            for line in sentence:
                scores.append(float(line[3]))
                sentence_position.append(line[2])
                file_paths.append(line[1])
        for i in range(len(self.total_sim)):
            self.total_sim[i] = self.total_sim[i] + self.differences[i] + scores[i]

        # Rank statements by combined score, descending.
        print("Start sorting recommend k sentences...")
        data = list(
            zip(
                self.suspicious_flat, file_paths, sentence_position, self.total_sim
            )
        )
        data_sorted = sorted(data, key=lambda x: x[3], reverse=True)

        top_k_sentences = data_sorted[:k]
        for stmt, path, line, sim in top_k_sentences:
            print(f"语句: {stmt}, 所在文件: {path}, 所在行: {line}, 分数: {sim}")

        # Evaluation metrics.
        print("Start calculating evaluation metrics...")

        # Top-1: whether the best-ranked statement clears the threshold.
        # BUGFIX: guard against an empty candidate list (was an IndexError).
        top1 = 1 if top_k_sentences and top_k_sentences[0][3] > threshold else 0

        # Top-5: fraction of the top 5 with score above the threshold.
        top5 = (
            sum(1 for stmt, path, line, sim in top_k_sentences[:5] if sim > threshold)
            / 5
        )

        # Top-10: fraction of the top 10 with score above the threshold.
        top10 = (
            sum(1 for stmt, path, line, sim in top_k_sentences[:10] if sim > threshold)
            / 10
        )

        # MRR score.  NOTE(review): this sums reciprocal ranks of ALL
        # above-threshold hits and divides by k, rather than using only the
        # first relevant hit -- kept as-is; confirm this is intentional.
        print("Start calculate MRR score...")
        mrr_score = 0.0
        for idx, (stmt, path, line, sim) in enumerate(top_k_sentences):
            if sim > threshold:
                mrr_score += 1.0 / (idx + 1)
        mrr_score /= k

        # MAP score (average precision over each prefix of the top-k list).
        print("Start calculate MAP score...")
        avg_precision_sum = 0.0
        for i in range(1, k + 1):
            relevant_count = 0
            precision_at_i = 0
            for j in range(i):
                stmt, path, line, sim = top_k_sentences[j]
                if sim > threshold:
                    relevant_count += 1
                    precision_at_i += relevant_count / (j + 1)
            avg_precision_sum += precision_at_i / i
        map_score = avg_precision_sum / k

        evaluation_metrics = {
            "top1": float(top1),
            "top5": float(top5),
            "top10": float(top10),
            "mrr_score": float(mrr_score),
            "map_score": float(map_score),
        }

        return top_k_sentences, evaluation_metrics


def sen_train(work_path_dir, project_path_dir):
    """Run the full statement-recommendation pipeline and return the
    top-10 statements with their evaluation metrics."""
    trainer = SentenceTrain(work_path_dir, project_path_dir)
    print("s_train.report_sentences.length:", len(trainer.report_sentences))
    print("s_train.patch_sentences.length:", len(trainer.patch_sentences))
    print("s_train.suspicious_flat.length:", len(trainer.suspicious_flat))

    print("Start calculating history and suspicious similarity...")
    trainer.calculate_total_similarity()
    print("s_train.total_sim.length:", len(trainer.total_sim))
    print("Start Self Supervised Learning and calculate difference...")
    trainer.ssl_train_difference()
    print("s_train.differences.length:", len(trainer.differences))

    print("Start calculating total similarity and recommend...")
    return trainer.total_similarity_and_recommend(k=10)
