import os
import re

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from app.logic.utils.filepath import REPORT_DIR, TOKEN_DIR
from gensim.models import Word2Vec

# English stopword list used to clean defect-report text. Hard-coded
# (originally based on NLTK's English stopword corpus) so the module works
# offline without downloading NLTK data.
stop_words = {'a', 'an', 'the', 'is', 'in', 'at', 'of', 'on', 'and', 'or', 'if', 'to', 'it', 'this', 'that',
              'these', 'those', 'for', 'with', 'as', 'was', 'were', 'be', 'by', 'not', 'but', 'you', 'he', 'she',
              'they', 'we', 'do', 'does', 'did', 'has', 'have', 'had', 'will', 'would'}


class DefectLocalization:
    """Pipeline for IR-based defect localization.

    Preprocesses source-code and defect-report corpora, trains a Word2Vec
    model per corpus, and defines the TextCNN / attention modules used to
    extract and fuse features from both sides.
    """

    def __init__(self, work_path_dir, project_path_dir, embedding_dim=300, max_len=1000, batch_size=32, device=None):
        """
        :param work_path_dir: root working directory.
        :param project_path_dir: project identifier, joined under work_path_dir.
        :param embedding_dim: dimensionality of the Word2Vec embeddings.
        :param max_len: sequence length every document is padded/truncated to.
        :param batch_size: batch size (stored for callers; unused in this file).
        :param device: torch device; defaults to CUDA when available, else CPU.
        """
        self.workspace = os.path.join(work_path_dir, str(project_path_dir))
        self.src_dir = os.path.join(self.workspace, TOKEN_DIR)
        self.report_dir = os.path.join(self.workspace, REPORT_DIR)
        self.embedding_dim = embedding_dim
        self.max_len = max_len
        self.batch_size = batch_size
        self.device = device if device else torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Models are created lazily: Word2Vec via train_word2vec_*,
        # the TextCNNs are assigned by the caller.
        self.word2vec_rep_model = None
        self.word2vec_src_model = None
        self.textcnn_rep_model = None
        self.textcnn_src_model = None

        # Programming-language keywords stripped from source-code tokens.
        self.code_keywords = {'public', 'void', 'private', 'class', 'static', 'int', 'float', 'for', 'while', 'if',
                              'else'}
        # English stopwords stripped from defect-report tokens (module-level list).
        self.stop_words = stop_words

    @staticmethod
    def _normalize_and_split(text):
        """Lowercase *text*, remove punctuation (keep word characters and
        whitespace), and split on whitespace."""
        return re.sub(r'[^\w\s]', '', text.lower()).split()

    def preprocess_code(self, code):
        """Tokenize source code, filtering out programming-language keywords.

        :param code: raw source-code text.
        :return: list of lowercase tokens.
        """
        return [word for word in self._normalize_and_split(code)
                if word not in self.code_keywords]

    def preprocess_report(self, report):
        """Tokenize a defect report, filtering out English stopwords.

        :param report: raw defect-report text.
        :return: list of lowercase tokens.
        """
        return [word for word in self._normalize_and_split(report)
                if word not in self.stop_words]

    def load_data(self):
        """Load and preprocess every ``.json`` file in the token and report dirs.

        Also dumps the tokenized corpora to ``src_data.txt`` /
        ``report_data.txt`` inside the workspace for inspection.

        :return: ``(src_data, report_data)``, each a list of token lists.
        """
        code_files = [os.path.join(self.src_dir, f) for f in os.listdir(self.src_dir) if f.endswith('.json')]
        reports_files = [os.path.join(self.report_dir, f) for f in os.listdir(self.report_dir) if f.endswith('.json')]

        src_data = []
        for code_file in code_files:
            print(f"Loading source code file: {code_file}")
            with open(code_file, 'r', encoding='utf-8') as file:
                src_data.append(self.preprocess_code(file.read()))

        report_data = []
        for report_file in reports_files:
            print(f"Loading defect report file: {report_file}")
            with open(report_file, 'r', encoding='utf-8') as file:
                report_data.append(self.preprocess_report(file.read()))

        # Persist the tokenized corpora so intermediate results can be inspected.
        with open(os.path.join(self.workspace, 'src_data.txt'), 'w', encoding='utf-8') as file:
            for tokens in src_data:
                file.write(' '.join(tokens) + '\n')

        with open(os.path.join(self.workspace, 'report_data.txt'), 'w', encoding='utf-8') as file:
            for tokens in report_data:
                file.write(' '.join(tokens) + '\n')

        return src_data, report_data

    def train_word2vec_src(self, sentences):
        """Train the Word2Vec model for the source-code corpus."""
        self.word2vec_src_model = Word2Vec(sentences, vector_size=self.embedding_dim, window=5, min_count=1, workers=4)
        print("self.word2vec_src_model:", self.word2vec_src_model)

    def train_word2vec_rep(self, sentences):
        """Train the Word2Vec model for the defect-report corpus."""
        self.word2vec_rep_model = Word2Vec(sentences, vector_size=self.embedding_dim, window=5, min_count=1, workers=4)
        print("self.word2vec_rep_model:", self.word2vec_rep_model)

    def encode_data(self, sentences, is_report=False):
        """Convert token sequences into fixed-size embedding matrices.

        Each document becomes a ``(max_len, embedding_dim)`` matrix:
        in-vocabulary words get their Word2Vec vector, out-of-vocabulary words
        a zero vector; sequences are truncated / zero-padded to ``max_len``.
        An empty document yields an all-zero matrix (the previous
        implementation raised from ``np.vstack`` on an empty list).

        :param sentences: list of token lists, one per document.
        :param is_report: select the report model if True, else the source model.
        :return: list of ``(max_len, embedding_dim)`` numpy arrays.
        """
        word2vec_model = self.word2vec_rep_model if is_report else self.word2vec_src_model

        encoded_data = []
        zero_vector = np.zeros(self.embedding_dim)
        for sentence in sentences:
            # Truncate first so words beyond max_len are never looked up.
            vectors = [word2vec_model.wv[word] if word in word2vec_model.wv else zero_vector
                       for word in sentence[:self.max_len]]
            # Pre-allocated zero matrix doubles as the padding.
            matrix = np.zeros((self.max_len, self.embedding_dim))
            if vectors:  # guard: np.vstack([]) would raise on empty documents
                matrix[:len(vectors)] = np.vstack(vectors)
            encoded_data.append(matrix)

        return encoded_data

    class TextCNN(nn.Module):
        """TextCNN feature extractor: parallel Conv2d branches (one per kernel
        size), each followed by max-over-time pooling; pooled features are
        concatenated and flattened."""

        def __init__(self, kernel_sizes, num_channels, max_len, embedding_dim):
            """
            :param kernel_sizes: list of convolution window heights.
            :param num_channels: output channels per convolution branch.
            :param max_len: input sequence length.
            :param embedding_dim: input embedding dimensionality.
            """
            super().__init__()
            self.kernel_sizes = kernel_sizes
            self.num_channels = num_channels

            # One convolution per kernel size; each kernel spans the full
            # embedding dimension, so the width axis collapses to 1.
            self.conv_layers = nn.ModuleList([
                nn.Conv2d(1, num_channels, (kernel_size, embedding_dim))
                for kernel_size in kernel_sizes
            ])

            # Matching max-pool layers; the pooling window covers the whole
            # (max_len - kernel_size + 1) output of each convolution.
            self.pool_layers = nn.ModuleList([
                nn.MaxPool2d((max_len - kernel_size + 1, 1))
                for kernel_size in kernel_sizes
            ])

        def forward(self, x):
            """Extract features from ``x`` of shape [batch_size, max_len,
            embedding_dim]; returns [batch_size, num_channels * len(kernel_sizes)].
            """
            # Add a channel dimension: [batch_size, 1, max_len, embedding_dim].
            x = x.unsqueeze(1)

            # Convolution + max pooling per branch.
            pooled_outputs = []
            for conv, pool in zip(self.conv_layers, self.pool_layers):
                conv_out = conv(x)  # [batch_size, num_channels, max_len-kernel_size+1, 1]
                pooled_out = pool(conv_out)  # [batch_size, num_channels, 1, 1]
                pooled_outputs.append(pooled_out)

            # Concatenate the pooled features: [batch_size, num_channels * len(kernel_sizes), 1, 1].
            cat_out = torch.cat(pooled_outputs, dim=1)

            # Flatten to [batch_size, num_channels * len(kernel_sizes)].
            cat_out = cat_out.view(cat_out.size(0), -1)

            return cat_out

    class TextCNNAttention(nn.Module):
        """Cross-attention between source-code features and defect-report
        features via dot-product similarity."""

        def __init__(self, feature_dim):
            """:param feature_dim: dimensionality of the input feature vectors."""
            super().__init__()
            self.feature_dim = feature_dim
            # NOTE(review): attn_layer is defined but never used in forward()
            # — kept for interface/state-dict compatibility; confirm intent.
            self.attn_layer = nn.Linear(feature_dim, 1)

        def forward(self, src_features, report_features):
            """Weight the defect-report features by their similarity to the
            source-code features.

            :param src_features: source-code features [batch_size, feature_dim].
            :param report_features: defect-report features [batch_size, feature_dim].
            :return: attention-weighted report features [batch_size, feature_dim].
            """
            # Dot-product similarity between every src/report pair.
            similarity = torch.matmul(src_features, report_features.T)  # [batch_size, batch_size]

            # Softmax over the report axis gives attention weights.
            attn_weights = F.softmax(similarity, dim=-1)  # [batch_size, batch_size]

            # Weighted sum of report features per source item.
            weighted_report_features = torch.matmul(attn_weights, report_features)  # [batch_size, feature_dim]

            return weighted_report_features


def gen_report_de_noise(work_dir, project_path):
    """Run the full denoising pipeline for one project and return the fused
    source-code / attention-weighted report feature matrix."""
    dl = DefectLocalization(work_dir, project_path)

    # Load and preprocess both corpora.
    src_tokens, report_tokens = dl.load_data()

    # Fit one Word2Vec model per corpus.
    dl.train_word2vec_src(src_tokens)
    dl.train_word2vec_rep(report_tokens)

    # Encode every document as a (max_len, embedding_dim) matrix.
    src_encoded = dl.encode_data(src_tokens)
    report_encoded = dl.encode_data(report_tokens, is_report=True)

    # Build the two TextCNN feature extractors.
    kernel_sizes = [3, 4, 5]
    num_channels = 64
    dl.textcnn_rep_model = dl.TextCNN(kernel_sizes, num_channels, dl.max_len, dl.embedding_dim)
    dl.textcnn_src_model = dl.TextCNN(kernel_sizes, num_channels, dl.max_len, dl.embedding_dim)

    # Stack into float32 tensors for the forward passes.
    report_tensor = torch.tensor(np.array(report_encoded), dtype=torch.float32)
    src_tensor = torch.tensor(np.array(src_encoded), dtype=torch.float32)

    # Extract features from both sides.
    report_features = dl.textcnn_rep_model(report_tensor)
    src_features = dl.textcnn_src_model(src_tensor)

    # Attention-weight the report features against the source features.
    attention = dl.TextCNNAttention(report_features.shape[1])
    weighted_report = attention(src_features, report_features)

    # Fuse by concatenation along the feature axis.
    return torch.cat([src_features, weighted_report], dim=1)


# Usage example
if __name__ == "__main__":
    work_dir = '../../../data'  # change to the actual data directory
    project_path = '29'  # change to the actual project id

    # Build the denoising/localization pipeline.
    dl = DefectLocalization(work_dir, project_path)

    # Load and preprocess the source-code and defect-report corpora.
    src_data, report_data = dl.load_data()
    print("src_data:", len(src_data), "report_data:", len(report_data))

    # Fit one Word2Vec model per corpus.
    print("Training Word2Vec model...")
    dl.train_word2vec_src(src_data)
    dl.train_word2vec_rep(report_data)
    print("Word2Vec model training completed.")

    # Encode every document as a (max_len, embedding_dim) matrix.
    print("Calculating word vectors for source code and report...")
    src_data = dl.encode_data(src_data)
    report_data = dl.encode_data(report_data, is_report=True)
    print("Word vectors calculated.")
    print("src_data:", len(src_data), "report_data:", len(report_data))

    # Encoded shapes:
    #   source code:    n x [max_len x 300]  (n = number of source files)
    #   defect reports: m x [max_len x 300]  (m = number of reports)

    # Build the two TextCNN feature extractors.
    print("Initializing TextCNN model...")
    kernel_sizes = [3, 4, 5]
    num_channels = 64
    dl.textcnn_rep_model = dl.TextCNN(kernel_sizes, num_channels, dl.max_len, dl.embedding_dim)
    dl.textcnn_src_model = dl.TextCNN(kernel_sizes, num_channels, dl.max_len, dl.embedding_dim)

    # Stack into float32 tensors for the forward passes.
    report_data = torch.tensor(np.array(report_data), dtype=torch.float32)
    src_data = torch.tensor(np.array(src_data), dtype=torch.float32)

    # Extract features from both sides.
    report_features = dl.textcnn_rep_model(report_data)
    src_features = dl.textcnn_src_model(src_data)
    print("report_features:", report_features.shape, "src_features:", src_features.shape)
    print("TextCNN model initialized.")

    # Attention: weight the report features against the source features.
    feature_dim = report_features.shape[1]
    print("feature_dim:", feature_dim)
    textcnn_attention = dl.TextCNNAttention(feature_dim)
    weighted_report_features = textcnn_attention(src_features, report_features)
    print("weighted_report_features:", weighted_report_features.shape)
    print("src_features:", src_features.shape)

    # Feature fusion by concatenation.
    features = torch.cat([src_features, weighted_report_features], dim=1)
    print("features:", features.shape)
    print(features.shape[0], features.shape[1])
