import json
import os
import time

import networkx as nx
import numpy as np
import torch
import torch.nn.functional as F
from app.logic.utils.filepath import TOKEN_DIR, PDG_DIR, CSG_DIR, AST_DIR
from torch import nn
from torchtext.vocab import GloVe

'''
Build a Comprehensive Semantic Graph (CSG) for a project by merging its
previously generated PDG, AST, and token-sequence artifacts.
'''

# Combine the already-generated PDG, AST, and token sequences into a CSG
class GenerateGraph:
    """Build a Comprehensive Semantic Graph (CSG) for one project.

    The graph merges three pre-generated artifacts found under the project
    workspace: AST JSON files (one node per AST node, parent→child edges),
    PDG JSON files (source→target edges), and plain-text token sequences
    (one node per distinct token, edges between consecutive tokens).
    """

    def __init__(self, work_path_dir, project_path_dir):
        """Resolve the per-project directories and load GloVe vectors.

        Args:
            work_path_dir: Root directory of the workspace.
            project_path_dir: Project identifier; joined onto the root.
        """
        self.workspace = os.path.join(work_path_dir, str(project_path_dir))
        self.token_dir = os.path.join(self.workspace, TOKEN_DIR)
        self.pdg_dir = os.path.join(self.workspace, PDG_DIR)
        self.ast_dir = os.path.join(self.workspace, AST_DIR)
        self.csg_dir = os.path.join(self.workspace, CSG_DIR)

        self.CSGraph = nx.DiGraph()

        # Ensure the CSG output directory exists; exist_ok avoids the
        # check-then-create race of an os.path.exists() guard.
        os.makedirs(self.csg_dir, exist_ok=True)

        # 300-d GloVe vectors; torchtext downloads/caches them on first use.
        self.glove = GloVe(name='6B', dim=300)

    def create_comprehensive_semantic_graph(self):
        """Build the CSG from the AST, PDG, and token files, then save it."""
        start_time = time.time()
        print(
            "Comprehensive Semantic Graph Creation Start Time: " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        # Start from a fresh directed graph so repeated calls don't accumulate.
        self.CSGraph = nx.DiGraph()

        # Merge the three artifact kinds into the graph.
        self._add_ast_nodes_and_edges()
        self._add_pdg_edges()
        self._add_tokens_to_graph()

        # Persist the resulting graph.
        self._save_graph()

        # Report elapsed wall-clock time with integral hour/minute counts
        # (the original printed float values such as "0.0h").
        total_time = time.time() - start_time
        hours, rem = divmod(total_time, 3600)
        minutes, seconds = divmod(rem, 60)
        print(f"Total time: {int(hours)}h{int(minutes)}m{seconds}s")

    def _add_ast_nodes_and_edges(self):
        """Add one node per AST node and one edge per parent→child link.

        A node missing 'id' or 'value' is dropped together with its whole
        subtree (same pruning as the original recursive version). Uses an
        explicit stack instead of recursion so deeply nested ASTs cannot
        exceed Python's recursion limit.
        """
        for ast_file in os.listdir(self.ast_dir):
            ast_file_path = os.path.join(self.ast_dir, ast_file)
            with open(ast_file_path, 'r', encoding='utf-8', errors='ignore') as f:
                ast_data = json.load(f)

            # Depth-first traversal over (node, parent-id) pairs.
            stack = [(ast_data, None)]
            while stack:
                node, parent_id = stack.pop()
                node_id = node.get('id')
                node_value = node.get('value')

                # Malformed node: skip it and its subtree.
                if node_id is None or node_value is None:
                    continue

                if node_id not in self.CSGraph:
                    self.CSGraph.add_node(node_id, value=node_value,
                                          identifier=node.get('identifier'))

                if parent_id is not None:
                    self.CSGraph.add_edge(parent_id, node_id)  # parent→child edge

                for child in node.get('children', []):
                    stack.append((child, node_id))

    def _add_pdg_edges(self):
        """Add every PDG edge to the graph.

        networkx's add_edge creates missing endpoint nodes automatically,
        so no explicit add_node guards are needed.
        """
        for pdg_file in os.listdir(self.pdg_dir):
            pdg_file_path = os.path.join(self.pdg_dir, pdg_file)
            with open(pdg_file_path, 'r', encoding='utf-8', errors='ignore') as f:
                pdg_data = json.load(f)

            for edge in pdg_data.get('edges', []):
                self.CSGraph.add_edge(edge.get('source'), edge.get('target'))

    def _add_tokens_to_graph(self):
        """Add one node per distinct token and edges between consecutive tokens."""
        for token_file in os.listdir(self.token_dir):
            token_file_path = os.path.join(self.token_dir, token_file)
            with open(token_file_path, 'r', encoding='utf-8', errors='ignore') as f:
                tokens = f.read().split()

            prev_token = None
            for token in tokens:
                # Each distinct token becomes a single node.
                if token not in self.CSGraph:
                    self.CSGraph.add_node(token, type='token')

                # Sequential order is encoded as an edge between neighbours.
                if prev_token is not None:
                    self.CSGraph.add_edge(prev_token, token)

                prev_token = token

    def _save_graph(self):
        """Write the graph to <csg_dir>/comprehensive_semantic_graph.gexf."""
        graph_file = os.path.join(self.csg_dir, 'comprehensive_semantic_graph.gexf')

        # GEXF preserves node/edge attributes and is easy to reload later.
        nx.write_gexf(self.CSGraph, graph_file)

        print(f"综合语义图已保存至 {graph_file}")

    def generate_node_embeddings(self):
        """Return {node: 300-d GloVe tensor} for every node in the graph.

        torchtext's GloVe.__getitem__ never raises KeyError: out-of-vocabulary
        tokens (including non-string node ids such as integer AST ids) receive
        the unk_init vector, which defaults to all zeros — the try/except
        KeyError in the original was dead code.
        """
        return {node: self.glove[node] for node in self.CSGraph.nodes}

    class Attention(nn.Module):
        """Learned attention pooling: collapse a sequence into one vector."""

        def __init__(self, input_dim):
            super().__init__()
            # Single learnable scoring vector, Xavier-initialised.
            self.attention_weights = nn.Parameter(torch.Tensor(input_dim, 1))
            nn.init.xavier_uniform_(self.attention_weights)

        def forward(self, x):
            """Pool the sequence with learned attention weights.

            Args:
                x: GRU output, shape (batch_size, seq_len, hidden_dim).

            Returns:
                (weighted_sum, attn_weights) with shapes
                (batch_size, hidden_dim) and (batch_size, seq_len).
            """
            # Raw scores: (batch_size, seq_len, 1) -> (batch_size, seq_len).
            attn_score = torch.matmul(x, self.attention_weights).squeeze(-1)

            # Normalise over the sequence dimension.
            attn_weights = F.softmax(attn_score, dim=1)

            # Weighted sum over timesteps: (batch_size, hidden_dim).
            weighted_sum = torch.sum(x * attn_weights.unsqueeze(-1), dim=1)
            return weighted_sum, attn_weights

    class AttBiGRU(nn.Module):
        """Bidirectional GRU + attention pooling + linear projection head."""

        def __init__(self, input_dim, hidden_dim, output_dim):
            super().__init__()
            self.gru = nn.GRU(input_dim, hidden_dim, bidirectional=True, batch_first=True)
            # A bidirectional GRU doubles the feature dimension.
            self.attention = GenerateGraph.Attention(hidden_dim * 2)
            self.fc = nn.Linear(hidden_dim * 2, output_dim)

        def forward(self, x):
            """Run the sequence through BiGRU, attention, and the head.

            Args:
                x: Node-embedding sequence, shape (batch_size, seq_len, input_dim).

            Returns:
                (output, attn_weights) with shapes
                (batch_size, output_dim) and (batch_size, seq_len).
            """
            gru_out, _ = self.gru(x)  # (batch_size, seq_len, hidden_dim*2)
            weighted_sum, attn_weights = self.attention(gru_out)
            output = self.fc(weighted_sum)  # (batch_size, output_dim)
            return output, attn_weights


def gen_csg(work_dir, project_path):
    """Build the CSG for one project and run its node embeddings through Att-BiGRU.

    Args:
        work_dir: Root workspace directory.
        project_path: Project identifier under the workspace.

    Returns:
        (output, attn_weights): model output of shape (1, 64) and attention
        weights of shape (1, num_nodes) from a single forward pass.
    """
    generator = GenerateGraph(work_dir, project_path)
    generator.create_comprehensive_semantic_graph()
    node_embeddings = generator.generate_node_embeddings()
    embeddings_list = list(node_embeddings.values())
    print(type(embeddings_list))
    # Debug: inspect only the first 10 entries (the original loop printed all
    # of them despite its "first 10" comment).
    for i, item in enumerate(embeddings_list[:10]):
        print(f"Item {i}: Type={type(item)}, Shape/Length={getattr(item, 'shape', len(item))}")
    embeddings_list = [item.numpy() if isinstance(item, torch.Tensor) else item
                       for item in embeddings_list]
    embeddings_array = np.array(embeddings_list, dtype=np.float32)
    print("----------------------------")
    print(type(embeddings_array), embeddings_array.dtype)
    print(embeddings_array)
    print("----------------------------")
    # Shape (1, num_nodes, 300): the whole node sequence as one batch.
    node_embeddings_tensor = torch.tensor(embeddings_array, dtype=torch.float32).unsqueeze(0)

    input_dim = 300   # GloVe embedding dimension
    hidden_dim = 128  # GRU hidden size (per direction)
    output_dim = 64   # final projection size (task dependent)

    model = GenerateGraph.AttBiGRU(input_dim, hidden_dim, output_dim)
    output, attn_weights = model(node_embeddings_tensor)

    return output, attn_weights


# Usage example
if __name__ == "__main__":
    work_dir = '../../../data'
    project_path = '30'

    # Build the CSG and fetch a GloVe vector for every graph node.
    generator = GenerateGraph(work_dir, project_path)
    generator.create_comprehensive_semantic_graph()
    node_embeddings = generator.generate_node_embeddings()
    print("node_embeddings:", node_embeddings, "node_embeddings len:", len(node_embeddings))
    first_key = list(node_embeddings)[0]
    print("node_embeddings[0]:", node_embeddings[first_key].shape)

    # Sanity-check that every embedding is 300-dimensional.
    embeddings_list = list(node_embeddings.values())
    for idx, emb in enumerate(embeddings_list):
        print("index:", idx, "embeddings_list", emb.shape)
        assert len(emb) == 300, "每个嵌入的维度应该是 300"

    # Stack into a (1, num_nodes, 300) batch tensor for the model.
    embeddings_array = np.array(embeddings_list)
    node_embeddings_tensor = torch.tensor(embeddings_array, dtype=torch.float32)
    node_embeddings_tensor = node_embeddings_tensor.unsqueeze(0)
    print(node_embeddings_tensor.shape)

    input_dim = 300   # dimension of each word vector
    hidden_dim = 128  # GRU hidden-layer size
    output_dim = 64   # output size (adjustable per task)

    # Build the model and run one forward pass.
    model = generator.AttBiGRU(input_dim, hidden_dim, output_dim)
    output, attn_weights = model(node_embeddings_tensor)

    # Show the results.
    print("Output:", output)
    print("Output Shape:", output.shape)
    print("Attention Weights:", attn_weights)
    print("Attention Weights Shape:", attn_weights.shape)
