import time
from datetime import datetime

import networkx as nx
import torch
import torch.nn as nn
import torch.nn.functional as F
from matplotlib import pyplot as plt
from torch_geometric.data import Data
from torch_geometric.nn import GATConv


class EmbeddingLayer(nn.Module):
    """Look up a dense embedding vector for each integer attribute index.

    Args:
        num_embeddings: number of distinct node-attribute values (vocabulary size).
        embedding_dim: dimensionality each attribute index is mapped to.
    """

    def __init__(self, num_embeddings, embedding_dim):
        super().__init__()
        self.embedding = nn.Embedding(num_embeddings, embedding_dim)

    def forward(self, x):
        # x must be a non-negative integer (Long) tensor: each value is an index
        # into the embedding matrix. (Debug prints of x and its index range were
        # removed; they ran on every forward pass.)
        return self.embedding(x)


class ResidualGATLayer(nn.Module):
    """Single GAT layer with a linear skip (residual) connection.

    The bias-free linear projection maps the input to ``out_channels * heads``
    so it can be added element-wise to the concatenated multi-head attention
    output, helping mitigate vanishing gradients in deeper stacks.
    """

    def __init__(self, in_channels, out_channels, heads=1, dropout=0., negative_slope=0.2):
        super().__init__()
        # Multi-head graph attention over the given edge structure.
        self.gat = GATConv(in_channels, out_channels, heads=heads, dropout=dropout, negative_slope=negative_slope)
        # Residual projection matching the GAT output width.
        self.res_connection = nn.Linear(in_channels, out_channels * heads, bias=False)

    def reset_parameters(self):
        """Re-initialise both sub-modules so every training run starts from the same state."""
        self.gat.reset_parameters()
        self.res_connection.reset_parameters()

    def forward(self, x, edge_index):
        attended = self.gat(x, edge_index)
        skip = self.res_connection(x)
        return attended + skip


class GATModel(nn.Module):
    """Stack of residual GAT layers on top of an attribute-embedding layer.

    Hidden layers use `heads` attention heads with LeakyReLU activations
    between them; the final layer uses a single head and emits `out_dim`
    scores per node (regression output, no activation).
    """

    def __init__(self, num_features, embedding_dim, hidden_dim, out_dim, num_layers=2, heads=4):
        super().__init__()
        self.embedding_layer = EmbeddingLayer(num_features, embedding_dim)
        stack = [ResidualGATLayer(embedding_dim, hidden_dim, heads=heads)]
        stack.extend(
            ResidualGATLayer(hidden_dim * heads, hidden_dim, heads=heads)
            for _ in range(num_layers - 2)
        )
        # Output layer: single head, out_dim regression scores.
        stack.append(ResidualGATLayer(hidden_dim * heads, out_dim, heads=1))
        self.layers = nn.ModuleList(stack)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialise the embedding table and every GAT layer."""
        self.embedding_layer.embedding.reset_parameters()
        for layer in self.layers:
            layer.reset_parameters()

    def forward(self, x, edge_index):
        h = self.embedding_layer(x)
        for layer in self.layers[:-1]:
            h = F.leaky_relu(layer(h, edge_index))
        return self.layers[-1](h, edge_index)


# Build the BOM graph
def build_bom_graph(bom_structure):
    """Build a directed BOM graph from the nested structure description.

    Parent keys and children may be plain strings ('SBB1') or
    (name, quantity/weight) tuples; tuple children become weighted edges.
    Parent quantities are ignored — only the name is used.

    Returns:
        networkx.DiGraph with optional per-edge 'weight' attributes.
    """
    G = nx.DiGraph()

    def add_edge(source, target):
        # Plain string -> unweighted edge; (name, weight) tuple -> weighted edge.
        if isinstance(target, str):
            G.add_edge(source, target)
        elif isinstance(target, tuple):
            G.add_edge(source, target[0], weight=target[1])

    def build(structure):
        # NOTE: the original used `global p` here, which polluted the module
        # namespace and would silently reuse a stale value for keys that are
        # neither str nor tuple; a local extraction avoids both problems.
        for parent, child in structure.items():
            parent_name = parent[0] if isinstance(parent, tuple) else parent

            if isinstance(child, dict):
                # Dict children: each key is a direct child, then recurse
                # into the sub-assembly.
                for key in child:
                    add_edge(parent_name, key)
                build(child)
            elif isinstance(child, set):
                # Set children: leaf-level parts.
                for c in child:
                    add_edge(parent_name, c)

    build(bom_structure)

    print("Graph:", G)
    print("Edges:", G.edges)
    print("Nodes:", list(G.nodes))
    # Print every edge with its weight (defaulting to 1 when absent).
    for u, v, data in G.edges(data=True):
        weight = data.get('weight', 1)
        print(f"Edge from {u} to {v} has weight {weight}")

    return G


# Build node features
def build_node_features(bom_attr, nodes):
    """Build a (len(nodes), 16) LongTensor of per-node features.

    Each node's attribute names are mapped to indices, embedded via a freshly
    initialised nn.Embedding, mean-pooled, passed through ReLU (keeping values
    non-negative), and finally truncated to integers with .long() because the
    downstream EmbeddingLayer expects LongTensor indices.

    Nodes with no entry in *bom_attr* get a zero vector instead of an
    embedding lookup (an empty index list would otherwise produce nothing
    to pool).
    """
    # 1. Collect the set of distinct attribute names (the embedding vocabulary).
    unique_properties = set()
    for properties in bom_attr.values():
        unique_properties.update(properties.keys())

    # 2. Map each attribute name to an integer index.
    property_to_idx = {prop: i for i, prop in enumerate(unique_properties)}

    # 3. Embedding table over attribute names. nn.Embedding is used directly
    #    (instead of the EmbeddingLayer wrapper) to avoid its debug prints;
    #    max(..., 1) keeps construction valid even with an empty vocabulary.
    embedding_dim = 16
    embedding = nn.Embedding(max(len(unique_properties), 1), embedding_dim)

    # 4. Embed each node's attributes.
    node_features_list = []
    for node in nodes:
        features = bom_attr.get(node, {})
        feature_indices = [property_to_idx[prop] for prop in features]

        if feature_indices:
            # One batched lookup (k, 16) instead of k separate forward calls,
            # then mean-pool over the node's attributes and clamp via ReLU.
            vectors = embedding(torch.tensor(feature_indices, dtype=torch.long))
            node_feature_vector = F.relu(vectors.mean(dim=0))
        else:
            # Attribute-less node: zero vector marks the missing features.
            node_feature_vector = torch.zeros(embedding_dim)

        node_features_list.append(node_feature_vector)

    # 5. Stack to (num_nodes, 16); .long() converts to integer indices for the
    #    downstream EmbeddingLayer input.
    return torch.stack(node_features_list).long()


# Build edge index and weights
def build_edge_index_and_weight(G):
    """Convert graph adjacency into PyG-style edge tensors.

    Returns:
        edge_index: LongTensor of shape (2, num_edges) — source/target node
            positions within list(G.nodes).
        edge_weight: FloatTensor of shape (num_edges,); edges without a
            'weight' attribute default to 1.
    """
    # Hoist node->position mapping out of the loop: the original called
    # list(G.nodes).index(...) per edge endpoint, which is O(E*N).
    node_to_idx = {node: i for i, node in enumerate(G.nodes)}
    edge_index = []
    edge_weight = []
    # G.adjacency() yields (node, {neighbor: edge_attr_dict}) pairs.
    for parent, children in G.adjacency():
        for child, attrs in children.items():
            edge_index.append([node_to_idx[parent], node_to_idx[child]])
            edge_weight.append(attrs.get('weight', 1))
    return torch.tensor(edge_index).t().contiguous(), torch.tensor(edge_weight, dtype=torch.float)


# Training loop
def train(model, data, optimizer, criterion, epochs, decay_rate=0.99):
    """Fit *model* on *data* for *epochs* steps with exponential LR decay.

    Side effects: prints loss every 10 epochs, saves a loss-curve PNG,
    writes the trained weights to model.pth, and prints the elapsed time
    and the model structure.
    """
    train_start_time = time.time()
    model.train()
    loss_history = []
    # Multiply the learning rate by decay_rate after every epoch.
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=decay_rate)

    for epoch in range(epochs):
        optimizer.zero_grad()
        predictions = model(data.x, data.edge_index)
        loss = criterion(predictions, data.y)
        loss_history.append(loss.item())
        loss.backward()
        optimizer.step()
        if epoch % 10 == 0:
            print(f"Epoch {epoch}: Loss {loss.item()}")
        scheduler.step()

    # Plot and save the loss curve with a timestamped filename.
    plt.figure()
    plt.plot(range(epochs), loss_history)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Loss over epochs')
    plt.savefig(f'loss_over_epochs_{datetime.now().strftime("%Y%m%d_%H%M%S")}.png', dpi=300, bbox_inches='tight')

    # Persist the trained weights.
    torch.save(model.state_dict(), "model.pth")

    train_end_time = time.time()
    print(f"模型训练花费时间: {train_end_time - train_start_time} 秒")
    print("GNN Dimension : ", model)


# Evaluation helper
def test(model, data, parent):
    """Return the top-3 (child, score) pairs for *parent*, or None if the
    part is not in the graph.

    Relies on the module-level `nodes` list and `bom_graph` for the index
    lookup and the child (out-neighbour) list. Now guards against an unknown
    parent the same way `predict` does — previously `nodes.index(parent)`
    raised a bare ValueError.

    NOTE(review): the score row has length out_dim while `children` holds only
    the actual out-neighbours; `zip` truncates to the shorter — confirm this
    pairing is the intended semantics.
    """
    with torch.no_grad():
        output = model(data.x, data.edge_index)
        if parent not in nodes:
            print(parent, "在图中不存在")
            return
        parent_index = nodes.index(parent)
        children_scores = output[parent_index].tolist()
        children = list(bom_graph.adj[parent])
        children_and_scores = list(zip(children, children_scores))
        children_and_scores.sort(key=lambda x: x[1], reverse=True)
        return children_and_scores[:3]


# Example data: nested BOM structure. Keys/children are plain names or
# (name, quantity/weight) tuples; dict values are sub-assemblies, set values
# are leaf children.
bom_structure = {
    ('WGPart1', 2): {
        'SBB3': {('SBB8', 100), 'SBB4', 'SBB5'},
        'SBB4': {'SBB3': {('SBB10', 10)}, ('SBB9', 2): {}},
        'SBB5': {'SBB1', 'SBB6'},
        'SBB6': {'SBB5', ('SBB3', 10)},
        ('SBB8', 3): {('SBB1', 10)}
    },
    ('WGPart2', 2): {},
    'WGPart3': {'SBB1': {'SBB2'}},
    'WGPart4': {'SBB7': {'SBB8', ('SBB9', 2)}}
}

# Node attributes keyed by part name; parts without an entry receive a zero
# feature vector in build_node_features.
bom_attr = {
    "SBB1": {"size": 10, "material": "glass", "weight": 200},
    "SBB2": {"model": "ABC123", "power": 50},
    "SBB3": {"brand": "XYZ", "size": 5},
    "SBB4": {"type": "mechanical keyboard", "color": "black", "weight": 300},
    "SBB5": {"model": "Intel Core i7", "power": 65},
    "SBB6": {"model": "Intel Core i7", "power": 65}
}

# bom_structure, bom_attr = genData(part_nums=50, sbb_nums=500)

# Build the graph data
bom_graph = build_bom_graph(bom_structure)
nodes = list(bom_graph.nodes)
node_features = build_node_features(bom_attr, nodes)
edge_index, edge_weight = build_edge_index_and_weight(bom_graph)
assert node_features.size(0) == len(nodes), "Node features count doesn't match node count."
assert edge_index.size(1) == len(edge_weight), "Edge index size doesn't match edge weights count."

# Build the PyG Data object (y is an all-zero regression target vector)
data = Data(x=node_features, edge_index=edge_index, edge_attr=edge_weight, y=torch.zeros(len(nodes)))
print(data)

# Instantiate the model.
# NOTE(review): num_features=node_features.size(1) (=16) is used as the embedding
# vocabulary size, but node_features holds truncated embedding values that may
# exceed 15 — confirm the index range stays within bounds at runtime.
model = GATModel(node_features.size(1), 16, 8, len(nodes))  # sized to match the Embedding input and output layers
print(model)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=1e-5)
criterion = nn.MSELoss()

# Train the model
train(model, data, optimizer, criterion, epochs=300)

# Reload the trained weights from disk into a fresh model
model = GATModel(node_features.size(1), 16, 8, len(nodes))
model.load_state_dict(torch.load("model.pth"))
model.eval()


# Prediction helper
def predict(model, data, parent):
    """Return the top-3 (child, score) pairs for *parent*, or None (with a
    message) when the part is absent from the graph.

    Relies on the module-level `nodes` list and `bom_graph` for the index
    lookup and the child (out-neighbour) list.
    """
    with torch.no_grad():
        all_scores = model(data.x, data.edge_index)
        if parent not in nodes:
            print(parent, "在图中不存在")
            return
        parent_index = nodes.index(parent)
        # Row of the model output corresponding to the parent node.
        score_values = all_scores[parent_index].tolist()
        # bom_graph.adj[parent] maps each direct out-neighbour (child) of
        # `parent` to its edge attributes; list(...) takes just the names.
        child_parts = list(bom_graph.adj[parent])
        # Pair children with scores (zip stops at the shorter sequence) and
        # rank descending by score.
        ranked = sorted(zip(child_parts, score_values), key=lambda pair: pair[1], reverse=True)
        return ranked[:3]


# Predict the most likely children for an input parent part
parent = 'SBB3'
print("预测得分最高的三个结果：", predict(model, data, parent))