import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import networkx as nx
from collections import defaultdict
import random
# Temporal Heterogeneous Graph Representation Learning (THGRL) service
class TemporalHeterogeneousGraphRepresentationLearning:
    """Temporal Heterogeneous Graph Representation Learning (THGRL) service.

    Learns low-dimensional node embeddings from a sequence of graph
    snapshots whose nodes and edges carry a ``type`` attribute, and
    supports link-probability prediction between known nodes.

    The snapshots only need a networkx-like interface:
    ``nodes(data=True)``, ``nodes[n]``, ``edges(data=True)``,
    ``neighbors(n)`` and ``has_edge(u, v)``.
    """

    def __init__(self, embedding_dim=64, time_steps=5, learning_rate=0.01, epochs=100):
        """Store hyper-parameters; the torch model is built lazily in fit()."""
        self.embedding_dim = embedding_dim
        self.time_steps = time_steps
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.model = None
        self.node_embeddings = {}    # node -> learned embedding (numpy array)
        self.node_to_idx = {}        # node -> integer id in the embedding table
        self.type_to_idx = {}        # node type -> integer id
        self.edge_type_to_idx = {}   # edge type -> integer id
        self.node_types = {}         # node -> its 'type' attribute (last seen)

    class THGRLModel(nn.Module):
        """Embedding tables + GRU temporal encoder + link-prediction head."""

        def __init__(self, num_nodes, num_node_types, num_edge_types, embedding_dim, time_steps):
            super().__init__()
            self.node_embeddings = nn.Embedding(num_nodes, embedding_dim)
            self.node_type_embeddings = nn.Embedding(num_node_types, embedding_dim)
            self.edge_type_embeddings = nn.Embedding(num_edge_types, embedding_dim)

            # GRU captures the temporal dynamics of a node's history.
            self.rnn = nn.GRU(embedding_dim * 2, embedding_dim, batch_first=True)

            # Heterogeneous attention scorer over (source, target, edge-type).
            self.attention = nn.Sequential(
                nn.Linear(embedding_dim * 3, embedding_dim),
                nn.Tanh(),
                nn.Linear(embedding_dim, 1)
            )

            # Encodes a scalar timestamp into an embedding vector.
            self.time_encoder = nn.Sequential(
                nn.Linear(1, embedding_dim),
                nn.Tanh()
            )

            # Link-probability head (Sigmoid output pairs with BCELoss).
            self.node_predictor = nn.Sequential(
                nn.Linear(embedding_dim * 2, embedding_dim),
                nn.ReLU(),
                nn.Linear(embedding_dim, 1),
                nn.Sigmoid()
            )

            self.init_weights()

        def init_weights(self):
            """Xavier-initialize all embedding tables."""
            nn.init.xavier_uniform_(self.node_embeddings.weight)
            nn.init.xavier_uniform_(self.node_type_embeddings.weight)
            nn.init.xavier_uniform_(self.edge_type_embeddings.weight)

        def forward(self, source_nodes, target_nodes, edge_types, source_types, target_types,
                   time_seqs, time_positions):
            """Score (source, target) pairs.

            Args:
                source_nodes, target_nodes: LongTensor [batch] of node ids.
                edge_types: LongTensor [batch] of edge-type ids.
                source_types, target_types: LongTensor [batch] of node-type ids.
                time_seqs: optional tensor whose dim 1 is the number of time
                    steps; None disables the temporal (GRU) path.
                time_positions: FloatTensor [batch] of timestamps.

            Returns:
                Tuple ``(pred, source_combined, target_combined)`` where
                ``pred`` is the per-pair link probability.
            """
            # Base node embeddings, [batch, emb_dim].
            source_emb = self.node_embeddings(source_nodes)
            target_emb = self.node_embeddings(target_nodes)

            # Node-type embeddings, [batch, emb_dim].
            source_type_emb = self.node_type_embeddings(source_types)
            target_type_emb = self.node_type_embeddings(target_types)

            # Edge-type embeddings, [batch, emb_dim].
            edge_type_emb = self.edge_type_embeddings(edge_types)

            # Fuse node identity with its type.
            source_combined = source_emb + source_type_emb
            target_combined = target_emb + target_type_emb

            # Attention over (source, target, edge-type), softmax across the
            # batch. NOTE(review): computed but not yet applied to the
            # prediction path — confirm whether it should weight the output.
            attn_input = torch.cat([source_combined, target_combined, edge_type_emb], dim=1)
            attn_weights = F.softmax(self.attention(attn_input), dim=0)  # [batch, 1]

            # Scalar timestamp -> embedding, [batch, emb_dim].
            time_emb = self.time_encoder(time_positions.unsqueeze(-1))

            if time_seqs is not None:
                # Build a [batch, time_steps, 2*emb_dim] RNN input by tiling
                # the fused source embedding and the time embedding.
                steps = time_seqs.size(1)
                time_seq_input = torch.cat([
                    source_combined.unsqueeze(1).expand(-1, steps, -1),
                    time_emb.unsqueeze(1).expand(-1, steps, -1)
                ], dim=2)
                rnn_out, _ = self.rnn(time_seq_input)
                temporal_emb = rnn_out[:, -1, :]  # last time step, [batch, emb_dim]
            else:
                # No temporal information: fall back to the static embedding.
                temporal_emb = source_combined

            # Predict the link probability, [batch].
            pred_input = torch.cat([temporal_emb, target_combined], dim=1)
            pred = self.node_predictor(pred_input)

            return pred.squeeze(), source_combined, target_combined

    def _prepare_data(self, graph_sequence):
        """Index nodes/types and build labelled training triplets.

        Returns:
            ``(triplets, node_time_seqs)`` where each triplet is
            ``(src_idx, dst_idx, edge_type_idx, src_type_idx, dst_type_idx,
            time, label)`` — label 1 for observed edges, 0 for negatives.
        """
        all_nodes = set()
        node_type_set = set()
        edge_type_set = set()

        for g in graph_sequence:
            for node, attr in g.nodes(data=True):
                all_nodes.add(node)
                node_type = attr.get('type', 'unknown')
                node_type_set.add(node_type)
                self.node_types[node] = node_type

            for _, _, attr in g.edges(data=True):
                edge_type_set.add(attr.get('type', 'default'))

        # Index mappings.
        self.node_to_idx = {node: i for i, node in enumerate(all_nodes)}
        self.type_to_idx = {t: i for i, t in enumerate(node_type_set)}
        self.edge_type_to_idx = {t: i for i, t in enumerate(edge_type_set)}

        # random.choice needs a sequence: random.sample on a set was
        # deprecated in Python 3.9 and raises TypeError on 3.11+.
        node_pool = list(all_nodes)

        triplets = []
        time_seqs = defaultdict(list)

        for t, g in enumerate(graph_sequence):
            # Record each node's neighbours per time step.
            for node in g.nodes():
                neighbors = list(g.neighbors(node))
                if neighbors:
                    time_seqs[node].append((t, neighbors))

            for u, v, data in g.edges(data=True):
                edge_type = data.get('type', 'default')
                edge_type_idx = self.edge_type_to_idx[edge_type]
                u_type_idx = self.type_to_idx[self.node_types[u]]

                # Positive sample (label 1).
                triplets.append((
                    self.node_to_idx[u],
                    self.node_to_idx[v],
                    edge_type_idx,
                    u_type_idx,
                    self.type_to_idx[self.node_types[v]],
                    t,
                    1
                ))

                # Negative sampling: up to 5 negatives per positive.  The
                # label travels with each triplet, so skipping a candidate
                # that happens to be a true edge cannot desynchronize the
                # labels (the old positional "i % 6" labelling did).
                for _ in range(5):
                    neg_v = random.choice(node_pool)
                    if not g.has_edge(u, neg_v):
                        triplets.append((
                            self.node_to_idx[u],
                            self.node_to_idx[neg_v],
                            edge_type_idx,
                            u_type_idx,
                            self.type_to_idx.get(self.node_types.get(neg_v, 'unknown'), 0),
                            t,
                            0
                        ))

        # Per-node temporal sequences; only nodes active in at least
        # time_steps snapshots contribute.
        node_time_seqs = {}
        for node, seq in time_seqs.items():
            if len(seq) >= self.time_steps:
                # Keep the most recent time_steps entries.
                recent_seq = seq[-self.time_steps:]
                node_idx = self.node_to_idx[node]

                time_features = []
                for t, neighbors in recent_seq:
                    # TODO(review): a richer aggregation than a raw neighbour
                    # index list would help in real deployments.
                    neighbor_idxs = [self.node_to_idx[n] for n in neighbors if n in self.node_to_idx]
                    time_features.append((t, neighbor_idxs or [node_idx]))  # self-loop fallback

                node_time_seqs[node_idx] = time_features

        return triplets, node_time_seqs

    def fit(self, graph_sequence):
        """Train the model on a sequence of graph snapshots.

        Args:
            graph_sequence: iterable of graph snapshots, ordered by time.

        Returns:
            Dict mapping each node to its learned embedding (numpy array).
        """
        triplets, node_time_seqs = self._prepare_data(graph_sequence)

        # Unpack the triplets into tensors.
        source_nodes = torch.LongTensor([t[0] for t in triplets])
        target_nodes = torch.LongTensor([t[1] for t in triplets])
        edge_types = torch.LongTensor([t[2] for t in triplets])
        source_types = torch.LongTensor([t[3] for t in triplets])
        target_types = torch.LongTensor([t[4] for t in triplets])
        timestamps = torch.FloatTensor([t[5] for t in triplets])
        # Labels come straight from the triplets (1 positive, 0 negative).
        labels = torch.FloatTensor([t[6] for t in triplets])

        # Simplified: a real deployment needs proper batching of the
        # temporal sequences produced by _prepare_data.
        time_seq_tensors = None

        self.model = self.THGRLModel(
            num_nodes=len(self.node_to_idx),
            num_node_types=len(self.type_to_idx),
            num_edge_types=len(self.edge_type_to_idx),
            embedding_dim=self.embedding_dim,
            time_steps=self.time_steps
        )

        # Full-batch training.
        optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
        criterion = nn.BCELoss()

        for epoch in range(self.epochs):
            self.model.train()
            optimizer.zero_grad()

            predictions, source_emb, target_emb = self.model(
                source_nodes, target_nodes, edge_types, source_types, target_types,
                time_seq_tensors, timestamps
            )

            loss = criterion(predictions, labels)
            loss.backward()
            optimizer.step()

            if (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch+1}/{self.epochs}, Loss: {loss.item():.4f}")

        # Extract the learned (type-fused) node embeddings.
        self.model.eval()
        with torch.no_grad():
            num_nodes = len(self.node_to_idx)
            node_idxs = torch.LongTensor(list(range(num_nodes)))
            dummy_targets = torch.zeros_like(node_idxs)
            dummy_edges = torch.zeros_like(node_idxs)
            dummy_times = torch.zeros_like(node_idxs).float()

            # Use each node's actual type (not a zero placeholder) so the
            # stored embedding matches what forward() fuses at predict time.
            idx_to_node = {i: n for n, i in self.node_to_idx.items()}
            node_type_idxs = torch.LongTensor([
                self.type_to_idx.get(self.node_types.get(idx_to_node[i], 'unknown'), 0)
                for i in range(num_nodes)
            ])

            _, node_embs, _ = self.model(
                node_idxs, dummy_targets, dummy_edges, node_type_idxs, node_type_idxs,
                None, dummy_times
            )

            for i in range(num_nodes):
                self.node_embeddings[idx_to_node[i]] = node_embs[i].numpy()

        return self.node_embeddings

    def get_node_embedding(self, node):
        """Return the learned embedding for ``node``, or None if unknown."""
        return self.node_embeddings.get(node)

    def predict_link(self, source, target, timestamp=None, edge_type=None):
        """Predict the link probability between two known nodes.

        Returns 0.0 when either node is unknown or fit() was not called.
        """
        # Coerce to plain str (callers sometimes pass numpy scalars); node
        # keys are therefore expected to be strings.
        if not isinstance(source, str):
            source = str(source)
        if not isinstance(target, str):
            target = str(target)

        if source not in self.node_to_idx or target not in self.node_to_idx:
            return 0.0

        if self.model is None:
            return 0.0

        self.model.eval()
        with torch.no_grad():
            source_idx = torch.LongTensor([self.node_to_idx[source]])
            target_idx = torch.LongTensor([self.node_to_idx[target]])
            edge_type_idx = torch.LongTensor([
                self.edge_type_to_idx.get(edge_type, 0)
            ])

            # Look up the node types recorded during _prepare_data.  (The
            # previous isinstance(source, dict) branch was dead code after
            # the str() coercion above, so types were always 'unknown'.)
            source_type = self.node_types.get(source, 'unknown')
            target_type = self.node_types.get(target, 'unknown')

            source_type_idx = torch.LongTensor([
                self.type_to_idx.get(source_type, 0)
            ])
            target_type_idx = torch.LongTensor([
                self.type_to_idx.get(target_type, 0)
            ])
            # Explicit None check: a legitimate timestamp of 0 is kept as-is.
            time_idx = torch.FloatTensor([timestamp if timestamp is not None else 0.0])

            pred, _, _ = self.model(
                source_idx, target_idx, edge_type_idx, source_type_idx, target_type_idx,
                None, time_idx
            )

            return pred.item()