import torch  
import torch.nn as nn  
import torch.nn.functional as F  
import torch_geometric  
from torch_geometric.nn import GINConv, global_mean_pool, global_max_pool, global_add_pool  
from torch_geometric.data import Data  
import numpy as np  

class GraphLevelBranch(nn.Module):
    """Graph-level representation branch.

    Pipeline: stacked GIN convolutions over node features, per-graph
    mean pooling, multi-head self-attention refinement, and a final
    linear projection.

    NOTE(review): the attention runs on a sequence of length 1 (the
    pooled graph vector), so the softmax over keys is trivially 1.0 and
    the layer reduces to a learned linear mixing of the pooled vector —
    confirm this is intended.
    """

    def __init__(self, input_dim, hidden_dim=128, num_layers=2):
        super(GraphLevelBranch, self).__init__()

        # GIN stack: first layer maps input_dim -> hidden_dim, the rest
        # stay at hidden_dim.
        convs = []
        for layer_idx in range(num_layers):
            in_dim = input_dim if layer_idx == 0 else hidden_dim
            mlp = nn.Sequential(
                nn.Linear(in_dim, hidden_dim),
                nn.BatchNorm1d(hidden_dim),
                nn.ReLU(),
                nn.Linear(hidden_dim, hidden_dim),
            )
            convs.append(GINConv(mlp))
        self.gin_convs = nn.ModuleList(convs)

        # Multi-head self-attention over the pooled graph representation.
        self.attention = nn.MultiheadAttention(hidden_dim, num_heads=4)

        # Output projection (keeps hidden_dim).
        self.fc_out = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        # Message passing: ReLU after every GIN layer.
        for gin in self.gin_convs:
            x = F.relu(gin(x, edge_index))

        # Per-graph mean pooling of node embeddings -> (num_graphs, hidden).
        pooled = global_mean_pool(x, batch)

        # Self-attention with a singleton sequence dimension:
        # MultiheadAttention expects (seq_len, batch, embed_dim) here.
        query = pooled.unsqueeze(0)
        attended, _ = self.attention(query, query, query)

        return self.fc_out(attended.squeeze(0))

class SubgraphEnhancedBranch(nn.Module):
    """Subgraph-enhanced branch.

    Pipeline: a linear projection acting as a soft "subgraph clustering"
    (input_dim -> hidden_dim), a hierarchical GIN stack, and per-graph
    max pooling followed by a linear projection.

    Bug fix: the first GIN layer was built with ``input_dim`` input
    features, but ``forward`` applies ``subgraph_clustering`` (which
    already projects to ``hidden_dim``) *before* the GIN stack — so the
    first convolution crashed on a shape mismatch whenever
    input_dim != hidden_dim. All GIN layers now operate on hidden_dim.
    """

    def __init__(self, input_dim, hidden_dim=128, num_layers=2):
        super(SubgraphEnhancedBranch, self).__init__()

        # Hierarchical GNN: every layer is hidden_dim -> hidden_dim
        # because subgraph_clustering projects the input first.
        self.hierarchical_gnn = nn.ModuleList([
            GINConv(nn.Sequential(
                nn.Linear(hidden_dim, hidden_dim),
                nn.BatchNorm1d(hidden_dim),
                nn.ReLU(),
                nn.Linear(hidden_dim, hidden_dim)
            )) for _ in range(num_layers)
        ])

        # Soft subgraph clustering: linear projection of raw node features.
        self.subgraph_clustering = nn.Linear(input_dim, hidden_dim)

        # Output projection (keeps hidden_dim).
        self.fc_out = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        # Project node features into the clustering/hidden space first.
        x = self.subgraph_clustering(x)

        # Hierarchical message passing with ReLU after each layer.
        for conv in self.hierarchical_gnn:
            x = conv(x, edge_index)
            x = F.relu(x)

        # Per-graph max pooling -> (num_graphs, hidden_dim).
        subgraph_repr = global_max_pool(x, batch)

        return self.fc_out(subgraph_repr)

class OpenSetClassifier(nn.Module):
    """MLP head for open-set classification.

    Emits ``num_known_classes + 1`` logits; the extra (last) logit is
    reserved for the "unknown" class.
    """

    def __init__(self, input_dim, num_known_classes, hidden_dim=128):
        super(OpenSetClassifier, self).__init__()

        # Two hidden layers with dropout, then the open-set logit layer.
        stages = [
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            # +1 output slot for the unknown class.
            nn.Linear(hidden_dim // 2, num_known_classes + 1),
        ]
        self.fc_layers = nn.Sequential(*stages)

    def forward(self, x):
        logits = self.fc_layers(x)
        return logits

class DREAM(nn.Module):
    """Dual-branch open-set graph classifier with cross-domain mixup
    and a FIFO memory bank.

    ``forward`` returns open-set logits of shape
    (batch, num_known_classes + 1); the last logit is the unknown class.
    """

    def __init__(self, input_dim, num_known_classes, hidden_dim=128):
        super(DREAM, self).__init__()

        # Dual-branch encoders: graph-level and subgraph-level.
        self.graph_branch = GraphLevelBranch(input_dim, hidden_dim)
        self.subgraph_branch = SubgraphEnhancedBranch(input_dim, hidden_dim)

        # Open-set classifier consumes the concatenation of both branches.
        self.open_set_classifier = OpenSetClassifier(hidden_dim * 2, num_known_classes, hidden_dim)

        # Memory bank: FIFO buffer of detached embeddings, created lazily.
        self.memory_bank = None

    def forward(self, source_data, target_data=None):
        """Encode a batch of graphs and return open-set logits.

        ``target_data`` is accepted for interface compatibility but is
        not used in the plain forward pass.
        """
        graph_repr = self.graph_branch(source_data)
        subgraph_repr = self.subgraph_branch(source_data)

        # Branch interaction via feature concatenation.
        combined_repr = torch.cat([graph_repr, subgraph_repr], dim=1)

        return self.open_set_classifier(combined_repr)

    def multi_sample_mixup(self, source_graphs, target_graphs, k=20):
        """Build cross-domain virtual samples for domain alignment.

        For every source graph, finds its ``k`` most cosine-similar
        target graphs in graph-branch embedding space and returns the
        similarity-softmax-weighted average of their embeddings,
        shape (B_s, hidden_dim).

        Fixes over the original:
        - ``k`` is clamped to the target batch size (``topk`` raised a
          RuntimeError whenever k > B_t, e.g. the default k=20 with a
          smaller batch).
        - the O(B_s * B_t) Python double loop is replaced with one
          vectorized matmul of L2-normalized embeddings, and the
          similarity matrix stays on the inputs' device (the old
          CPU-allocated matrix produced CPU weights that could not be
          multiplied with GPU neighbor tensors).
        """
        # Graph-level embeddings for both domains.
        source_repr = self.graph_branch(source_graphs)
        target_repr = self.graph_branch(target_graphs)

        # Cosine similarity matrix (B_s, B_t) via normalized dot products.
        src_norm = F.normalize(source_repr, dim=1)
        tgt_norm = F.normalize(target_repr, dim=1)
        similarity_matrix = src_norm @ tgt_norm.t()

        # Clamp k so topk never exceeds the number of target graphs.
        k = min(k, target_repr.size(0))
        topk_sims, topk_indices = torch.topk(similarity_matrix, k=k, dim=1)

        # Similarity-weighted average of each source sample's neighbors.
        weights = F.softmax(topk_sims, dim=1)          # (B_s, k)
        neighbors = target_repr[topk_indices]          # (B_s, k, hidden)
        virtual_samples = torch.sum(neighbors * weights.unsqueeze(-1), dim=1)
        return virtual_samples

    def memory_bank_update(self, embeddings, max_size=1024):
        """Append ``embeddings`` to the memory bank, keeping at most
        ``max_size`` rows (first-in, first-out).

        Fix: embeddings are detached before storage so the bank does
        not retain the autograd graph across training iterations
        (unbounded memory growth otherwise).
        """
        embeddings = embeddings.detach()
        if self.memory_bank is None:
            bank = embeddings
        else:
            bank = torch.cat([self.memory_bank, embeddings])
        # FIFO eviction: drop the oldest rows beyond max_size.
        self.memory_bank = bank[-max_size:]

def train_dream(model, source_loader, target_loader, config):
    """Train a DREAM model on labeled source graphs, with optional
    unsupervised target-domain alignment.

    Args:
        model: DREAM instance (or any module with the same interface).
        source_loader: iterable of labeled source batches (``batch.y``).
        target_loader: optional iterable of unlabeled target batches;
            pass None to train source-only.
        config: dict with 'lr', 'epochs', and — when target_loader is
            given — 'alpha' (domain-alignment loss weight).

    Returns:
        The model (trained in place).

    Fixes over the original:
    - ``next(iter(target_loader))`` rebuilt the iterator every step, so
      only the FIRST target batch was ever used; the iterator is now
      created once per epoch and re-created on exhaustion.
    - The alignment loss compared class logits (num_classes + 1 dims)
      with virtual embeddings (hidden_dim dims) — a shape mismatch that
      errors at runtime. It now compares the source graph-branch
      embeddings with the virtual samples, both in the same embedding
      space, using ``reduction='batchmean'`` as the kl_div docs require.
    - The epoch log reported only the last batch's loss; it now reports
      the epoch average.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])
    ce_criterion = nn.CrossEntropyLoss()

    for epoch in range(config['epochs']):
        model.train()
        epoch_loss = 0.0
        num_batches = 0
        target_iter = iter(target_loader) if target_loader is not None else None

        for source_batch in source_loader:
            optimizer.zero_grad()

            # Supervised classification loss on the source domain.
            predictions = model(source_batch)
            loss = ce_criterion(predictions, source_batch.y)

            if target_iter is not None:
                # Cycle through the target loader across epochs/steps.
                try:
                    target_batch = next(target_iter)
                except StopIteration:
                    target_iter = iter(target_loader)
                    target_batch = next(target_iter)

                # Cross-domain virtual samples in graph-embedding space.
                virtual_samples = model.multi_sample_mixup(source_batch, target_batch)
                source_repr = model.graph_branch(source_batch)

                # Align the source embedding distribution with the
                # mixed virtual-sample distribution.
                domain_alignment_loss = F.kl_div(
                    F.log_softmax(source_repr, dim=1),
                    F.softmax(virtual_samples, dim=1),
                    reduction='batchmean'
                )
                loss = loss + config['alpha'] * domain_alignment_loss

            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            num_batches += 1

        avg_loss = epoch_loss / max(num_batches, 1)
        print(f"Epoch {epoch+1}/{config['epochs']}, Avg Loss: {avg_loss}")

    return model

# Example configuration
config = {
    'input_dim': 10,      # node feature dimension
    'num_known_classes': 5,
    'hidden_dim': 128,
    'lr': 0.001,
    'epochs': 200,
    'alpha': 1.0,         # weight of the domain-alignment loss
    'k': 20               # number of neighbors used in multi-sample mixup
}

# Model initialization (NOTE: runs at import time as a module-level side effect)
model = DREAM(config['input_dim'], config['num_known_classes'], config['hidden_dim'])

