from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

import torch
from torch import nn
from torch.nn import LSTM, GRU

from utils.dag_utils import subgraph, custom_backward_subgraph

from models.gat_conv import AGNNConv
from models.gcn_conv import AggConv
from models.deepset_conv import DeepSetConv
from models.gated_sum_conv import GatedSumConv
from models.mlp import MLP


# Maps the ``args.aggr_function`` name to the aggregation-layer class
# used for message passing (see RecGNN.__init__).
_aggr_function_factory = {
    'aggnconv': AGNNConv,
    'deepset': DeepSetConv,
    'gated_sum': GatedSumConv,
    'conv_sum': AggConv,
}

# Maps the ``args.update_function`` name to the recurrent cell class
# used to update node hidden states (see RecGNN.__init__).
_update_function_factory = {
    'lstm': LSTM,
    'gru': GRU,
}


class RecGNN(nn.Module):
    '''
    Recurrent Graph Neural Networks for Circuits.

    Node hidden states start from a learned constant embedding and are
    refined over ``num_rounds`` rounds of level-wise message passing on
    the circuit DAG: a forward sweep through the topological levels and,
    when ``args.reverse`` is set, a backward sweep.  Aggregation and
    update operators are selected from the module-level factories.
    '''
    def __init__(self, args):
        super(RecGNN, self).__init__()

        self.args = args

        # configuration
        self.num_rounds = args.num_rounds

        # Check whether CUDA is available; if not, force CPU execution.
        if hasattr(args, 'gpus') and args.gpus[0] >= 0 and torch.cuda.is_available():
            self.device = args.device
        else:
            self.device = torch.device('cpu')
            print("[INFO] CUDA不可用或未启用，使用CPU运行")

        self.predict_diff = args.predict_diff
        self.intermediate_supervision = args.intermediate_supervision
        self.reverse = args.reverse
        self.custom_backward = args.custom_backward
        self.use_edge_attr = args.use_edge_attr

        # dimensions
        self.num_aggr = args.num_aggr
        self.dim_node_feature = args.dim_node_feature
        self.dim_hidden = args.dim_hidden
        self.dim_mlp = args.dim_mlp
        self.dim_pred = args.dim_pred
        self.num_fc = args.num_fc
        self.wx_update = args.wx_update
        self.wx_mlp = args.wx_mlp
        self.dim_edge_feature = args.dim_edge_feature

        # 1. message/aggregation operators
        dim_aggr = self.dim_hidden
        if self.args.aggr_function not in _aggr_function_factory:
            raise KeyError('no support {} aggr function.'.format(self.args.aggr_function))
        aggr_cls = _aggr_function_factory[self.args.aggr_function]
        aggr_forward_pre = nn.Linear(dim_aggr, self.dim_hidden)
        if self.args.aggr_function == 'deepset':
            # DeepSet additionally applies a post-aggregation transform.
            aggr_forward_post = nn.Linear(self.dim_hidden, self.dim_hidden)
            self.aggr_forward = aggr_cls(dim_aggr, self.dim_hidden, mlp=aggr_forward_pre, mlp_post=aggr_forward_post, wea=self.use_edge_attr)
        else:
            self.aggr_forward = aggr_cls(dim_aggr, self.dim_hidden, mlp=aggr_forward_pre, wea=self.use_edge_attr)
        if self.reverse:
            aggr_backward_pre = nn.Linear(dim_aggr, self.dim_hidden)
            if self.args.aggr_function == 'deepset':
                aggr_backward_post = nn.Linear(self.dim_hidden, self.dim_hidden)
                self.aggr_backward = aggr_cls(dim_aggr, self.dim_hidden, mlp=aggr_backward_pre, mlp_post=aggr_backward_post, wea=self.use_edge_attr)
            else:
                self.aggr_backward = aggr_cls(dim_aggr, self.dim_hidden, mlp=aggr_backward_pre, reverse=True, wea=self.use_edge_attr)

        # 2. update (recurrent) cells
        if self.args.update_function not in _update_function_factory:
            raise KeyError('no support {} update function.'.format(self.args.update_function))
        update_cls = _update_function_factory[self.args.update_function]
        # When wx_update is set, the cell consumes the concatenation of
        # the aggregated message and the raw node feature vector.
        dim_update = self.dim_node_feature + self.dim_hidden if self.wx_update else self.dim_hidden
        self.update_forward = update_cls(dim_update, self.dim_hidden)
        if self.reverse:
            self.update_backward = update_cls(dim_update, self.dim_hidden)

        # Initialize the LSTM/GRU hidden state from a learned embedding
        # of the constant 1 (i.e. a non-zero initialization).
        self.one = torch.ones(1, device=self.device)
        self.emd_int = nn.Linear(1, self.dim_hidden).to(self.device)
        self.one.requires_grad = False

        # 3.1 Value Network: consumes a node embedding concatenated with
        # two graph-level scalars (cp_idx, cp_tot), hence dim_hidden + 2.
        output_dim = 3 if self.args.op else 2
        self.value_network = nn.Sequential(
            nn.Linear(self.dim_hidden + 2, 128),
            nn.ReLU(),
            nn.Linear(128, output_dim)
        )

        # 3.2 Probability predictor (used on the pretraining path).
        self.predictor = MLP(self.dim_hidden, self.dim_mlp, self.dim_pred,
            num_layer=self.num_fc, norm_layer=args.norm_layer, act_layer=args.activation_layer, sigmoid=False, tanh=False)

    def _init_hidden(self, num_nodes):
        """Return the (1 x num_nodes x dim_hidden) initial hidden state:
        the learned embedding of the constant 1, broadcast to all nodes."""
        h_init = self.emd_int(self.one).view(1, 1, -1)  # (1 x 1 x dim_hidden)
        return h_init.repeat(1, num_nodes, 1)           # (1 x num_nodes x dim_hidden)

    def _value_input(self, G, node_embedding):
        """Append the graph-level (cp_idx, cp_tot) pair to every node
        embedding so the result matches value_network's
        Linear(dim_hidden + 2, ...) input.

        The extras are created on the embedding's device/dtype; the
        original CPU tensor with inferred dtype would fail on GPU or
        when cp values are integers.  (cp_idx/cp_tot are presumably a
        cut-point index/total — TODO confirm against the dataset.)
        """
        extra_feature = torch.tensor(
            [G.cp_idx, G.cp_tot],
            dtype=node_embedding.dtype,
            device=node_embedding.device,
        ).repeat(len(node_embedding), 1)
        return torch.cat([node_embedding, extra_feature], dim=1)

    def forward(self, G):
        """Run message passing on graph ``G``.

        Returns:
            list of prediction tensors (currently a single final
            prediction; intermediate supervision is disabled).
        """
        num_nodes = G.num_nodes
        num_layers_f = max(G.forward_level).item() + 1
        num_layers_b = max(G.backward_level).item() + 1
        h_init = self._init_hidden(num_nodes)

        if self.args.update_function == 'lstm':
            preds = self._lstm_forward(G, h_init, num_layers_f, num_layers_b, num_nodes)
        elif self.args.update_function == 'gru':
            preds = self._gru_forward(G, h_init, num_layers_f, num_layers_b, num_nodes)
        else:
            raise NotImplementedError('The update function should be specified as one of lstm and gru.')

        return preds

    def _lstm_forward(self, G, h_init, num_layers_f, num_layers_b, num_nodes):
        """LSTM-cell variant of the round-based level-wise propagation."""
        x, edge_index = G.x, G.edge_index
        edge_attr = G.edge_attr if self.use_edge_attr else None

        # (h_0, c_0); only h_0 is initialized from the learned embedding.
        # TODO: option of not initializing the hidden state of LSTM.
        node_state = (h_init, torch.zeros(1, num_nodes, self.dim_hidden).to(self.device))

        # TODO: add supports for modified attention and customized backward design.
        preds = []
        for _ in range(self.num_rounds):
            for l_idx in range(1, num_layers_f):
                # forward sweep: update only the nodes at this level
                layer_mask = G.forward_level == l_idx
                l_node = G.forward_index[layer_mask]

                l_state = (torch.index_select(node_state[0], dim=1, index=l_node),
                            torch.index_select(node_state[1], dim=1, index=l_node))

                l_edge_index, l_edge_attr = subgraph(l_node, edge_index, edge_attr, dim=1)
                msg = self.aggr_forward(node_state[0].squeeze(0), l_edge_index, l_edge_attr)
                l_msg = torch.index_select(msg, dim=0, index=l_node)
                l_x = torch.index_select(x, dim=0, index=l_node)

                if self.args.wx_update:
                    _, l_state = self.update_forward(torch.cat([l_msg, l_x], dim=1).unsqueeze(0), l_state)
                else:
                    _, l_state = self.update_forward(l_msg.unsqueeze(0), l_state)

                node_state[0][:, l_node, :] = l_state[0]
                node_state[1][:, l_node, :] = l_state[1]
            if self.reverse:
                for l_idx in range(1, num_layers_b):
                    # backward sweep over the reversed topological levels
                    layer_mask = G.backward_level == l_idx
                    l_node = G.backward_index[layer_mask]

                    l_state = (torch.index_select(node_state[0], dim=1, index=l_node),
                                torch.index_select(node_state[1], dim=1, index=l_node))
                    if self.custom_backward:
                        # NOTE(review): on this branch l_edge_attr keeps its
                        # value from the forward sweep — preserved from the
                        # original; confirm this is intended.
                        l_edge_index = custom_backward_subgraph(l_node, edge_index, device=self.device, dim=0)
                    else:
                        l_edge_index, l_edge_attr = subgraph(l_node, edge_index, edge_attr, dim=0)
                    msg = self.aggr_backward(node_state[0].squeeze(0), l_edge_index, l_edge_attr)
                    l_msg = torch.index_select(msg, dim=0, index=l_node)
                    l_x = torch.index_select(x, dim=0, index=l_node)

                    if self.args.wx_update:
                        _, l_state = self.update_backward(torch.cat([l_msg, l_x], dim=1).unsqueeze(0), l_state)
                    else:
                        _, l_state = self.update_backward(l_msg.unsqueeze(0), l_state)

                    node_state[0][:, l_node, :] = l_state[0]
                    node_state[1][:, l_node, :] = l_state[1]

        node_embedding = node_state[0].squeeze(0)
        if self.args.pretrain:
            pred = self.predictor(node_embedding)
        else:
            # Bug fix: the original fed the bare (dim_hidden) embedding
            # into value_network, which expects dim_hidden + 2 inputs —
            # a guaranteed shape mismatch.  Append (cp_idx, cp_tot) as
            # the GRU path does.
            pred = self.value_network(self._value_input(G, node_embedding))
        preds.append(pred)

        return preds

    def _gru_propagate(self, G, node_state):
        """Run ``num_rounds`` of level-wise GRU message passing.

        ``node_state`` is the (1 x num_nodes x dim_hidden) hidden state;
        it is updated in place via index assignment and also returned.
        Shared by ``_gru_forward`` and ``get_emb`` (previously duplicated).
        """
        x, edge_index = G.x, G.edge_index
        edge_attr = G.edge_attr if self.use_edge_attr else None
        num_layers_f = max(G.forward_level).item() + 1
        num_layers_b = max(G.backward_level).item() + 1

        # TODO: add supports for modified attention and customized backward design.
        for _ in range(self.num_rounds):
            for l_idx in range(1, num_layers_f):
                # forward sweep: update only the nodes at this level
                layer_mask = G.forward_level == l_idx
                l_node = G.forward_index[layer_mask]

                l_state = torch.index_select(node_state, dim=1, index=l_node)

                l_edge_index, l_edge_attr = subgraph(l_node, edge_index, edge_attr, dim=1)
                msg = self.aggr_forward(node_state.squeeze(0), l_edge_index, l_edge_attr)
                l_msg = torch.index_select(msg, dim=0, index=l_node)
                l_x = torch.index_select(x, dim=0, index=l_node)

                if self.args.wx_update:
                    _, l_state = self.update_forward(torch.cat([l_msg, l_x], dim=1).unsqueeze(0), l_state)
                else:
                    _, l_state = self.update_forward(l_msg.unsqueeze(0), l_state)
                node_state[:, l_node, :] = l_state

            if self.reverse:
                for l_idx in range(1, num_layers_b):
                    # backward sweep over the reversed topological levels
                    layer_mask = G.backward_level == l_idx
                    l_node = G.backward_index[layer_mask]

                    l_state = torch.index_select(node_state, dim=1, index=l_node)

                    if self.custom_backward:
                        # NOTE(review): on this branch l_edge_attr keeps its
                        # value from the forward sweep — preserved from the
                        # original; confirm this is intended.
                        l_edge_index = custom_backward_subgraph(l_node, edge_index, device=self.device, dim=0)
                    else:
                        l_edge_index, l_edge_attr = subgraph(l_node, edge_index, edge_attr, dim=0)
                    msg = self.aggr_backward(node_state.squeeze(0), l_edge_index, l_edge_attr)
                    l_msg = torch.index_select(msg, dim=0, index=l_node)
                    l_x = torch.index_select(x, dim=0, index=l_node)

                    if self.args.wx_update:
                        _, l_state = self.update_backward(torch.cat([l_msg, l_x], dim=1).unsqueeze(0), l_state)
                    else:
                        _, l_state = self.update_backward(l_msg.unsqueeze(0), l_state)

                    node_state[:, l_node, :] = l_state

        return node_state

    def _gru_forward(self, G, h_init, num_layers_f, num_layers_b, num_nodes):
        """GRU-cell variant of the round-based propagation.

        The num_layers_f/num_layers_b/num_nodes parameters are kept for
        signature compatibility with callers; the layer counts are
        recomputed inside ``_gru_propagate``.
        """
        node_state = self._gru_propagate(G, h_init)

        node_embedding = node_state.squeeze(0)
        preds = []
        if self.args.pretrain:
            pred = self.predictor(node_embedding)
        else:
            pred = self.value_network(self._value_input(G, node_embedding))
        preds.append(pred)

        return preds

    def get_emb(self, G):
        """Return the (num_nodes x dim_hidden) node embeddings of ``G``
        after GRU propagation, without applying any prediction head."""
        h_init = self._init_hidden(G.num_nodes)
        node_state = self._gru_propagate(G, h_init)
        return node_state.squeeze(0)
    
def get_recurrent_gnn(args):
    """Factory helper: build a :class:`RecGNN` from the parsed ``args``."""
    model = RecGNN(args)
    return model

def load_model(model, model_path, optimizer=None, resume=False, lr=None, lr_step=None):
    """加载模型
    
    Args:
        model: 要加载权重的模型
        model_path: 模型文件路径
        optimizer: 优化器
        resume: 是否恢复训练
        lr: 学习率
        lr_step: 学习率调整步骤
    
    Returns:
        model: 加载后的模型
        optimizer: 加载后的优化器
        start_epoch: 开始的轮次
    """
    try:
        # 确保安全反序列化所需的类都已添加
        try:
            from torch_geometric.data.data import DataEdgeAttr, DataTensorAttr
            from torch_geometric.data.storage import GlobalStorage
            from datasets.ordered_data import OrderedData
            # 将类添加到PyTorch安全序列化白名单
            import torch.serialization
            torch.serialization.add_safe_globals([DataEdgeAttr, GlobalStorage, OrderedData, DataTensorAttr])
        except Exception as e:
            print(f"[WARNING] 安全序列化设置失败: {str(e)}，但将继续尝试加载模型")
        
        # 确定设备
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        
        # 首先尝试使用weights_only=False安全加载
        try:
            checkpoint = torch.load(model_path, weights_only=False, map_location=device)
            print(f"[INFO] 使用weights_only=False成功加载模型: {model_path}")
        except Exception as e:
            print(f"[WARNING] 使用weights_only=False加载失败: {str(e)}")
            print(f"[INFO] 尝试使用默认参数加载模型")
            checkpoint = torch.load(model_path, map_location=device)
            print(f"[INFO] 使用默认参数成功加载模型")
        
        start_epoch = checkpoint['epoch'] + 1
        state_dict = checkpoint['state_dict']
        model.load_state_dict(state_dict)
        
        if optimizer is not None and resume:
            optimizer.load_state_dict(checkpoint['optimizer'])
            
        if lr is not None:
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
                
        if lr_step is not None:
            for i in range(len(lr_step)):
                if lr_step[i] <= start_epoch:
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr * (0.1 ** (i + 1))
        
        print(f'[INFO] 从 {model_path} 加载模型成功，开始轮次: {start_epoch}')
        return model, optimizer, start_epoch
    except Exception as e:
        import traceback
        traceback.print_exc()
        print(f'[ERROR] 从 {model_path} 加载模型失败: {str(e)}')
        raise e

def save_model(model, optimizer, epoch, path):
    """Save a training checkpoint to ``path``.

    The checkpoint dict holds ``epoch``, the model ``state_dict`` and
    the optimizer state (or None).  Failures are logged, not raised.

    Args:
        model: module whose ``state_dict`` is saved.
        optimizer: optimizer whose state is saved; may be ``None``.
        epoch: current epoch number (stored for resuming).
        path: destination file path; parent directories are created.
    """
    try:
        # Create the parent directory only when the path has one:
        # ``os.path.dirname`` returns '' for a bare filename, and the
        # original ``os.makedirs('')`` call raised, silently skipping
        # the save.  exist_ok also removes a check-then-create race.
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)

        state = {
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict() if optimizer is not None else None
        }
        torch.save(state, path)
        print(f'[INFO] 模型保存到 {path} 成功')
    except Exception as e:
        import traceback
        traceback.print_exc()
        print(f'[ERROR] 保存模型到 {path} 失败: {str(e)}')
