import torch
import torch.nn as nn
import math
import os
import pickle
from torch_geometric.nn import global_mean_pool


# ez-transformer: no attention mask; uses learned absolute position embeddings
class GrpahTransformer(nn.Module):
    """Transformer encoder over fixed-size graph batches with learned
    absolute position embeddings and a linear classification head.

    NOTE(review): the class name contains a typo ("Grpah") but is kept
    unchanged so existing callers and checkpoints still work.

    Args:
        nfeat: node feature dimension (transformer d_model).
        nhead: number of attention heads.
        nhid: feed-forward hidden dimension inside each encoder layer.
        nclass: number of output classes.
        nlayers: number of stacked encoder layers.
        window_size: stored but not used by this class (kept for API compat).
        device: target device; auto-selects cuda:0/cpu when None.
        dropout: dropout probability for the encoder layers.
    """

    # Every graph is assumed to be padded/truncated to this many nodes;
    # it is both the sequence length in forward() and the position-table size.
    MAX_NODES = 1000

    def __init__(self, nfeat, nhead, nhid, nclass, nlayers=4, window_size=2, device=None, dropout=0.5):
        super(GrpahTransformer, self).__init__()
        # BUG FIX: `dropout` was previously accepted but never used — the
        # encoder layer silently fell back to its 0.1 default. Pass it through.
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=nfeat, nhead=nhead, dim_feedforward=nhid,
            dropout=dropout, batch_first=True,
        )
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=nlayers)
        self.nfeat = nfeat
        self.pos_embedding = nn.Embedding(self.MAX_NODES, nfeat)
        self.decoder = nn.Linear(nfeat, nclass)
        self.window_size = window_size
        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        self.to(self.device)

    def forward(self, data):
        """Return per-graph class logits of shape (batch, nclass).

        Expects a PyG-style batch where data.x flattens to
        (batch * MAX_NODES, nfeat) — i.e. every graph has exactly
        MAX_NODES (padded) nodes. TODO confirm with the data pipeline.
        """
        data = data.to(self.device)
        # (batch, MAX_NODES, nfeat); data already moved, no second .to() needed
        src = data.x.view(-1, self.MAX_NODES, self.nfeat)
        # Create position indices directly on the right device (avoids a
        # CPU-allocate-then-copy round trip).
        positions = torch.arange(self.MAX_NODES, device=src.device).expand(src.size(0), self.MAX_NODES)
        src = src + self.pos_embedding(positions)
        output = self.transformer_encoder(src)
        # NOTE(review): global_mean_pool receives a 3D tensor here while
        # data.batch indexes flattened nodes — verify PyG handles this as
        # intended for the fixed-size batching above.
        output = global_mean_pool(output, data.batch)
        return self.decoder(output)
    
class TransformerSampleVisual(nn.Module):
    """Visualization wrapper around a trained GrpahTransformer.

    Loads a saved state_dict, reuses the base model's encoder/decoder, and on
    each forward pass dumps the per-node decoder logits (paired with node
    addresses) to a pickle file in `vis_info_dir`, while still returning the
    normal graph-level predictions.

    Args:
        base_mod: filesystem path to a GrpahTransformer state_dict checkpoint.
        vis_info_dir: directory where per-sample pickle files are written.
        device: target device; auto-selects cuda:0/cpu when None.
        dropout: stored but not used directly (kept for API compat).

    Raises:
        FileNotFoundError: if `base_mod` does not exist.
    """

    def __init__(self, base_mod, vis_info_dir, device=None, dropout=0.5):
        super(TransformerSampleVisual, self).__init__()
        self.dropout = dropout
        self.vis_info_dir = vis_info_dir
        self.base_mod = base_mod
        self.model = None
        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device
        if not os.path.exists(self.base_mod):
            # Raise instead of print+exit(-1) so callers get a real exception
            # they can catch/log.
            raise FileNotFoundError(f'ERROR: Transformer File Can Not Be Loaded: {self.base_mod}')
        # Hyperparameters must match how the checkpoint was trained —
        # TODO confirm against the training config.
        mod = GrpahTransformer(nfeat=128, nhead=8, nhid=256, nclass=2)
        # BUG FIX: map_location previously recomputed cuda/cpu and ignored a
        # user-supplied `device`; use self.device consistently.
        mod.load_state_dict(torch.load(self.base_mod, map_location=self.device))
        # Borrow the trained submodules directly.
        self.transformer_encoder = mod.transformer_encoder
        self.nfeat = mod.nfeat
        self.decoder = mod.decoder
        self.window_size = mod.window_size
        self.pos_embedding = mod.pos_embedding
        self.to(self.device)

    def forward(self, data):
        """Run the encoder, persist per-node logits, return graph logits.

        Side effect: writes/updates a pickle in self.vis_info_dir keyed by a
        name derived from data.file_name (assumes single-sample access via
        index [0] — TODO confirm batch size semantics with the caller).
        """
        data = data.to(self.device)
        # Build the pickle filename from the last 5 path components of the
        # source JSON, joined with '@' so it is a single flat filename.
        file_name = data.file_name[0].split('/')[-5:]
        target_file_name = '@'.join(file_name).replace('.json', '.pkl')
        target_file_route = os.path.join(self.vis_info_dir, target_file_name)

        addr_ls = data.addr_ls[0]
        # Same encoding path as GrpahTransformer.forward.
        src = data.x.view(-1, 1000, self.nfeat)  # (batch, node_num, nfeat)
        positions = torch.arange(1000, device=src.device).expand(src.size(0), 1000)
        src = src + self.pos_embedding(positions)
        output = self.transformer_encoder(src)  # (batch, 1000, nfeat)

        # Per-node logits paired with their address; -1 marks padded
        # positions beyond the real node list.
        result = []
        for i in range(output.shape[1]):
            out = self.decoder(output[:, i, :])
            result.append([out, addr_ls[i] if i < len(addr_ls) else -1])

        # Merge into the existing pickle (if any), then rewrite it. `with`
        # guarantees handles are closed even if load/dump raises; the single
        # dump replaces the duplicated read/write branches of the original.
        pkl_dict = {}
        if os.path.exists(target_file_route):
            with open(target_file_route, 'rb') as f:
                pkl_dict = pickle.load(f)
        pkl_dict[target_file_name] = result
        with open(target_file_route, 'wb') as f:
            pickle.dump(pkl_dict, f)

        output = global_mean_pool(output, data.batch)
        return self.decoder(output)
        

if __name__ == '__main__':

    pass  # no standalone CLI behavior; this module is meant to be imported