import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
#from model import GTN
import pdb
import pickle
import argparse
from utils import f1_score

import dgl
from GNN import GCN, GAT

if __name__ == '__main__':
    # Command-line configuration for the baseline GNN (GCN/GAT) training run.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, help='Dataset')
    parser.add_argument('--model', type=str)
    parser.add_argument('--epoch', type=int, default=200, help='Training Epochs')
    parser.add_argument('--node_dim', type=int, default=64, help='Node dimension')
    parser.add_argument('--lr', type=float, default=0.005, help='learning rate')
    parser.add_argument('--weight_decay', type=float, default=0.0005, help='l2 reg')
    parser.add_argument('--num_layers', type=int, default=3, help='number of layer')
    parser.add_argument('--adaptive_lr', type=str, default='false', help='adaptive learning rate')
    parser.add_argument('--dropout', type=float, default=0.6)
    parser.add_argument('--n_heads', type=int, default=8)
    parser.add_argument('--slope', type=float, default=0.05)

    args = parser.parse_args()
    print(args)

    # Unpack the frequently used hyper-parameters into locals.
    epochs = args.epoch
    node_dim = args.node_dim
    lr = args.lr
    weight_decay = args.weight_decay
    num_layers = args.num_layers
    adaptive_lr = args.adaptive_lr
    dropout = args.dropout
    n_heads = args.n_heads

    # Load the preprocessed dataset artifacts from data/<dataset>/.
    # NOTE(review): pickle.load assumes these files are trusted — never point
    # --dataset at data from an untrusted source.
    def _load_pickle(name):
        """Unpickle data/<dataset>/<name> and return the object."""
        with open(f'data/{args.dataset}/{name}', 'rb') as f:
            return pickle.load(f)

    node_features = _load_pickle('node_features.pkl')
    edges = _load_pickle('edges.pkl')
    labels = _load_pickle('labels.pkl')
    num_nodes = edges[0].shape[0]

    # Collapse all edge types into one homogeneous graph, then normalize
    # self-loops: strip any existing ones and add exactly one per node.
    # NOTE(review): dgl.DGLGraph(sparse_matrix) is a legacy constructor;
    # recent DGL releases use dgl.from_scipy — confirm the pinned DGL version.
    g = dgl.DGLGraph(sum(edges))
    g = dgl.remove_self_loop(g)
    g = dgl.add_self_loop(g)
    g = g.to('cuda')

    """for i,edge in enumerate(edges):
        if i ==0:
            A = torch.from_numpy(edge.todense()).type(torch.FloatTensor).unsqueeze(-1)
        else:
            A = torch.cat([A,torch.from_numpy(edge.todense()).type(torch.FloatTensor).unsqueeze(-1)], dim=-1)
    A = torch.cat([A,torch.eye(num_nodes).type(torch.FloatTensor).unsqueeze(-1)], dim=-1)"""

    
    node_features = torch.from_numpy(node_features).type(torch.FloatTensor).cuda()
    train_node = torch.from_numpy(np.array(labels[0])[:,0]).type(torch.LongTensor).cuda()
    train_target = torch.from_numpy(np.array(labels[0])[:,1]).type(torch.LongTensor).cuda()
    valid_node = torch.from_numpy(np.array(labels[1])[:,0]).type(torch.LongTensor).cuda()
    valid_target = torch.from_numpy(np.array(labels[1])[:,1]).type(torch.LongTensor).cuda()
    test_node = torch.from_numpy(np.array(labels[2])[:,0]).type(torch.LongTensor).cuda()
    test_target = torch.from_numpy(np.array(labels[2])[:,1]).type(torch.LongTensor).cuda()
    
    num_classes = torch.max(train_target).item()+1
    # print(node_features.size())
    final_f1 = 0
    heads = ([n_heads] * num_layers) + [1]
    for l in range(1):
        # Build the chosen baseline model on the GPU.
        if args.model == 'gat':
            model = GAT(g, num_layers, node_features.size()[1], node_dim, num_classes, heads, F.elu, dropout, dropout, args.slope, False).cuda()
        elif args.model == 'gcn':
            model = GCN(g, node_features.size()[1], node_dim, num_classes, num_layers, F.relu, dropout).cuda()
        else:
            # BUGFIX: an unrecognized --model previously left `model` unbound
            # and crashed later with a NameError; fail fast instead.
            raise ValueError("--model must be 'gat' or 'gcn', got {!r}".format(args.model))

        if adaptive_lr == 'false':
            optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
        else:
            # Per-parameter-group learning rates (higher lr on .layers).
            # NOTE(review): weight/linear1/linear2/layers are GTN attribute
            # names; GCN/GAT may not define them all — confirm before running
            # these baselines with --adaptive_lr true.
            optimizer = torch.optim.Adam([{'params': model.weight},
                                          {'params': model.linear1.parameters()},
                                          {'params': model.linear2.parameters()},
                                          {"params": model.layers.parameters(), "lr": 0.5}
                                          ], lr=0.005, weight_decay=0.001)
        loss_fn = nn.CrossEntropyLoss()

        # Best epoch is selected by validation macro-F1; losses/F1 of the
        # other splits at that epoch are recorded alongside it.
        best_val_loss = 10000
        best_test_loss = 10000
        best_train_loss = 10000
        best_train_f1 = 0
        best_val_f1 = 0
        best_test_f1 = 0

        for i in range(epochs):
            print('Epoch:  ', i + 1)
            # --- Train step ---
            model.zero_grad()
            model.train()
            y = model(node_features)
            train_loss = loss_fn(y[train_node], train_target)
            train_f1 = torch.mean(f1_score(torch.argmax(y[train_node].detach(), dim=1), train_target, num_classes=num_classes)).cpu().numpy()
            print('Train - Loss: {}, Macro_F1: {}'.format(train_loss.detach().cpu().numpy(), train_f1))
            train_loss.backward()
            optimizer.step()

            # --- Valid & Test ---
            # One forward pass serves both splits: the model is in eval mode
            # under no_grad, so the original's second identical forward for
            # the test split was redundant work with the same output.
            model.eval()
            with torch.no_grad():
                y = model(node_features)
                val_loss = loss_fn(y[valid_node], valid_target)
                val_f1 = torch.mean(f1_score(torch.argmax(y[valid_node], dim=1), valid_target, num_classes=num_classes)).cpu().numpy()
                print('Valid - Loss: {}, Macro_F1: {}'.format(val_loss.detach().cpu().numpy(), val_f1))
                test_loss = loss_fn(y[test_node], test_target)
                test_f1 = torch.mean(f1_score(torch.argmax(y[test_node], dim=1), test_target, num_classes=num_classes)).cpu().numpy()
                print('Test - Loss: {}, Macro_F1: {}\n'.format(test_loss.detach().cpu().numpy(), test_f1))

            # Keep the epoch with the best validation macro-F1.
            if val_f1 > best_val_f1:
                best_val_loss = val_loss.detach().cpu().numpy()
                best_test_loss = test_loss.detach().cpu().numpy()
                best_train_loss = train_loss.detach().cpu().numpy()
                best_train_f1 = train_f1
                best_val_f1 = val_f1
                best_test_f1 = test_f1

        print('---------------Best Results--------------------')
        print('Train - Loss: {}, Macro_F1: {}'.format(best_train_loss, best_train_f1))
        print('Valid - Loss: {}, Macro_F1: {}'.format(best_val_loss, best_val_f1))
        print('Test - Loss: {}, Macro_F1: {}'.format(best_test_loss, best_test_f1))
        final_f1 += best_test_f1