import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import scipy.sparse as sp
from torch import nn
from torchvision import transforms as transforms
from sklearn.model_selection import train_test_split
from net.graph_encoder import Graphencoder
from net.GCN import GraphMatrixCompletion, GraphMatrixCompletion2, GraphMatrixCompletion3
from tqdm import tqdm
import util
from torch.autograd import Variable
import os

# Node/word-space dimensionality: every adjacency matrix is (WORD_DIM, WORD_DIM).
# Must match the graph data in the .npy files (937 for the current dataset).
WORD_DIM = 937 # WORD_DIM = 323

class Dp2Graph_Dataset(Dataset):
    """Dataset of per-sentence dependency graphs loaded from a pickled .npy file.

    Each item is a tuple ``(adjacencies, source_indices, target_indices, labels)``
    where ``adjacencies`` is a list of degree-normalized (WORD_DIM, WORD_DIM)
    torch sparse tensors on ``self.device`` and the index/label tensors are int64.
    """

    def __init__(self, npy_path):
        """Load ``npy_path`` (object array) and rebuild each sentence's CSR
        adjacency matrices from their ``(values, (rows, cols))`` triplets.

        :param npy_path: path to a ``np.save``-d object array; element 0 holds
            the per-sentence samples.
        """
        self.use_GPU = True
        # NOTE(review): hard-codes cuda:0 whenever use_GPU is True — assumes
        # a single-GPU host; confirm before running on CPU-only machines.
        self.device = torch.device("cuda:0" if self.use_GPU else "cpu")
        data = np.load(npy_path, allow_pickle=True)
        sent_data = []
        s_data = data[0]
        for i in range(len(s_data)):
            adjacency = []
            adjacencies = s_data[i][0]
            for j in range(len(adjacencies)):
                # adjacencies[j] is (values, (rows, cols)) for one relation type.
                support = sp.csr_matrix((np.array(adjacencies[j][0]), (adjacencies[j][1][0], adjacencies[j][1][1])),
                                        shape=(WORD_DIM, WORD_DIM), dtype=np.float32)
                adjacency.append(support)
            source_indices = s_data[i][1]
            target_indices = s_data[i][2]
            labels = s_data[i][3]
            sent_data.append([adjacency, source_indices,
                              target_indices, labels])
        self.input = sent_data

    def to_torch_sparse_tensor(self, x):
        """Convert a scipy sparse matrix into a (WORD_DIM, WORD_DIM) torch
        sparse COO tensor placed on ``self.device``."""
        if not sp.isspmatrix_coo(x):
            x = sp.coo_matrix(x)
        indices = torch.from_numpy(np.array([x.row, x.col]).astype('int64')).long()
        values = torch.from_numpy(np.array(x.data).astype(np.float32))
        # torch.sparse.FloatTensor is deprecated; sparse_coo_tensor is the
        # supported constructor and produces an identical tensor.
        return torch.sparse_coo_tensor(indices, values, (WORD_DIM, WORD_DIM)).to(self.device)

    def globally_normalize_bipartite_adjacency(self, adjacencies, symmetric=True):
        """Globally normalize a set of bipartite adjacency matrices.

        Degrees are computed on the element-wise sum of all matrices.
        ``symmetric=True`` applies ``D_u^{-1/2} A D_v^{-1/2}``; otherwise
        ``D_u^{-1} A`` (row normalization only).

        :param adjacencies: list of scipy sparse matrices of equal shape.
        :param symmetric: choose symmetric vs. row-only normalization.
        :return: list of normalized scipy sparse matrices.
        """
        # NOTE: the per-call progress print was removed — this method runs
        # inside __getitem__, so it flooded stdout once per sample per epoch.
        adj_tot = np.sum([adj for adj in adjacencies])
        degree_u = np.asarray(adj_tot.sum(1)).flatten()
        degree_v = np.asarray(adj_tot.sum(0)).flatten()

        # Set zero degrees to inf so isolated nodes normalize to 0 instead of
        # raising a divide-by-zero.
        degree_u[degree_u == 0.] = np.inf
        degree_v[degree_v == 0.] = np.inf

        degree_u_inv_sqrt = 1. / np.sqrt(abs(degree_u))
        degree_v_inv_sqrt = 1. / np.sqrt(abs(degree_v))
        degree_u_inv_sqrt_mat = sp.diags([degree_u_inv_sqrt], [0])
        degree_v_inv_sqrt_mat = sp.diags([degree_v_inv_sqrt], [0])

        if symmetric:
            return [degree_u_inv_sqrt_mat.dot(adj).dot(degree_v_inv_sqrt_mat)
                    for adj in adjacencies]

        # Asymmetric: full inverse degree on the row side only (D_u^-1 = D_u^-1/2 · D_u^-1/2).
        degree_u_inv = degree_u_inv_sqrt_mat.dot(degree_u_inv_sqrt_mat)
        return [degree_u_inv.dot(adj) for adj in adjacencies]

    def __getitem__(self, item):
        """Return one sentence graph: asymmetrically normalized sparse
        adjacencies plus int64 source/target index and label tensors."""
        adjacencies, source_indices, target_indices, labels = self.input[item]
        adjacencies = self.globally_normalize_bipartite_adjacency(adjacencies, False)
        adjacencies = [self.to_torch_sparse_tensor(adj) for adj in adjacencies]
        source_indices = torch.from_numpy(np.array(source_indices)).long()
        target_indices = torch.from_numpy(np.array(target_indices)).long()
        labels = torch.from_numpy(np.array(labels)).long()
        return adjacencies, source_indices, target_indices, labels

    def __len__(self):
        # Number of sentence samples loaded in __init__.
        return len(self.input)


class Train_GCN_model:
    """Trainer for the GCN matrix-completion link classifier.

    Handles checkpoint resume, epoch training with gradient accumulation,
    periodic evaluation, and checkpoint saving under ``self.model_root``.
    """

    def __init__(self):
        self.model_root = util.get_project_root() + '/models/gcn_encoder'
        self.model = GraphMatrixCompletion(WORD_DIM, 5, 520, 10, 100, num_basis=2)
        # self.model = GraphMatrixCompletion2(WORD_DIM, 5, 520, 10, 100, num_basis=2)
        # self.model = GraphMatrixCompletion3(WORD_DIM, 5, 260, 10, 100, num_basis=2)
        self.model_name = 'gcn'
        self.use_GPU = True
        # NOTE(review): hard-codes cuda:0 whenever use_GPU is True — assumes a GPU host.
        self.device = torch.device("cuda:0" if self.use_GPU else "cpu")
        self.criterion = nn.CrossEntropyLoss().to(self.device)

    def compute_acc(self, x, y):
        """Return binary confusion counts ``(TP, TN, FP, FN)`` as Python floats.

        :param x: raw logits, shape (N, 2) — softmax is monotone, so the
            argmax prediction is the same with or without it.
        :param y: int tensor of 0/1 gold labels, shape (N,).
        """
        x = torch.softmax(x, -1)
        z = torch.argmax(x, -1)
        TP = torch.sum(((z == 1) & (y == 1))).float()
        TN = torch.sum(((z == 0) & (y == 0))).float()
        FP = torch.sum(((z == 1) & (y == 0))).float()
        FN = torch.sum(((z == 0) & (y == 1))).float()
        return TP.item(), TN.item(), FP.item(), FN.item()

    def _evaluate(self, model, loader, side_feature, identity_feature):
        """Run one no-grad pass over ``loader``.

        :return: ``(summed loss, accuracy, recall)``. Accuracy and recall fall
            back to 0.0 on an empty loader / a split with no positive labels
            (the original code raised ZeroDivisionError or NameError here).
        """
        running_loss = 0.0
        n_points = 0.0
        n_correct = 0.0
        tp_sum = 0.0
        pos_sum = 0.0
        with torch.no_grad():  # evaluation needs no autograd graph
            for adjacencies, source_indices, target_indices, labels in loader:
                source_indices = source_indices.to(self.device).squeeze(0)
                target_indices = target_indices.to(self.device).squeeze(0)
                labels = labels.to(self.device).squeeze(0)
                logits = model(adjacencies, identity_feature, side_feature,
                               source_indices, target_indices)
                running_loss += self.criterion(logits, labels).item()
                n_points += len(labels)
                TP, TN, FP, FN = self.compute_acc(logits, labels)
                n_correct += TP + TN
                tp_sum += TP
                pos_sum += TP + FN
        accuracy = n_correct / n_points if n_points else 0.0
        recall = tp_sum / pos_sum if pos_sum else 0.0
        return running_loss, accuracy, recall

    def train(self, model_file, data_file, epochs=100):
        """Train for ``epochs`` epochs; evaluate and checkpoint every 10th epoch.

        :param model_file: checkpoint to resume from, or None to auto-discover
            the latest checkpoint in ``self.model_root``.
        :param data_file: .npy graph-sample file under ``<project>/data``.
        :param epochs: number of additional epochs to run.
        """
        all_data = Dp2Graph_Dataset(npy_path='{}/data/{}'.format(util.get_project_root(), data_file))
        train_data, test_data = train_test_split(all_data, test_size=0.3, random_state=None)
        train_loader = DataLoader(dataset=train_data, batch_size=1, shuffle=True)
        test_loader = DataLoader(dataset=test_data, batch_size=1, shuffle=False)
        # Global node features shared by every sample (elements 1 and 2 of the file).
        data = np.load('{}/data/gcn_train_data.npy'.format(util.get_project_root()), allow_pickle=True)
        side_feature = torch.from_numpy(data[1]).float()
        identity_feature = torch.from_numpy(data[2]).float()

        begin_epoch, model, optimizer_state, scheduler_state = self.load_model(model_file)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.02, weight_decay=1e-4)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1)
        # States come back from load_model because the optimizer/scheduler are
        # only constructed here, after the checkpoint is read.
        if optimizer_state is not None:
            optimizer.load_state_dict(optimizer_state)
        if scheduler_state is not None:
            scheduler.load_state_dict(scheduler_state)

        if self.use_GPU:
            model.to(self.device)
        # The feature tensors are loop-invariant: move and squeeze them once
        # (the original re-wrapped them in Variable on every iteration).
        side_feature = side_feature.to(self.device).squeeze(0)
        identity_feature = identity_feature.to(self.device).squeeze(0)

        end_epochs = begin_epoch + epochs
        for epoch in range(begin_epoch, end_epochs):
            model.train()
            back_loss = 0          # graph-connected loss accumulated between optimizer steps
            total_loss = 0         # running sum of per-step scalar losses (for logging/scheduler)
            count_point = 0
            acc = 0
            rec_molecular = 0      # recall numerator: sum of TP
            rec_denominator = 0    # recall denominator: sum of TP + FN

            with tqdm(train_loader, desc="training in Epoch {}/{}".format(epoch, end_epochs)) as tq:
                for step, (adjacencies, source_indices, target_indices,
                           labels) in enumerate(tq):
                    source_indices = source_indices.to(self.device).squeeze(0)
                    target_indices = target_indices.to(self.device).squeeze(0)
                    labels = labels.to(self.device).squeeze(0)
                    logits = model(adjacencies, identity_feature, side_feature,
                                   source_indices, target_indices)

                    loss = self.criterion(logits, labels)
                    back_loss += loss

                    count_point += len(labels)
                    TP, TN, FP, FN = self.compute_acc(logits, labels)

                    acc += TP + TN
                    accuracy = acc / count_point

                    rec_denominator += TP + FN
                    rec_molecular += TP
                    # Guard against a batch stream with no positive labels yet.
                    recall = rec_molecular / rec_denominator if rec_denominator > 0 else None

                    # Gradient accumulation: one optimizer step every 40 samples.
                    if step % 40 == 0:
                        optimizer.zero_grad()
                        back_loss.backward()
                        optimizer.step()
                        back_loss = 0

                    total_loss += loss.item()
                    tq.set_postfix(Loss=total_loss, train_acc=accuracy, train_recall=recall)

            # BUG FIX: losses accumulated after the last multiple-of-40 step
            # were previously dropped at epoch end; flush them here.
            if torch.is_tensor(back_loss):
                optimizer.zero_grad()
                back_loss.backward()
                optimizer.step()

            scheduler.step(total_loss)

            # Evaluate on the held-out split and checkpoint every 10 epochs.
            if epoch % 10 == 0:
                model.eval()
                test_loss, test_accuracy, test_recall = self._evaluate(
                    model, test_loader, side_feature, identity_feature)

                print('Epoch {}/{}, on test set loss = {:.4f}, accuracy = {:.4f}, recall = {:.4f} \n'.format(
                    epoch + 1, end_epochs, test_loss, test_accuracy, test_recall))

                save_states = {
                    'epoch': epoch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'scheduler': scheduler.state_dict()
                }
                torch.save(save_states,
                           self.model_root + "/gcn_encoder_cp-{:04d}-{:.4f}-{:.4f}-{:.4f}.pth".format(
                               epoch, total_loss, test_loss, total_loss + test_loss))

    def load_model(self, model_file):
        """Load a checkpoint (or start fresh) for training.

        :param model_file: explicit checkpoint path; None means "find the
            latest checkpoint in self.model_root" (creating the directory
            if it does not exist yet).
        :return: ``(begin_epoch, model, optimizer_state, scheduler_state)``;
            the optimizer/scheduler state dicts are None when unavailable and
            are restored by the caller after constructing the optimizer.
        """
        begin_epoch = 1
        optimizer_state, scheduler_state = None, None
        model = self.model

        if model_file is None:
            checkpoint_dir = self.model_root
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)
            model_file = util.latest_checkpoint_loss(checkpoint_dir)

        if model_file is not None:
            print("loading >>> ", model_file, " ...")
            checkpoint = torch.load(model_file)
            # Dict checkpoints carry full training state; anything else is
            # treated as "no usable state" and training starts from scratch.
            if isinstance(checkpoint, dict):
                model.load_state_dict(checkpoint['state_dict'])
                begin_epoch = checkpoint['epoch'] + 1
                optimizer_state = checkpoint.get('optimizer')
                scheduler_state = checkpoint.get('scheduler')

        return begin_epoch, model, optimizer_state, scheduler_state


if __name__ == '__main__':
    # Resume from the latest checkpoint if one exists (model_file=None
    # triggers auto-discovery inside load_model), then train 30 epochs.
    trainer = Train_GCN_model()
    trainer.train(None, "gcn_train_data.npy", epochs=30)
