import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torch import nn
from torchvision import transforms as transforms
from sklearn.model_selection import train_test_split
from net.graph_encoder import Graphencoder, Graphencoder2
from tqdm import tqdm
import util
from torch.autograd import Variable
import os

class Dp2Graph_Dataset(Dataset):
    """Dataset of (input matrix, inverted label matrix) pairs loaded
    from a pickled ``.npy`` file.

    Each record in the stored array uses column 3 as the network input
    (a numpy array) and column 4 as the raw label matrix.  The label is
    stored *inverted* (``1 - raw``), so the class of interest becomes
    the 0-entries of the raw label — the trainer's accuracy/recall
    computation counts classes accordingly.
    """

    def __init__(self, npy_path):
        """Load and preprocess all records.

        :param npy_path: path to a pickled numpy file of records whose
            columns 3 and 4 hold input and raw label arrays.
        """
        data = np.load(npy_path, allow_pickle=True)
        # Column 3: model input; column 4: raw label, stored inverted.
        self.input = [[row[3], 1 - row[4]] for row in data]

    def __getitem__(self, item):
        """Return ``(input, label)`` as float32 tensors."""
        x, label = self.input[item]
        return torch.from_numpy(x).float(), torch.from_numpy(label).float()

    def __len__(self):
        return len(self.input)


class Train_Dp2Graph_model:
    """Trainer for the graph-encoder model (``Graphencoder2``).

    Loads the pickled training data, splits it into train/test sets,
    resumes from the newest checkpoint when available, trains with
    SGD + gradient accumulation, and periodically evaluates and saves
    checkpoints under ``<project>/models/graph_encoder``.
    """

    def __init__(self):
        self.model_root = util.get_project_root() + '/models/graph_encoder'
        self.use_GPU = True
        # Fall back to CPU when CUDA is requested but unavailable
        # (bugfix: the old code moved a tensor to cuda:0 unconditionally,
        # crashing __init__ on CUDA-less machines).
        self.device = torch.device(
            "cuda:0" if self.use_GPU and torch.cuda.is_available() else "cpu")
        # Main loss: per-entry binary classification on raw logits.
        self.citerion = nn.BCEWithLogitsLoss()
        # Auxiliary loss: penalizes asymmetry of the predicted matrix.
        self.citerion2 = nn.MSELoss()

    def compute_loss(self, x, y):
        """Combined training loss.

        90% BCE-with-logits over every matrix entry plus 10% of an
        MSE(x, x^T) symmetry penalty on the squeezed output.

        :param x: model output logits; two leading singleton dims are
            squeezed away to reach the (N, N) matrix — assumes shape
            (1, 1, N, N); TODO confirm against Graphencoder2.
        :param y: target matrix, broadcastable to ``x.view(-1, 1)``.
        :return: ``(total weighted loss, main BCE loss)``
        """
        x = x.squeeze(0)
        xx = x.squeeze(0)   # (N, N) prediction matrix
        tx = torch.t(xx)    # its transpose, for the symmetry term

        main_loss = self.citerion(x.view(-1, 1), y.view(-1, 1))
        loss = 0.9 * main_loss + 0.1 * self.citerion2(tx, xx)
        return loss, main_loss

    def compute_acc(self, x, y):
        """Confusion-matrix counts for the thresholded prediction.

        NOTE(review): labels were inverted in the dataset (``1 - raw``),
        so the "positive" class here is ``z == 0 & y == 0``; the
        threshold ``x >= 1`` is applied to raw logits — confirm this
        matches how the checkpointed model was trained.

        :return: ``(TP, TN, FP, FN)`` as Python floats.
        """
        x = x.squeeze(0)
        z = torch.where(x >= 1, torch.full_like(x, 1), torch.full_like(x, 0)).float()
        TP = torch.sum((z == 0) & (y == 0)).float()
        TN = torch.sum((z == 1) & (y == 1)).float()
        FP = torch.sum((z == 0) & (y == 1)).float()
        FN = torch.sum((z == 1) & (y == 0)).float()
        return TP.item(), TN.item(), FP.item(), FN.item()

    def train(self, model_file, data_file, epochs=100):
        """Train the model, resuming from ``model_file`` when given.

        :param model_file: checkpoint path to resume from, or None to
            pick the newest checkpoint under ``model_root``.
        :param data_file: ``.npy`` file name under ``<project>/data``.
        :param epochs: number of additional epochs to run.
        """
        all_data = Dp2Graph_Dataset(npy_path='{}/data/{}'.format(util.get_project_root(), data_file))
        train_data, test_data = train_test_split(all_data, test_size=0.3, random_state=2)
        train_loader = DataLoader(dataset=train_data, batch_size=1, shuffle=False)
        test_loader = DataLoader(dataset=test_data, batch_size=1, shuffle=False)

        begin_epoch, model, optimizer_state, scheduler_state = self.load_model(model_file)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1)
        if optimizer_state is not None:
            optimizer.load_state_dict(optimizer_state)
        if scheduler_state is not None:
            scheduler.load_state_dict(scheduler_state)

        if self.use_GPU:
            model.to(self.device)

        # Batches per optimizer step; batch_size is 1, so this emulates
        # training with an effective batch of accum_steps samples.
        accum_steps = 30

        end_epochs = begin_epoch + epochs
        for epoch in range(begin_epoch, end_epochs):
            model.train()
            pending = 0          # batches accumulated since the last step
            total_loss = 0
            count_point = 0
            acc = 0
            rec_molecular = 0
            rec_denominator = 0
            optimizer.zero_grad()

            with tqdm(train_loader, desc="training in Epoch {}/{}".format(epoch, end_epochs)) as tq:
                for step, (x, y) in enumerate(tq):
                    inputs = x.to(self.device)
                    label = y.to(self.device)

                    out = model(inputs)
                    loss, main_loss = self.compute_loss(out, label)
                    # Accumulate gradients immediately instead of summing
                    # graph-bearing losses (same gradients, far less memory).
                    loss.backward()
                    pending += 1

                    # Running metrics; labels are dense N x N matrices.
                    count_point += label.shape[-1] * label.shape[-1]
                    TP, TN, FP, FN = self.compute_acc(out, label)
                    acc += TP + TN
                    accuracy = acc / count_point
                    rec_denominator += TP + FN
                    rec_molecular += TP
                    # Guard against TP + FN == 0 on the first batches
                    # (bugfix: used to raise ZeroDivisionError).
                    recall = rec_molecular / rec_denominator if rec_denominator else 0.0

                    # Bugfix: old `step % 30 == 0` performed a degenerate
                    # single-sample update at step 0 and dropped the last
                    # partial accumulation of every epoch.
                    if pending == accum_steps:
                        optimizer.step()
                        optimizer.zero_grad()
                        pending = 0

                    total_loss += main_loss.item()
                    tq.set_postfix(Loss=total_loss, train_acc=accuracy, train_recall=recall)

            # Flush the trailing partial accumulation of the epoch.
            if pending:
                optimizer.step()
                optimizer.zero_grad()

            scheduler.step(total_loss)

            # Periodic evaluation + checkpointing.
            if epoch % 10 == 0:
                test_running_loss, test_accuracy, test_recall, test_rec_denominator = \
                    self._evaluate(model, test_loader)

                print('Epoch {}/{}, on test set loss = {:.4f}, accuracy = {:.4f}, recall = {:.4f}, {} \n'.format(
                    epoch + 1, end_epochs, test_running_loss, test_accuracy, test_recall, test_rec_denominator))

                save_states = {
                    'epoch': epoch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'scheduler': scheduler.state_dict()
                }
                torch.save(save_states, self.model_root +
                           "/graphe_encoder_cp-{:04d}-{:.4f}-{:.4f}-{:.4f}.pth".format(epoch,
                                                                                       total_loss,
                                                                                       test_running_loss,
                                                                                       total_loss + test_running_loss
                                                                                       ), )

    def _evaluate(self, model, test_loader):
        """One evaluation pass over ``test_loader``.

        :return: ``(summed main loss, accuracy, recall, recall denominator)``
        """
        model.eval()
        running_loss = 0.0
        count_point = 0.0
        correct = 0.0
        rec_molecular = 0
        rec_denominator = 0
        # Bugfix: evaluation previously built autograd graphs for nothing.
        with torch.no_grad():
            for x, y in test_loader:
                inputs = x.to(self.device)
                label = y.to(self.device)
                out = model(inputs)
                _, main_loss = self.compute_loss(out, label)
                running_loss += main_loss.item()
                count_point += label.shape[-1] * label.shape[-1]
                TP, TN, FP, FN = self.compute_acc(out, label)
                correct += TP + TN
                rec_denominator += TP + FN
                rec_molecular += TP
        # Guards keep an empty/degenerate test set from raising.
        accuracy = correct / count_point if count_point else 0.0
        recall = rec_molecular / rec_denominator if rec_denominator else 0.0
        return running_loss, accuracy, recall, rec_denominator

    def load_model(self, model_file):
        """Build the ``Graphencoder2`` model, optionally restoring weights.

        :param model_file: checkpoint path; when None the newest
            checkpoint under ``model_root`` is used (if any exists).
        :return: ``(first epoch to run, model, optimizer state-dict or
            None, scheduler state-dict or None)``
        """
        begin_epoch = 1
        optimizer_state, scheduler_state = None, None

        if model_file is None:
            checkpoint_dir = self.model_root
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)

            model_file = util.latest_checkpoint_loss(checkpoint_dir)

        model = Graphencoder2()
        if model_file is not None:
            print("loading >>> ", model_file, " ...")
            checkpoint = torch.load(model_file, map_location=self.device)
            if isinstance(checkpoint, dict):
                model.load_state_dict(checkpoint['state_dict'])
                begin_epoch = checkpoint['epoch'] + 1
                if 'optimizer' in checkpoint:
                    optimizer_state = checkpoint['optimizer']
                if 'scheduler' in checkpoint:
                    scheduler_state = checkpoint['scheduler']

        return begin_epoch, model, optimizer_state, scheduler_state


if __name__ == '__main__':
    # Script entry point: resume from the latest checkpoint (None) and
    # keep training on the pickled dataset for up to 2000 more epochs.
    trainer = Train_Dp2Graph_model()
    trainer.train(None, "dp_graph_train_data.npy", epochs=2000)
