import copy
from tqdm import tqdm
import mindspore as ms
from mindspore import nn
from mindspore.nn.optim import *
# from mindspore.nn import TrainOneStepCell
import logging
import numpy as np
from sklearn.metrics import roc_auc_score
import dgl

from .loss import bce_with_logits


class ModelHelper:
    """Generic train/evaluate driver around a MindSpore model.

    Owns the epoch loop, validation-driven early stopping, and checkpoint
    save/restore. Subclasses supply the concrete per-batch training step
    (see GNNHelper.fit_link_prediction further down in this module).
    """

    def __init__(self, model, log_dir=''):
        # model: a MindSpore Cell; gradient computation is enabled up front.
        self.model = model
        self.model.set_grad()
        self.log_dir = log_dir
        # self.grad = ms.ops.GradOperation(get_by_list=True)

    def reload(self, loadpath):
        """Load parameters from ``loadpath + '.ckpt'`` into self.model.

        The checkpoint is merged over the model's current parameter dict,
        so parameters absent from the checkpoint keep their current values.
        """
        logging.info('reloading...')
        load_state = ms.load_checkpoint(loadpath + '.ckpt')
        model_state = self.model.parameters_dict()
        model_state.update(load_state)
        ms.load_param_into_net(self.model, model_state)

    def evaluate(self, test_task, test_data, loadpath=None):
        """Run ``test_task(self, test_data)``, optionally reloading weights first."""
        if loadpath:
            self.reload(loadpath)
        metric = test_task(self, test_data)
        return metric

    def train_eval(self, train_task, test_task,
                   train_data, valid_data, test_data=None,
                   savepath=None, loadpath=None, reload=False, only_eval=False,
                   n_epochs=10, lr=0.01, l2=0, begin_valid_epoch=0, early_stop=0,
                   show_progress=True,
                   topk=False, best_metrics=None, small_better=None, ref=0,
                   optimizer='Adam',
                   log=False, comment=''):
        """Train with per-epoch validation, early stopping and checkpointing.

        Args:
            train_task: callable(input_batch) -> loss tensor, used by ``fit``.
            test_task: callable(helper, data) -> sequence of metric values.
            train_data, valid_data, test_data: iterables of batches.
            savepath: where to save the best checkpoint; "best" is judged by
                the LAST validation metric only (see the ``i == len(metric) - 1``
                checks below).
            loadpath/reload/only_eval: optionally restore weights before
                training, and optionally stop after a single evaluation.
            n_epochs, lr, l2: epoch count, learning rate, weight decay.
            begin_valid_epoch: first epoch at which validation runs.
            early_stop: stop after this many consecutive non-improving metric
                checks. NOTE(review): the counter increments once per
                non-improving *metric*, not per epoch, and with the default
                early_stop=0 the condition ``decrease_cnt >= early_stop >= 0``
                fires on the very first non-improvement — confirm intended.
            topk: if True each metric is a sequence and entry ``ref`` is used.
            best_metrics / small_better: initial best values and per-metric
                direction flags (True = smaller is better).
            optimizer: optimizer class *name*, resolved through the
                ``from mindspore.nn.optim import *`` star import above.
            log, comment: retained for the commented-out SummaryWriter hooks.

        Returns:
            best_metrics when early stopping triggers, otherwise None
            (after restoring the best checkpoint into the model).
        """
        # self.writer = SummaryWriter(log_dir=self.log_dir + comment)
        if reload:
            if loadpath is None:
                loadpath = savepath
            self.reload(loadpath)
            if only_eval and test_data:
                self.evaluate(test_task, test_data)
                return
        # Resolve the optimizer class by name from the star import above.
        self.optimizer = globals()[optimizer](self.model.trainable_params(),
                                              learning_rate=lr, weight_decay=l2)
        self.trainer = TrainOneStepCell(LinkPredictor(self.model, bce_with_logits), self.optimizer)
        if small_better is None:
            # Default: two metrics, both larger-is-better.
            n_metric = 2
            small_better = [False] * n_metric
        else:
            n_metric = len(small_better)
        best_epoch = [-1] * n_metric
        if best_metrics is None:
            # Worst-possible starting values in each metric's direction.
            best_metrics = [1e5 if small else 0 for small in small_better]

        change = False
        decrease_cnt = 0
        # Snapshot of the best parameters seen so far (starts at current).
        checkpoint = self.model.parameters_dict()

        for epoch in range(n_epochs):
            avgc = self.fit(train_data, train_task, show_progress)
            # if log:
            #     self.writer.add_scalar('Train/Loss', avgc, epoch)
            if np.isnan(avgc):
                logging.error('Epoch {}: NaN error!'.format(str(epoch)))
                return
            logging.info('Epoch{}\tloss: {:.6f}'.format(epoch, avgc))

            if epoch >= begin_valid_epoch:
                self.model.set_train(False)
                metric = test_task(self, valid_data)
                for i, m in enumerate(metric):
                    if topk:
                        m = m[ref]
                    # if log:
                    #     self.writer.add_scalar('Valid/Metrics/%d' % i, m, epoch)
                    # XOR flips the comparison for smaller-is-better metrics.
                    if (best_metrics[i] < m) ^ small_better[i]:
                        best_metrics[i], best_epoch[i] = m, epoch
                        change = True
                        decrease_cnt = 0
                        # Only the last metric decides which params to keep.
                        if savepath and i == len(metric) - 1:
                            checkpoint = copy.deepcopy(self.model.parameters_dict())
                    else:
                        decrease_cnt += 1
                        if decrease_cnt >= early_stop >= 0:
                            if savepath and i == len(metric) - 1:
                                logging.info('Saving model to ' + savepath)
                                ms.load_param_into_net(self.model, checkpoint)
                                ms.save_checkpoint(self.model, savepath)
                            logging.info('BEST Valid Metric:\t' + str(best_metrics))
                            return best_metrics
                logging.info('best_epoch: ' + str(best_epoch) + '\tdecrease_step: ' + str(decrease_cnt))
                logging.info('valid: ' + str(metric))
                # Re-run on test data only when some validation metric improved.
                if test_data and change:
                    test_metric = test_task(self, test_data)
                    for i, m in enumerate(test_metric):
                        if topk:
                            m = m[ref]
                        # if log:
                        #     self.writer.add_scalar('Test/Metrics/%d' % i, m, epoch)
                    logging.info('test:  ' + str(test_metric))
                    change = False
        # Training ran to completion: restore the best snapshot and save it.
        ms.load_param_into_net(self.model, checkpoint)
        logging.info('BEST Valid Metric:\t' + str(best_metrics))
        if savepath:
            logging.info('Saving model to ' + savepath)
            ms.save_checkpoint(self.model, savepath)

    def fit(self, data, task, show_progress=True):
        """Run one epoch of ``task`` over ``data``; return the mean batch loss.

        ``data`` may be a list of aligned iterables (zipped together batch by
        batch) or a single iterable of batches. The loop stops early as soon
        as a NaN loss is produced (the NaN is still included in the mean so
        the caller's ``np.isnan`` check fires).
        """
        self.model.set_train()
        c = []
        if isinstance(data, list):
            iterator = tqdm(zip(*data), total=len(data[0])) if show_progress else zip(*data)
        else:
            iterator = tqdm(data) if show_progress else data
        for niter, input_batch in enumerate(iterator):
            cost = task(input_batch)
            # cost, grads = ms.ops.value_and_grad(task, None, weights=self.optimizer.parameters)(input_batch)
            # self.optimizer(grads)
            c.append(cost.asnumpy().item())
            if show_progress:
                iterator.set_description('loss: %.4f' % c[-1])
            if ms.ops.isnan(cost):
                break
        return np.mean(c)


class LinkPredictor(nn.Cell):
    """Pairs a score-producing backbone with a link-prediction loss.

    The backbone returns two dicts keyed by relation: positive-pair scores
    and negative-pair scores. The per-relation losses of every relation
    that produced at least one positive pair are summed into one scalar.
    """

    def __init__(self, backbone, loss_fn):
        super().__init__(auto_prefix=False)
        self._backbone = backbone
        self._loss_fn = loss_fn

    def construct(self, *inputs):
        pos_score, neg_score = self._backbone(*inputs)
        # Accumulate the loss over relations with non-empty positive scores;
        # starts at 0 so an all-empty batch yields 0, like sum([]) would.
        total = 0
        for rel in pos_score.keys():
            rel_pos = pos_score[rel]
            if rel_pos.shape[0] > 0:
                total = total + self._loss_fn(rel_pos, neg_score[rel])
        return total

    @property
    def backbone_network(self):
        return self._backbone


class TrainOneStepCell(nn.TrainOneStepCell):
    """One optimisation step for a network whose inputs come from state.

    Unlike the stock ``nn.TrainOneStepCell``, ``construct`` takes no
    arguments: the wrapped network is expected to read its inputs from
    attributes assigned beforehand (see GNNHelper.fit_link_prediction,
    which sets blocks/pos_graph/neg_graph on the model).
    """

    def __init__(self, network, optimizer):
        super(TrainOneStepCell, self).__init__(network, optimizer)
        # Gradients taken w.r.t. the parameter list supplied at call time.
        self.grad = ms.ops.GradOperation(get_by_list=True)

    def construct(self):
        # NOTE(review): the forward pass appears to run twice — once here
        # for the loss and once inside the grad call below; value_and_grad
        # could avoid the duplicate pass. Confirm against MindSpore graph
        # semantics before changing.
        loss = self.network()
        grads = self.grad(self.network, self.weights)()
        # depend() forces the optimizer update to run before loss is returned.
        loss = ms.ops.functional.depend(loss, self.optimizer(grads))
        return loss


def inference_heter(model, nodes_dataloader, result_dict):
    """Run minibatch inference over all nodes of a heterogeneous DGL graph.

    Iterates the node dataloader, runs ``model.forward`` on each batch's
    message-flow graphs, and scatters every produced feature into a
    full-graph buffer (one row per node of that type), addressed by the
    destination-node IDs of the last MFG.

    Args:
        model: module whose ``forward(mfgs)`` returns a graph-like object
            exposing ``dstdata``.
        nodes_dataloader: DGL node dataloader yielding
            (input_nodes, output_nodes, mfgs) triples.
        result_dict: initial mapping copied into the result (existing
            entries are kept and may be overwritten per key/ntype).

    Returns:
        dict: feature name -> {ntype: full tensor of shape
        (G.num_nodes(ntype), *feature_shape)}.
    """
    result = dict(result_dict)
    G = nodes_dataloader.collator.g
    for input_nodes, output_nodes, mfgs in nodes_dataloader:
        ndata = model.forward(mfgs).dstdata
        for key in ndata.keys():
            # Raw inputs and DGL's node-ID bookkeeping are not model outputs.
            if key in ('features', '_ID'):
                continue
            if key not in result:
                result[key] = {}
            # Fixed: the old `ntype in ndata[key]` membership test was always
            # true (we iterate that dict's own keys), so it was removed.
            for ntype in ndata[key].keys():
                if ndata[key][ntype].shape[0] > 0:
                    if ntype not in result[key]:
                        # Lazily allocate the full-graph buffer for this ntype.
                        result[key][ntype] = ms.numpy.zeros((G.num_nodes(ntype),
                                                             *ndata[key][ntype].shape[1:]),
                                                            dtype=ndata[key][ntype].dtype)
                    # Scatter this batch's rows into their global positions.
                    result[key][ntype][mfgs[-1].dstdata[dgl.NID][ntype]] = ndata[key][ntype]
    return result


def inference(model, nodes_dataloader, result_dict):
    """Dispatch full-graph inference by graph kind.

    Heterogeneous graphs are handled by ``inference_heter``; for a
    homogeneous graph no handler exists here, so None is returned.
    """
    graph = nodes_dataloader.collator.g
    if graph.is_homogeneous:
        return None
    return inference_heter(model, nodes_dataloader, result_dict)


def test_link_prediction(gnnhelper, data, key='h'):
    """Evaluate link prediction as AUC of positives vs. sampled negatives.

    Computes all node hidden states once via ``inference``, then for each
    evaluation batch scores each user against its positive item (column 0)
    and its negative items, labels column 0 as 1, and accumulates sigmoid
    scores for a single ROC-AUC computation.

    Fixes vs. the previous version:
    - the ``key`` parameter is now actually used (it was hard-coded to
      'h'); the default preserves the old behaviour.
    - predictions/labels are collected in lists and concatenated once,
      instead of re-concatenating a growing tensor every batch (O(n^2)).

    Args:
        gnnhelper: GNNHelper with ``model``, ``classifier`` and
            ``all_nodes_dataloader`` attributes.
        data: iterable of batches with 'user', 'item', 'neg' index tensors.
        key: name of the hidden-state feature produced by inference.

    Returns:
        list: [AUC] as a single-element list (matches train_eval's
        expectation of a metric sequence).
    """
    result_dict = gnnhelper.all_nodes_dataloader.collator.g.dstdata
    all_hiddens = inference(gnnhelper.model, gnnhelper.all_nodes_dataloader, result_dict)[key]
    gnnhelper.all_nodes_dataloader.collator.g.dstdata[key] = all_hiddens
    sigmoid = ms.ops.Sigmoid()
    pred_parts, label_parts = [], []
    for batch in data:
        u, v, neg = batch['user'].astype(ms.int32), batch['item'].astype(ms.int32), batch['neg'].astype(ms.int32)
        u, v = u.view(-1, 1), v.view(-1, 1)
        # Column 0 is the positive item; the remaining columns are negatives.
        v = ms.ops.concat([v, neg], -1)
        score = gnnhelper.classifier.predict(all_hiddens['user'][u, :], all_hiddens['item'][v, :])
        label = ms.numpy.zeros_like(score)
        label[:, 0] = 1
        pred = sigmoid(score)
        if ms.ops.isnan(pred).sum() > 0:
            # Debug aid: surface NaN predictions rather than failing silently.
            print(pred)
        pred_parts.append(pred.view(-1))
        label_parts.append(label.view(-1))
    preds = ms.ops.concat(pred_parts, 0)
    gt = ms.ops.concat(label_parts, 0)
    AUC = roc_auc_score(gt.asnumpy(), preds.asnumpy())
    # RMSE = math.sqrt(mean_squared_error(gt, preds))
    # return AUC, RMSE
    return [AUC]


class GNNHelper(ModelHelper):
    """ModelHelper specialised for GNN link prediction.

    Adds a pair classifier and a dataloader over all graph nodes (used by
    ``test_link_prediction`` for full-graph inference), plus the per-batch
    training step consumed by ``ModelHelper.fit``.
    """

    def __init__(self, model, classifier, all_nodes_dataloader,
                 log_dir=''):
        super().__init__(model, log_dir)
        self.classifier = classifier
        self.all_nodes_dataloader = all_nodes_dataloader

    def fit_link_prediction(self, input_batch, key='h'):
        """Sample pos/neg pair graphs for the batch and run one train step.

        The sampled structures are stashed on the model because the custom
        ``TrainOneStepCell.construct`` takes no arguments and the network
        reads its inputs from model state.
        """
        sampled = self.model.sampler.sample(self.model.g, input_batch)
        input_nodes, pos_graph, neg_graph, blocks = sampled
        self.model.blocks = blocks
        self.model.pos_graph = pos_graph
        self.model.neg_graph = neg_graph
        return self.trainer()


