# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Training validates the main script"""
import os
import logging
from tqdm import tqdm
import numpy as np

from mindspore.nn import Adagrad, Adam, RMSProp, SGD, BCELoss
from mindspore import ops as P
import mindspore as ms
import mindspore.numpy as ms_np
from mindspore.nn.metrics import Accuracy
from mindspore.nn import Cell, WithLossCell, TrainOneStepCell
from mindspore.profiler import Profiler
import mindspore.nn as nn

from mindspore_gl import Graph
from mindspore_gl.nn import GNNCell
from src.GraphCM_new import GraphCM
from src.trec_new import *

MINF = 1e-30


class LossNet(nn.Cell):
    """Wraps a GraphCM network and reduces its click predictions to a scalar
    masked binary-cross-entropy loss.

    Args:
        net: the GraphCM cell; its forward returns (pred_logits, rels).
    """

    def __init__(self, net):
        super(LossNet, self).__init__()
        self.net = net
        # reduction='none' keeps per-element losses so padded positions can
        # be masked out before averaging.
        self.loss_criterion = BCELoss(reduction='none')
        self.cast = P.Cast()
        self.mean = P.ReduceMean(keep_dims=False)

    def construct(self, qids, uids, vids, clicks, TRUE_CLICKS, MASK,
                  uid_src_idx, uid_dst_idx, uid_n_nodes, uid_n_edges,
                  qid_src_idx, qid_dst_idx, qid_n_nodes, qid_n_edges):
        """Forward pass.

        Args:
            qids, uids, vids, clicks: batch feature tensors fed to the net.
            TRUE_CLICKS: ground-truth click labels (int; cast to float32 for BCE).
            MASK: boolean tensor marking valid (non-padded) label positions.
            uid_*/qid_*: flattened graph descriptors (src/dst indices, node and
                edge counts) for the uid and qid neighbourhood graphs.

        Returns:
            Scalar mean BCE loss over the unmasked positions.
        """
        pred_logits, rels = self.net(qids, uids, vids, clicks,
                                     uid_src_idx, uid_dst_idx, uid_n_nodes, uid_n_edges,
                                     qid_src_idx, qid_dst_idx, qid_n_nodes, qid_n_edges)
        # BCELoss requires floating-point targets.
        TRUE_CLICKS = self.cast(TRUE_CLICKS, ms.float32)
        losses = self.loss_criterion(pred_logits, TRUE_CLICKS)
        # Drop losses at padded positions, then average the remainder.
        losses = P.masked_select(losses, MASK)
        loss = self.mean(losses)
        return loss

class Model(object):
    """Driver object for GraphCM: builds the network and optimizer, and runs
    the training / validation / test loops.

    Args:
        args: parsed command-line configuration namespace.
        query_size, doc_size, vtype_size: vocabulary sizes for embeddings.
        dataset: dataset object providing mini-batch generators and graphs.
    """

    def __init__(self, args, query_size, doc_size, vtype_size, dataset):
        # Config setup
        self.args = args
        self.hidden_size = args.hidden_size
        self.optim_type = args.optim
        self.learning_rate = args.learning_rate
        self.weight_decay = args.weight_decay
        self.eval_freq = args.eval_freq
        # Resume step counter when loading from a checkpoint id (>= 0).
        self.global_step = args.load_model if args.load_model > -1 else 0
        self.patience = args.patience
        self.max_d_num = args.max_d_num
        self.writer = None
        self.zeros = P.Zeros()

        # GraphCM network
        self.model = GraphCM(args, query_size, doc_size, vtype_size, dataset)

        self.optimizer = self.create_train_op()

    def create_train_op(self):
        """Select the optimization algorithm and build the optimizer.

        Returns:
            A mindspore optimizer over the model's trainable parameters.

        Raises:
            NotImplementedError: if ``self.optim_type`` is not recognized.
        """
        if self.optim_type == 'adagrad':
            optimizer = Adagrad(self.model.trainable_params(), learning_rate=self.learning_rate,
                                weight_decay=self.args.weight_decay)
        elif self.optim_type == 'adadelta':
            optimizer = ms.nn.Adadelta(self.model.trainable_params(), learning_rate=self.learning_rate,
                                       weight_decay=self.args.weight_decay)
        elif self.optim_type == 'adam':
            optimizer = Adam(self.model.trainable_params(), learning_rate=self.learning_rate,
                             weight_decay=self.args.weight_decay)
        elif self.optim_type == 'rprop':
            # NOTE(review): the 'rprop' option maps to RMSProp, not Rprop —
            # confirm this is intentional.
            optimizer = RMSProp(self.model.trainable_params(), learning_rate=self.learning_rate,
                                weight_decay=self.args.weight_decay)
        elif self.optim_type == 'sgd':
            optimizer = SGD(self.model.trainable_params(), learning_rate=self.learning_rate,
                            momentum=self.args.momentum, weight_decay=self.args.weight_decay)
        else:
            raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))
        return optimizer

    def compute_perplexity(self, pred_logits, TRUE_CLICKS, MASK):
        """Compute the (log-domain) perplexity contribution of a batch.

        Sessions are laid out 10 documents wide along axis 1; the per-rank
        log-likelihoods of all sessions are folded into the first 10 columns
        and summed over the batch.

        Returns:
            A length-10 tensor of summed log2-likelihoods per rank position.
        """
        session_num = pred_logits.shape[1] // 10
        # MINF guards log2(0) at saturated predictions.
        pos_logits = ms_np.log2(pred_logits + MINF)
        neg_logits = ms_np.log2(1. - pred_logits + MINF)
        perplexity_at_rank = ms_np.where(TRUE_CLICKS == 1, pos_logits, neg_logits)
        # Zero out padded positions so they do not affect the sums.
        perplexity_at_rank = ms_np.where(MASK == True, perplexity_at_rank,
                                         self.zeros(perplexity_at_rank.shape, ms.float32))
        for session_idx in range(1, session_num):
            perplexity_at_rank[:, :10] += perplexity_at_rank[:, 10 * session_idx:10 * session_idx + 10]
        perplexity_at_rank = perplexity_at_rank[:, :10].sum(axis=0)

        return perplexity_at_rank

    def adjust_learning_rate(self, decay_rate=0.5):
        """Scale the optimizer's learning rate in place by ``decay_rate``."""
        new_lr = self.optimizer.learning_rate * decay_rate
        P.assign(self.optimizer.learning_rate, ms.Tensor(new_lr, dtype=ms.float32))

    def save_model(self, model_dir, model_prefix):
        """
        Save the model into model_dir with model_prefix as the model indicator
        """
        # TODO(review): checkpoint saving is not implemented yet.

    def create_data(self, batch, TRUE_CLICKS, MASK):
        """Generator adapter feeding a batch into GeneratorDataset.

        NOTE(review): every iteration yields the same full-batch tuple —
        the loop index ``i`` is unused; confirm this repetition is intended.
        """
        for i in range(batch['qids'].shape[0]):
            yield (batch['qids'], batch['uids'], batch['vids'], batch['clicks'], TRUE_CLICKS, MASK)

    def _train_epoch(self, train_batches, dataset, metric_save, patience):
        """Run one epoch of training with periodic validation/testing.

        Returns:
            (exit_tag, metric_save, patience): whether to stop training,
            the best validation perplexity so far, and the patience counter.
        """
        evaluate = True
        exit_tag = False
        num_steps = self.args.num_steps  # total training steps (e.g. 20k)
        check_point, batch_size = self.args.check_point, self.args.batch_size
        save_dir, save_prefix = self.args.model_dir, self.args.algo

        # Neighbourhood graphs for query ids and doc (uid) ids.
        qid_graph, qid_node_embedding, qid_node = qid_nei(dataset)
        uid_graph, uid_node_embedding, uid_node = uid_nei(dataset)

        for b_idx, batch in enumerate(train_batches):
            self.global_step += 1
            # Build TRUE_CLICKS & MASK: right-pad label lists with zeros, then
            # mark non-zero labels as valid positions.
            max_len = max(len(l) for l in batch['true_clicks'])
            TRUE_CLICKS = list(map(lambda l: l + [0] * (max_len - len(l)), batch['true_clicks']))
            TRUE_CLICKS = np.concatenate(np.expand_dims(TRUE_CLICKS, axis=0), axis=0)
            MASK = np.where(TRUE_CLICKS != 0, 1, 0)
            MASK = (MASK == 1)

            data = self.create_data(batch, TRUE_CLICKS, MASK)
            ds_train = ms.dataset.GeneratorDataset(data, ['qids', 'uids', 'vids', 'clicks', 'TRUE_CLICKS', 'MASK'],
                                                   num_parallel_workers=12)

            opt = self.optimizer
            network = self.model
            loss = LossNet(network)
            train_net = nn.TrainOneStepCell(loss, opt)
            train_net.set_train(True)

            for iter_num, data in enumerate(ds_train):
                qids, uids, vids, clicks, TRUE_CLICKS, MASK = data
                qids = qids.astype(ms.int32)
                uids = uids.astype(ms.int32)
                vids = vids.astype(ms.int32)
                clicks = clicks.astype(ms.int32)

                # Both graphs are unpacked into (src_idx, dst_idx, n_nodes, n_edges).
                train_loss = train_net(qids, uids, vids, clicks, TRUE_CLICKS, MASK,
                                       *uid_graph.get_graph(), *qid_graph.get_graph())
                if iter_num % 10 == 0:
                    print(f"Iteration/Epoch: {iter_num}:{b_idx} train loss: {train_loss}")

            if self.global_step % self.eval_freq == 0:
                train_net.set_train(False)
                valid_batches = dataset.gen_mini_batches('valid', dataset.validset_size, shuffle=False)
                valid_click_loss, valid_rel_loss, valid_perplexity = self.evaluate(valid_batches, network)
                print(f"valid click_loss: {valid_click_loss} valid perplexity: {valid_perplexity}")

                test_batches = dataset.gen_mini_batches('test', dataset.testset_size, shuffle=False)
                test_click_loss, test_rel_loss, test_perplexity = self.evaluate(test_batches, network)
                print(f"test click_loss: {test_click_loss} test perplexity: {test_perplexity}")

                # Early-stopping bookkeeping on validation perplexity.
                if valid_perplexity < metric_save:
                    metric_save = valid_perplexity
                    patience = 0
                else:
                    patience += 1
                if patience >= self.patience:
                    # Decay the learning rate and reset the patience counter.
                    self.adjust_learning_rate(self.args.lr_decay)
                    self.learning_rate *= self.args.lr_decay
                    metric_save = valid_perplexity
                    patience = 0
                    # NOTE(review): the patience threshold itself grows after
                    # each decay — confirm this escalation is intended.
                    self.patience += 1
            if self.global_step >= num_steps:
                exit_tag = True
        return exit_tag, metric_save, patience

    def train(self, dataset):
        """Train until ``num_steps`` global steps have been taken."""
        patience, metric_save = 0, 1e10
        exit_tag = False
        while not exit_tag:
            train_batches = dataset.gen_mini_batches('train', self.args.batch_size, shuffle=True)
            exit_tag, metric_save, patience = self._train_epoch(train_batches, dataset, metric_save, patience)

    def evaluate(self, eval_batches, model):
        """Evaluate ``model`` on ``eval_batches``.

        Returns:
            (click_loss, rel_loss, perplexity): query-averaged click loss,
            relevance loss (currently always 0), and mean perplexity over
            the 10 rank positions.
        """
        total_click_loss, total_rel_loss, total_num = 0., 0., 0
        perplexity_at_rank = self.zeros(10, ms.float32)
        for b_idx, batch in enumerate(eval_batches):
            # Build TRUE_CLICKS & MASK exactly as in training.
            max_len = max(len(l) for l in batch['true_clicks'])
            TRUE_CLICKS = list(map(lambda l: l + [0] * (max_len - len(l)), batch['true_clicks']))
            TRUE_CLICKS = np.concatenate(np.expand_dims(TRUE_CLICKS, axis=0), axis=0)
            MASK = np.where(TRUE_CLICKS != 0, 1, 0)
            MASK = (MASK == 1)
            # Each query contributes 10 ranked documents.
            query_num = MASK.sum() // 10
            data = self.create_data(batch, TRUE_CLICKS, MASK)
            ds_eval = ms.dataset.GeneratorDataset(data, ['qids', 'uids', 'vids', 'clicks', 'TRUE_CLICKS', 'MASK'],
                                                  num_parallel_workers=12)
            for iter_num, data in enumerate(ds_eval):
                qids, uids, vids, clicks, TRUE_CLICKS, MASK = data
                # NOTE(review): here the net is called without the graph
                # arguments used in training — confirm this overload exists.
                pred_logits, click_loss = model(qids, uids, vids, clicks)
            batch_perplexity_at_rank = self.compute_perplexity(pred_logits, TRUE_CLICKS, MASK)
            perplexity_at_rank = perplexity_at_rank + batch_perplexity_at_rank
            total_click_loss += click_loss * query_num
            total_num += query_num

        click_loss = 1.0 * total_click_loss / total_num
        rel_loss = 1.0 * total_rel_loss / total_num
        perplexity = (2 ** (- perplexity_at_rank / total_num)).sum() / 10

        return click_loss, rel_loss, perplexity