# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model script"""
import argparse
import logging
import os

import numpy as np

import mindspore as ms
import mindspore.nn as nn
import mindspore.numpy as ms_np
import mindspore.ops as P

from mindspore_gl.nn import GATConv
from mindspore_gl.sampling.neighbor import sage_sampler_on_homo

# from src.trec_new import *
from trec_new import *


class GraphCM(nn.Cell):
    """Graph-based click model.

    Combines an examination predictor and a relevance estimator whose outputs
    are merged into per-position click probabilities. Query/doc embeddings
    optionally come from a GAT layer (``DGATLayer``) over the click graph.
    """

    def __init__(self, args, query_size, doc_size, vtype_size, dataset):
        super(GraphCM, self).__init__()
        self.args = args
        self.dataset = dataset
        self.logger = logging.getLogger("GraphCM")

        self.gnn_layer = DGATLayer(args, query_size, doc_size, vtype_size, self.dataset)
        self.sigmoid = nn.Sigmoid()

        self.exam_predictor = ExamPredictor(args)
        self.rel_estimator = RelEstimator(args)

        # Bug fix: the original code created zero-initialized Parameters and
        # then called ops.fill(...) for its side effect -- but fill() only
        # *returns* a new tensor, it does not modify the parameter in place,
        # so every combination weight silently stayed at 0. Initialize the
        # parameters with the intended values directly instead.
        if self.args.combine == 'exp_mul':
            self.lamda = ms.Parameter(ms.Tensor(np.ones(1), ms.float32), requires_grad=True)
            self.mu = ms.Parameter(ms.Tensor(np.ones(1), ms.float32), requires_grad=True)
        elif self.args.combine == 'linear':
            self.alpha = ms.Parameter(ms.Tensor(np.full(1, 0.5), ms.float32), requires_grad=True)
            self.beta = ms.Parameter(ms.Tensor(np.full(1, 0.5), ms.float32), requires_grad=True)
        elif self.args.combine == 'nonlinear':
            self.w11 = ms.Parameter(ms.Tensor(np.full(1, 0.5), ms.float32), requires_grad=True)
            self.w12 = ms.Parameter(ms.Tensor(np.full(1, 0.5), ms.float32), requires_grad=True)
            self.w21 = ms.Parameter(ms.Tensor(np.full(1, 0.5), ms.float32), requires_grad=True)
            self.w22 = ms.Parameter(ms.Tensor(np.full(1, 0.5), ms.float32), requires_grad=True)
            self.w31 = ms.Parameter(ms.Tensor(np.full(1, 0.5), ms.float32), requires_grad=True)
            self.w32 = ms.Parameter(ms.Tensor(np.full(1, 0.5), ms.float32), requires_grad=True)

    def combine(self, exams, rels):
        """Merge examination and relevance scores into click probabilities.

        Bug fix: the original code called primitive classes such as
        ``P.Mul(rels, exams)``, which *constructs* an operator object instead
        of applying it. Use tensor operator overloads, which are equivalent
        and work in both graph and pynative mode.
        """
        combine = self.args.combine
        if combine == 'mul':
            clicks = rels * exams
        elif combine == 'exp_mul':
            clicks = (rels ** self.lamda) * (exams ** self.mu)
        elif combine == 'linear':
            clicks = rels * self.alpha + exams * self.beta
        elif combine == 'nonlinear':  # 2-layer combination network
            out1 = self.sigmoid(rels * self.w11 + exams * self.w12)
            out2 = self.sigmoid(rels * self.w21 + exams * self.w22)
            clicks = self.sigmoid(out1 * self.w31 + out2 * self.w32)
        else:
            raise NotImplementedError('Unsupported combination type: {}'.format(combine))
        return clicks

    def construct(self, qids, uids, vids, clicks):
        """Return (click probabilities, relevance scores) for a batch of sessions."""
        qid_embed, uid_embed, vid_embed, click_embed, pos_embed = self.gnn_layer(qids, uids, vids, clicks, self.args.use_gnn)
        # Query-doc neighbor interaction features are optional.
        qu_interactions = self.gnn_layer.interact_neighs(qids, uids) if self.args.inter_neigh_sample > 0 else None
        exams = self.exam_predictor(vid_embed, click_embed, pos_embed)

        rels = self.rel_estimator(qid_embed, uid_embed, vid_embed, click_embed, pos_embed, qu_interactions)

        pred_logits = self.combine(exams, rels)

        return pred_logits, rels


class DGATLayer(nn.Cell):
    """Dual-GAT embedding layer.

    Looks up query/doc/vtype/click/position embeddings and, when ``use_gnn``
    is set, refines the query and doc embeddings with a GAT over the
    query-query and doc-doc neighborhood graphs built by ``qid_`` / ``uid_``
    (imported from ``trec_new``).
    """

    def __init__(self, args, query_size, doc_size, vtype_size, dataset):
        super(DGATLayer, self).__init__()
        self.args = args
        self.logger = logging.getLogger("GraphCM")
        self.query_size = query_size
        self.doc_size = doc_size
        self.vtype_size = vtype_size
        self.dataset = dataset
        # self.data_dir = os.path.join('data', self.dataset)
        # if args.use_pretrain_embed:
        #     self.qid_embedding = ms.load_checkpoint(os.path.join(self.data_dir, 'embeddings/dgat_qid_embedding.ckpt'))
        #     self.uid_embedding = ms.load_checkpoint(os.path.join(self.data_dir, 'embeddings/dgat_uid_embedding.ckpt'))
        #     assert self.qid_embedding.embedding_table.data.shape[0] == query_size
        #     assert self.uid_embedding.embedding_table.data.shape[0] == doc_size
        #     assert self.qid_embedding.embedding_table.data.shape[1] == self.args.embed_size
        #     assert self.uid_embedding.embedding_table.data.shape[1] == self.args.embed_size
        # else:
        self.qid_embedding = nn.Embedding(query_size, self.args.embed_size)
        self.uid_embedding = nn.Embedding(doc_size, self.args.embed_size)
        self.click_embedding = nn.Embedding(2, self.args.click_embed_size)   # binary: clicked / not clicked
        self.vid_embedding = nn.Embedding(vtype_size, self.args.vtype_embed_size)
        self.pos_embedding = nn.Embedding(10, self.args.pos_embed_size)      # 10 ranks per session (cf. --max_d_num)

        if args.use_gnn:
            # Edge indexes for the query-query and doc-doc graphs.
            self.qid_edge_index = qid_(self.dataset)
            self.uid_edge_index = uid_(self.dataset)
            # self.qid_edge_index = ms.load_checkpoint(os.path.join(self.data_dir, 'dgat_qid_edge_index.ckpt'))
            # self.uid_edge_index = ms.load_checkpoint(os.path.join(self.data_dir, 'dgat_uid_edge_index.ckpt'))
            # When heads are concatenated the per-head width shrinks so the
            # concatenated output stays embed_size wide.
            out_channel = self.args.embed_size // self.args.gnn_att_heads if self.args.gnn_concat else self.args.embed_size
            self.qid_GAT = GATConv(self.args.embed_size, out_channel, num_attn_head=self.args.gnn_att_heads,
                                   input_drop_out_rate=self.args.gnn_dropout, attn_drop_out_rate=self.args.gnn_dropout,
                                   leaky_relu_slope=self.args.gnn_leaky_slope)
            self.uid_GAT = GATConv(self.args.embed_size, out_channel, num_attn_head=self.args.gnn_att_heads,
                                   input_drop_out_rate=self.args.gnn_dropout, attn_drop_out_rate=self.args.gnn_dropout,
                                   leaky_relu_slope=self.args.gnn_leaky_slope)

        if self.args.inter_neigh_sample > 0:
            # Pre-built doc-neighbor lookup table (an Embedding of neighbor ids).
            self.uid_neighbors = dgat_uid_neighor()
            # self.uid_neighbors = ms.load_checkpoint(os.path.join(self.data_dir, 'dgat_uid_neighbors.ckpt'))
            self.interact_attention = nn.Dense(self.args.embed_size * 2, 1)
            self.interact_activation = nn.LeakyReLU(self.args.inter_leaky_slope)
        self.expand_dims = P.ExpandDims()

    def construct(self, qids, uids, vids, clicks, use_gnn=True):
        """Return (qid, uid, vid, click, pos) embeddings for a batch.

        clicks/vids: (batch_size, seq_len) id tensors; seq_len is a multiple
        of 10 (10 docs per session).
        """
        # Get click/vid/position embeddings
        print("1111111111111")
        batch_size = clicks.shape[0]
        seq_len = clicks.shape[1]
        click_embedding = self.click_embedding(clicks)      # [batch_size, seq_len, click_embed_size]
        vid_embedding = self.vid_embedding(vids)            # [batch_size, seq_len, vtype_embed_size]
        # Tile the 10-row position table across sessions: (bs, seq_len, pos_embed_size).
        pos_embedding = ms_np.tile(self.expand_dims(self.pos_embedding.embedding_table, 0), (batch_size, seq_len // 10, 1))  # (bs, seq_len, embed_size)
        if use_gnn:
            # NOTE(review): ms_np.arange returns a mindspore tensor but astype
            # is given a *numpy* dtype (np.int32) -- verify this is accepted.
            qid_nodess = ms_np.arange(0, self.dataset.query_size)
            qid_neighbor_sampler = sage_sampler_on_homo(homo_graph=self.qid_edge_index,
                                                        seeds=qid_nodess.astype(np.int32),
                                                        neighbor_nums=[self.args.gnn_neigh_sample])

            uid_nodess = ms_np.arange(0, self.dataset.doc_size)
            uid_neighbor_sampler = sage_sampler_on_homo(homo_graph=self.uid_edge_index,
                                                        seeds=uid_nodess.astype(np.int32),
                                                        neighbor_nums=[self.args.gnn_neigh_sample])
            cnt = 0
            # `_` is the number of mini-batch nodes (bs); the second element is
            # the list of all nodes met during L-layer sampling with the target
            # nodes at the front; the third holds per-layer sampling results.
            # The assert enforces that the sampler yields exactly one batch.
            for _, sampled_qid, sampled_index_tuple in qid_neighbor_sampler:
                assert cnt < 1
                cnt += 1
                # NOTE(review): the loop variable sampled_qid is overwritten by
                # sampler.all_nodes here (unlike the uid loop below) -- confirm
                # which one is intended.
                sampled_qid, sampled_index = qid_neighbor_sampler.all_nodes, sampled_index_tuple[0]
                sampled_qid_embed = self.qid_embedding(sampled_qid)
                # NOTE(review): P.ReLU and P.Sort are primitive *classes*;
                # calling them with tensors constructs operator objects rather
                # than applying them (likely P.ReLU()(x) / P.Sort()(x) was
                # intended) -- needs fixing and a runtime check.
                processed_qid_embed = P.ReLU(self.qid_GAT(sampled_qid_embed, sampled_index).type
                                             (ms.float32))
                argsort_sampled_qid = P.Sort(sampled_qid)

            cnt = 0
            for _, sampled_uid, sampled_index_tuple in uid_neighbor_sampler:
                assert cnt < 1
                cnt += 1
                sampled_uid, sampled_index = sampled_uid, sampled_index_tuple[0]
                sampled_uid_embed = self.uid_embedding(sampled_uid)
                processed_uid_embed = P.ReLU(self.uid_GAT(sampled_uid_embed, sampled_index).type
                                             (ms.float32))
                argsort_sampled_uid = P.Sort(sampled_uid)
            # Drop the trailing padding element of each id sequence.
            QIDS = [ms.Tensor.from_numpy(np.array(qid, dtype=np.int64))[:-1] for qid in qids]
            UIDS = [ms.Tensor.from_numpy(np.array(uid, dtype=np.int64))[:-1] for uid in uids]
            # NOTE(review): mindspore.nn has no lowercase `embedding` callable;
            # this line raises AttributeError at runtime. A gather of
            # processed_*_embed rows by the (sorted) sampled ids was probably
            # intended -- must be fixed before the GNN path can run.
            qid_embedding = nn.embedding(nn.embedding(QIDS, argsort_sampled_qid), processed_qid_embed)
            uid_embedding = nn.embedding(nn.embedding(UIDS, argsort_sampled_uid), processed_uid_embed)
        else:
            QIDS = [ms.Tensor.from_numpy(np.array(qid, dtype=np.int64))[:-1] for qid in qids]
            UIDS = [ms.Tensor.from_numpy(np.array(uid, dtype=np.int64))[:-1] for uid in uids]
            qid_embedding = self.qid_embedding(QIDS)
            uid_embedding = self.uid_embedding(UIDS)

        return qid_embedding, uid_embedding, vid_embedding, click_embedding, pos_embedding

    def interact_neighs(self, qids, uids):
        """Attention-weighted query x doc-neighbor interaction features.

        Returns (batch_size, seq_len, embed_size). NOTE(review): every P.Xxx
        call below (Randperm, Mul, Concat, Squeeze, Exp, ReduceSum) uses a
        primitive class as if it were a function -- the same API misuse as in
        construct; the functional lowercase ops are the likely intent.
        """
        # QIDS = [ms.Tensor.from_numpy(np.array(qid, dtype=np.int64))[:-1] for qid in qids]
        # UIDS = [ms.Tensor.from_numpy(np.array(uid, dtype=np.int64))[:-1] for uid in uids]
        batch_size = uids.shape[0]
        seq_len = qids.shape[1]

        # Repeat each query id over its 10 result positions, then over the
        # sampled neighbors.
        qids_extended = ms_np.tile(self.expand_dims(qids, 2), (1, 1, 10)).view(batch_size, seq_len)     # (bs, seq_len)
        qids_extended = ms_np.tile(self.expand_dims(qids_extended, 2), (1, 1, self.args.inter_neigh_sample))    # (bs, seq_len, inter_neigh_sample)
        qids_embed = self.qid_embedding(qids_extended)      # (bs, seq_len, inter_neigh_sample, embed_size)

        # Randomly pick inter_neigh_sample neighbors per doc.
        uids_perm_idx = P.Randperm(self.uid_neighbors.embedding_table.data.shape[1])
        uids_neigh_idx = self.uid_neighbors(uids)[:, :, uids_perm_idx[:self.args.inter_neigh_sample]]   # (bs, seq_len, iner_neigh_sample)
        uids_neigh = self.uid_embedding(uids_neigh_idx.to(ms.int64))        # (bs, seq_len, inter_neigh_sample, embed_size)

        # Element-wise query x neighbor interaction.
        qu_interactions = P.Mul(qids_embed, uids_neigh)     # (bs, seq_len, inter_neigh_sample, embed_size)

        # Softmax-style attention over the sampled neighbors.
        attention_weights = P.Concat([qids_embed, uids_neigh], axis=3)      # (bs, seq_len, inter_neigh_sample, embed_size * 2)
        attention_weights = P.Squeeze(self.interact_attention(attention_weights), 3)    # (batch_size, seq_len, inter_neigh_sample)
        attention_weights = P.Exp(self.interact_activation(attention_weights))  # (batch_size, seq_len, inter_neigh_sample)
        attention_weights = attention_weights / (self.expand_dims(P.ReduceSum(attention_weights, axis=2), 2))     # (batch_size, seq_len, inter_neigh_sample)

        # Weighted sum over neighbors.
        qu_interactions = P.Mul(qu_interactions, self.expand_dims(attention_weights, 3))    # (batch_size, seq_len, inter_neigh_sample, embed_size)
        qu_interactions = P.ReduceSum(qu_interactions, axis=2)

        return qu_interactions


class ExamPredictor(nn.Cell):
    """Predicts per-position examination probabilities with a GRU over the
    concatenated (vtype, click, position) embeddings.
    """

    def __init__(self, args):
        super(ExamPredictor, self).__init__()
        self.args = args
        self.logger = logging.getLogger("GraphCM")
        self.exam_gru = nn.GRU(self.args.pos_embed_size + self.args.vtype_embed_size + self.args.click_embed_size, self.args.hidden_size, batch_first=True)
        # NOTE(review): keep_prob is fed args.dropout_rate -- if that argument
        # really is a drop probability this keeps only that fraction of units;
        # the default of 0.5 makes the two readings coincide. Confirm intent.
        self.dropout = nn.Dropout(keep_prob=self.args.dropout_rate)
        self.sigmoid = nn.Sigmoid()
        self.exam_out_dim = self.args.hidden_size
        self.exam_output_linear = nn.Dense(self.exam_out_dim, 1)

    def construct(self, vid_embed, click_embed, pos_embed):
        """Return examination probabilities of shape (batch_size, seq_len)."""
        batch_size = vid_embed.shape[0]
        seq_len = vid_embed.shape[1]
        # Bug fix: P.Concat / P.Zeros are primitive classes -- calling them
        # with tensor arguments builds operator objects instead of executing
        # them. Use the functional interfaces, and do not wrap the GRU initial
        # state in a Parameter (parameters must not be created in construct).
        exam_input = P.concat((vid_embed, click_embed, pos_embed), axis=2)
        exam_state = P.zeros((1, batch_size, self.args.hidden_size), ms.float32)
        exam_outputs, exam_state = self.exam_gru(exam_input, exam_state)
        exam_outputs = self.dropout(exam_outputs)
        exams = self.sigmoid(self.exam_output_linear(exam_outputs)).view(batch_size, seq_len)
        return exams


class RelEstimator(nn.Cell):
    """Estimates query-document relevance from encoded query and doc
    sequences (plus optional interaction features) via a small MLP.
    """

    def __init__(self, args):
        super(RelEstimator, self).__init__()
        self.args = args
        self.logger = logging.getLogger("GraphCM")

        self.query_encoder = QueryEncoder(args)
        self.doc_encoder = DocEncoder(args)
        # Interaction features contribute one extra embed_size slice.
        mlp_input_dim = self.args.embed_size * 3 if self.args.inter_neigh_sample > 0 else self.args.embed_size * 2
        self.mlp = nn.SequentialCell(
            nn.Dense(mlp_input_dim, self.args.embed_size),
            nn.Tanh(),
            nn.Dense(self.args.embed_size, 1),
            nn.Sigmoid()
        )

    def construct(self, qid_embed, uid_embed, vid_embed, click_embed, pos_embed, qu_interactions):
        """Return relevance scores of shape (batch_size, seq_len)."""
        batch_size = uid_embed.shape[0]
        seq_len = uid_embed.shape[1]
        encoded_query = self.query_encoder(qid_embed)
        encoded_doc = self.doc_encoder(uid_embed, vid_embed, click_embed, pos_embed)
        # Bug fix: P.Concat is a primitive class; calling it with a tensor
        # list only constructs an operator. Use the functional ops.concat.
        if qu_interactions is not None:
            mlp_input = P.concat((encoded_query, encoded_doc, qu_interactions), axis=2)
        else:
            mlp_input = P.concat((encoded_query, encoded_doc), axis=2)
        rels = self.mlp(mlp_input).view(batch_size, seq_len)
        return rels


class QueryEncoder(nn.Cell):
    """Encodes the session-level query sequence with a GRU and repeats each
    query state over the 10 ranked positions of its session.
    """

    def __init__(self, args):
        super(QueryEncoder, self).__init__()
        self.args = args
        self.logger = logging.getLogger("GraphCM")

        self.query_gru = nn.GRU(self.args.embed_size, self.args.hidden_size, batch_first=True)
        self.dropout = nn.Dropout(keep_prob=self.args.dropout_rate)
        self.activation = nn.Sigmoid()
        self.query_linear = nn.Dense(self.args.hidden_size, self.args.embed_size)

    def construct(self, qid_embed):
        """Return encoded queries of shape [batch_size, seq_len, embed_size]."""
        batch_size = qid_embed.shape[0]
        session_num = qid_embed.shape[1]
        # Bug fix: P.Zeros is a primitive class and Parameters must not be
        # created inside construct; build the GRU initial state with the
        # functional ops.zeros instead.
        query_state = P.zeros((1, batch_size, self.args.hidden_size), ms.float32)
        query_outputs, query_state = self.query_gru(qid_embed, query_state)     # [batch_size, session_num, hidden_size]
        # Repeat each session state 10x so it aligns with the 10 ranked docs.
        query_outputs = ms_np.tile(query_outputs, (1, 1, 10)).view(batch_size, 10 * session_num, self.args.hidden_size)     # [batch_size, seq_len, hidden_size]
        query_outputs = self.dropout(query_outputs)
        encoded_query = self.activation(self.query_linear(query_outputs))
        return encoded_query    # [batch_size, seq_len, embed_size]


class DocEncoder(nn.Cell):
    """Encodes the per-position doc sequence with a GRU over the concatenated
    (uid, vtype, click, position) embeddings.
    """

    def __init__(self, args):
        super(DocEncoder, self).__init__()
        self.args = args
        self.logger = logging.getLogger("GraphCM")

        self.doc_gru = nn.GRU(
            self.args.embed_size + self.args.pos_embed_size + self.args.vtype_embed_size + self.args.click_embed_size,
            self.args.hidden_size, batch_first=True)
        self.dropout = nn.Dropout(keep_prob=self.args.dropout_rate)
        self.activation = nn.Sigmoid()
        self.doc_linear = nn.Dense(self.args.hidden_size, self.args.embed_size)

    def construct(self, uid_embed, vid_embed, click_embed, pos_embed):
        """Return encoded docs of shape [batch_size, seq_len, embed_size]."""
        batch_size = uid_embed.shape[0]
        # Bug fix: P.Concat / P.Zeros are primitive classes; use the
        # functional ops, and do not wrap the GRU initial state in a
        # Parameter created inside construct.
        doc_input = P.concat((uid_embed, vid_embed, click_embed, pos_embed), axis=2)
        doc_state = P.zeros((1, batch_size, self.args.hidden_size), ms.float32)
        doc_outputs, doc_state = self.doc_gru(doc_input, doc_state)
        doc_outputs = self.dropout(doc_outputs)
        encoded_doc = self.activation(self.doc_linear(doc_outputs))
        return encoded_doc  # [batch_size, seq_len, embed_size]


def create_data(batch):
    """Generator feeding GeneratorDataset: yields one
    (qids, uids, vids, clicks) tuple per row in the batch.

    NOTE(review): each yielded tuple holds the *full* batch arrays; the row
    count only controls how many identical tuples come out -- confirm this
    duplication is intended by the dataset pipeline.
    """
    n_rows = batch['qids'].shape[0]
    sample = (batch['qids'], batch['uids'], batch['vids'], batch['clicks'])
    for _ in range(n_rows):
        yield sample
        
        
if __name__ == '__main__':
    # Bug fix: argparse was used here without ever being imported, so the
    # script died with NameError before parsing any arguments.
    import argparse

    parser = argparse.ArgumentParser(description='GraphCM')
    parser.add_argument("--device", type=str, default="Ascend", help="which device to use")
    parser.add_argument("--device_id", type=int, default=0, help="which device id to use")
    parser.add_argument('--train', default=True,
                        help='train the model')
    parser.add_argument('--valid', action='store_true',
                        help='perform click prediction task on valid set')
    parser.add_argument('--test', action='store_true',
                        help='perform click prediction task on test set')
    parser.add_argument('--data_path', type=str, default="./data/", help="Path to data file")
    parser.add_argument('--gnn_neigh_sample', type=int, default=5,
                        help='the number of neighbors to be sampled for GAT')

    train_settings = parser.add_argument_group('train settings')
    train_settings.add_argument('--dropout_rate', type=float, default=0.5,
                                help='dropout rate')
    train_settings.add_argument('--optim', default='adadelta',
                                help='optimizer type')
    train_settings.add_argument('--learning_rate', type=float, default=0.01,
                                help='learning rate')
    train_settings.add_argument('--weight_decay', type=float, default=1e-5,
                                help='weight decay')
    train_settings.add_argument('--momentum', type=float, default=0.99,
                                help='momentum')
    train_settings.add_argument('--num_steps', type=int, default=20000,
                                help='number of training steps')
    train_settings.add_argument('--batch_size', type=int, default=64,
                                help='train batch size')

    model_settings = parser.add_argument_group('model settings')
    model_settings.add_argument('--use_pretrain_embed', action='store_true',
                                help='whether use pretrained embeddings')
    model_settings.add_argument('--embed_size', type=int, default=64,
                                help='size of the query/doc embeddings')
    model_settings.add_argument('--click_embed_size', type=int, default=4,
                                help='size of the click embeddings')
    model_settings.add_argument('--vtype_embed_size', type=int, default=8,
                                help='size of the vtype embeddings')
    model_settings.add_argument('--pos_embed_size', type=int, default=4,
                                help='size of the position embeddings')
    model_settings.add_argument('--use_gnn', action='store_true',
                                help='whether use gnn layer')
    model_settings.add_argument('--inter_neigh_sample', type=int, default=0,
                                help='the number of neighbor to be sampled for interaction')
    model_settings.add_argument('--inter_leaky_slope', type=float, default=0.2,
                                help='leaky slope of leakyrelu for interaction')
    model_settings.add_argument('--hidden_size', type=int, default=128,
                                help='size of RNN hidden units')
    model_settings.add_argument('--combine', default='mul',
                                help='the combination type for examination and relevance')
    model_settings.add_argument('--gnn_att_heads', type=int, default=2,
                                help='the number of multi-head attention for GAT')
    model_settings.add_argument('--gnn_concat', type=bool, default=False,
                                help='whether perform concatenation in gat layer')
    model_settings.add_argument('--gnn_leaky_slope', type=float, default=0.2,
                                help='leaky slope of leakyrelu for gat layer')
    model_settings.add_argument('--max_d_num', type=int, default=10,
                                help='max number of docs in a session')
    model_settings.add_argument('--algo', default='GraphCM',
                                help='choose the algorithm to use')

    path_settings = parser.add_argument_group('path settings')
    path_settings.add_argument('--dataset', default='TREC2014',
                               help='name of the dataset to be used')
    path_settings.add_argument('--model_dir', default='./outputs/models/',
                               help='the dir to store models')
    path_settings.add_argument('--result_dir', default='./outputs/results/',
                               help='the dir to output the results')
    path_settings.add_argument('--summary_dir', default='./outputs/summary/',
                               help='the dir to write tensorboard summary')
    path_settings.add_argument('--log_dir', default='./outputs/log/',
                               help='path of the log file. If not set, logs are printed to console')
    path_settings.add_argument('--eval_freq', type=int, default=100,
                               help='the frequency of evaluating on the valid set when training')
    path_settings.add_argument('--load_model', type=int, default=-1,
                               help='load model global step')
    path_settings.add_argument('--patience', type=int, default=5,
                               help='lr decay when more than the patience times of evaluation where loss/ppl do not decrease')
    path_settings.add_argument('--check_point', type=int, default=100,
                               help='the frequency of saving model')
    path_settings.add_argument('--gpu_num', type=int, default=1,
                               help='gpu_num')

    args = parser.parse_args()
    from src.dataset import Dataset
    dataset = Dataset(args)
    train_batches = dataset.gen_mini_batches('train', args.batch_size, shuffle=True)
    model = GraphCM(args, dataset.query_size, dataset.doc_size, dataset.vtype_size, dataset)
    gnn_layer = DGATLayer(args, dataset.query_size, dataset.doc_size, dataset.vtype_size, dataset)
    # Smoke-test: run only the embedding/GNN layer over each mini-batch.
    for b_idx, batch in enumerate(train_batches):
        data = create_data(batch)
        ds_train = ms.dataset.GeneratorDataset(data, ['qids', 'uids', 'vids', 'clicks'], num_parallel_workers=12)
        for iter_num, data in enumerate(ds_train):
            qids, uids, vids, clicks = data
            gnn_layer(qids, uids, vids, clicks, args.use_gnn)
            # model(qids, uids, vids, clicks)