'''
Created on July 1, 2022
Tensorflow Implementation of Adaptive Adversarial Contrastive Learning for Cross-Domain Recommendation

@author: Chi-Wei Hsu (apple.iim09g@nycu.edu.tw)
'''
import tensorflow as tf
import numpy as np
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
from random import random, randint
from time import time
from tqdm import tqdm
from collections import defaultdict
import heapq
import math
import scipy.sparse as sp
from utility.GNN import *

class ACLCDR(object):
    def __init__(self, args, data_s, data_t):
        """Configure the ACLCDR model from parsed hyperparameters and the
        source/target domain data generators.

        Args:
            args: parsed command-line hyperparameters; several string-valued
                fields (Ks, layer_size, regs, lambda_s, lambda_t, weight_loss,
                node_dropout, mess_dropout) are converted with eval().
            data_s: source-domain data generator (adjacency matrices, training
                interactions, R matrix, item statistics, test users).
            data_t: target-domain data generator (same interface as data_s).
        """
        self.args = args
        #if you have pretrained embeddings
        self.pretrain_data = None
        self.adj_type = args.adj_type
        self.initial_type = args.initial_type
        
        #source domain data
        self.data_generator_s = data_s
        #target domain data
        self.data_generator_t = data_t

        #random float in [0, 1) -- presumably used as a run/weight identifier; verify against callers
        self.weight_id = random()
    
        #NOTE(review): eval() on CLI strings is fine for trusted args, unsafe on untrusted input
        self.Ks = eval(args.Ks)
        self.layer_size = args.layer_size
        self.neg_num = args.neg_num
        
        split_ids_s, split_status_s, split_ids_t, split_status_t = [], [], [], []
        
        #user ids for testing in the source domain
        split_ids_s.append(range(self.data_generator_s.testUserNum))
        split_status_s.append('full rating, #user=%d'%self.data_generator_s.testUserNum)
        #user ids for testing in the target domain
        split_ids_t.append(range(self.data_generator_t.testUserNum))
        split_status_t.append('full rating, #user=%d'%self.data_generator_t.testUserNum)
        
        self.split_ids_s = split_ids_s
        self.split_ids_t = split_ids_t
        self.split_status_s = split_status_s
        self.split_status_t = split_status_t
      
        #metric loggers, appended to once per evaluation round (source / target domain)
        self.loss_loger, self.pre_loger, self.rec_loger, self.ndcg_loger, self.hit_loger, self.mrr_loger = [], [], [], [], [], []
        self.pre_loger_t, self.rec_loger_t, self.ndcg_loger_t, self.hit_loger_t, self.mrr_loger_t = [], [], [], [], []
        
        #the number of source domain user
        self.n_users_s = data_s.n_users
        #the number of target domain user
        self.n_users_t = data_t.n_users
        #the number of source domain items
        self.n_items_s = data_s.n_items
        #the number of target domain items
        self.n_items_t = data_t.n_items
      
        #get initial adjacency matrices in the source domain
        self.plain_adj_s, self.norm_adj_s, self.mean_adj_s = data_s.get_adj_mat()
        #get initial adjacency matrices in the target domain
        self.plain_adj_t, self.norm_adj_t, self.mean_adj_t = data_t.get_adj_mat()
        
        #contrastive learning hyperparameters
        self.aug_type = args.aug_type
        self.cl_type = args.cl_type
        self.cl_reg = args.cl_reg
        self.cl_temp = args.cl_temp
        self.cl_ratio = args.cl_ratio
        self.cl_mode = args.cl_mode
        
        #set the specific number of user and item
        self.user_ratio = args.user_ratio
        self.item_ratio = args.item_ratio
        
        #get items based on the interactions or the ratings
        self.item_items_s, self.item_score_s = self.data_generator_s.cal_item_times_score()
        self.item_items_t, self.item_score_t = self.data_generator_t.cal_item_times_score()
        
        #get training user and items
        self.training_user_s, self.training_item_s = data_s.get_train_intersactions()
        self.training_user_t, self.training_item_t = data_t.get_train_intersactions()
        
        #get R matrices
        self.R_s = self.data_generator_s.get_R_mat()
        self.R_t = self.data_generator_t.get_R_mat()
        
        #number of folds used when splitting the adjacency matrix for sparse ops
        self.n_fold = 100       
        
        self.connect_way = args.connect_type
        self.layer_fun = args.layer_fun

        self.lr = args.lr
        self.n_interaction = args.n_interaction

        self.emb_dim = args.embed_size
        self.batch_size = args.batch_size

        #GNN layer widths; one entry per propagation layer
        self.weight_size = eval(args.layer_size)
        self.n_layers = len(self.weight_size)
        
#         print('Model Weight:', self.weight_size)
#         print('Model Layer:', self.n_layers)

        self.regs = eval(args.regs)
        #first regularization term is the embedding decay
        self.decay = self.regs[0]
        self.verbose = args.verbose
        
        #control the information from the source domain to the target domain
        self.lambda_s = eval(args.lambda_s)
        #control the information from the target domain to the source domain
        self.lambda_t = eval(args.lambda_t)
        
        #parameter initialization: 'x' = Xavier, 'u' = normal
        if self.initial_type == 'x':
            self.initializer = tf.contrib.layers.xavier_initializer()
        elif self.initial_type == 'u':
            self.initializer = tf.random_normal_initializer()

        #per-domain loss weights, e.g. weight_loss='[1.0, 1.0]'
        self.weight_source ,self.weight_target = eval(args.weight_loss)[:]
    
    #create placeholder for input data
    #create placeholder for input data
    def _create_variable(self):
        """Define the TF1 placeholders fed at train/eval time and initialize
        the per-domain model weights (suffix _s = source, _t = target)."""
        #user id batches for each domain
        self.users_s = tf.placeholder(tf.int32, shape=(None,))
        self.users_t = tf.placeholder(tf.int32, shape=(None,))
        
        #overlapping user across domains
        self.overlapped_users = tf.placeholder(tf.int32, shape=(None,))
        
        #positive/negative item id batches for each domain
        self.items_s = tf.placeholder(tf.int32, shape=(None,))
        self.items_s_neg = tf.placeholder(tf.int32, shape=(None,))        
        
        self.items_t = tf.placeholder(tf.int32, shape=(None,))
        self.items_t_neg = tf.placeholder(tf.int32, shape=(None,))        
        
        #binary interaction labels for the cross-entropy loss
        self.label_s = tf.placeholder(tf.float32,shape=(None,))
        self.label_t = tf.placeholder(tf.float32,shape=(None,))
        self.isTraining = tf.placeholder(tf.bool)
        
        #placeholder for adjacency matrix(coo type)
        #we use these to store the augmentation graph
        #(indices/values/shape triplets mirror tf.SparseTensor construction)
        self.sub_mat_s = {}
        self.sub_mat_t = {}
        
        self.sub_mat_s['adj_values_sub1'] = tf.placeholder(tf.float32)
        self.sub_mat_s['adj_indices_sub1'] = tf.placeholder(tf.int64)
        self.sub_mat_s['adj_shape_sub1'] = tf.placeholder(tf.int64)
        
        self.sub_mat_t['adj_values_sub1'] = tf.placeholder(tf.float32)
        self.sub_mat_t['adj_indices_sub1'] = tf.placeholder(tf.int64)
        self.sub_mat_t['adj_shape_sub1'] = tf.placeholder(tf.int64)

        #node dropout mechanism to prevent overfitting
        self.node_dropout_flag = self.args.node_dropout_flag
        self.node_dropout = tf.placeholder(tf.float32, shape=[None])
        self.mess_dropout = tf.placeholder(tf.float32, shape=[None])

        # initialization of model parameters (embeddings + per-layer GNN weights)
        self.weights_source = self._init_weights('source',self.n_users_s,self.n_items_s,None)
        self.weights_target = self._init_weights('target',self.n_users_t,self.n_items_t,None)
   
    #generate user embeddings and item embeddings
    def build_graph(self):
        
        #initial variables used in the ACLCDR framework
        self._create_variable()
        
        #ddqn 
        if self.cl_type == 0:
            pass
        #get random augmentation graph
        elif self.cl_type == 1:
            self.create_embed_with_random()
        #get rating/interaction graph
        elif self.cl_type == 2:    
            self.create_embed_with_interaction()
        else:
            self.create_embed_with_rating()
            
        with tf.name_scope('inference'):
            #get the original representations and augmented representations with the choosed augmentation method
            self.ua_embeddings_s, self.ia_embeddings_s, self.ua_embeddings_t, self.ia_embeddings_t, self.ua_embeddings_sub1_s, self.ia_embeddings_sub1_s, self.ua_embeddings_sub1_t, self.ia_embeddings_sub1_t = create_embed(self.weights_source, self.weights_target, self.norm_adj_s, self.norm_adj_t, self.n_fold, self.node_dropout, self.mess_dropout, self.n_layers, self.node_dropout_flag, self.sub_mat_s, self.sub_mat_t, self.n_users_s, self.n_users_t, self.n_items_s, self.n_items_t, self.connect_way, self.layer_fun)

        
            #user look-up table to assign embeddings to users and items based on their ids
            #---------------------original graph embedding------------------------------------
            self.u_g_embeddings_s = tf.nn.embedding_lookup(self.ua_embeddings_s, self.users_s) 
            self.u_g_embeddings_t = tf.nn.embedding_lookup(self.ua_embeddings_t, self.users_t) 
            self.i_g_embeddings_s = tf.nn.embedding_lookup(self.ia_embeddings_s, self.items_s) 
            self.i_g_embeddings_t = tf.nn.embedding_lookup(self.ia_embeddings_t, self.items_t)
            
            #collaboratetive filtering recommendation score
            self.scores_s = self.get_scores(self.u_g_embeddings_s,self.i_g_embeddings_s)      
            self.scores_t = self.get_scores(self.u_g_embeddings_t,self.i_g_embeddings_t)

            #single-domain contrastive learning in the source domain (Intradomain tasks)
            self.ssl_loss_s = self.calc_ssl_loss(self.ua_embeddings_s, self.ua_embeddings_sub1_s, self.users_s, self.ia_embeddings_s, self.ia_embeddings_sub1_s, self.items_s)

            #single-domain contrastive leraning in the target domain (Intradomain tasks)
            self.ssl_loss_t = self.calc_ssl_loss(self.ua_embeddings_t, self.ua_embeddings_sub1_t, self.users_t, self.ia_embeddings_t, self.ia_embeddings_sub1_t, self.items_t)
            
            #cross-domain contrastive learning (Interdomain task)
            self.ssl_loss_cross_origin = self.calc_ssl_cross_loss(self.ua_embeddings_s, self.overlapped_users, self.ua_embeddings_t, self.overlapped_users)

            #recommendation loss, embeddings loss and regularizer loss
            self.mf_loss_s, self.emb_loss_s, self.reg_loss_s  = self.create_cross_loss(self.u_g_embeddings_s,
                                                                              self.i_g_embeddings_s, self.label_s, self.scores_s)
            self.mf_loss_t, self.emb_loss_t, self.reg_loss_t  = self.create_cross_loss(self.u_g_embeddings_t,
                                                                              self.i_g_embeddings_t, self.label_t, self.scores_t)           
            
            #final recommendation loss
            self.loss_source = self.mf_loss_s + self.emb_loss_s + self.reg_loss_s
            self.loss_target = self.mf_loss_t + self.emb_loss_t + self.reg_loss_t
            
            #cross-domain loss with interdomain contrastive loss
            self.loss = self.loss_source + self.loss_target + self.cl_ratio * self.ssl_loss_cross_origin

            #single-domain loss with intradomain contrastive loss
            self.loss_source = self.loss_source + self.cl_ratio * self.ssl_loss_s
            self.loss_target = self.loss_target + self.cl_ratio * self.ssl_loss_t
            
            #optimizer
            self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
            self.opt_s = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss_source,var_list=[self.weights_source])
            self.opt_t = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss_target,var_list=[self.weights_target])
    
    #initial weights
    #initial weights
    def _init_weights(self, name_scope, n_users, n_items, user_embedding):
        """Create the embedding tables and per-layer GNN weight variables for
        one domain.

        Args:
            name_scope: 'source' or 'target'; suffixed onto variable names.
            n_users: number of users in this domain.
            n_items: number of items in this domain.
            user_embedding: optional pre-built user-embedding initial value;
                when None, user embeddings are freshly initialized.

        Returns:
            dict mapping weight names ('user_embedding', 'item_embedding',
            'W_gc_k', 'b_gc_k', 'W_bi_k', 'b_bi_k', 'W_trans_k') to variables.
        """
        all_weights = dict()
        
        #initializer
        initializer = self.initializer        
        
        #if no pretrained embeddings
        if self.pretrain_data is None:
            print('Get Intial Embeddings!')
            if user_embedding is None:
                all_weights['user_embedding'] = tf.Variable(initializer([n_users, self.emb_dim]), name='user_embedding_%s'%name_scope)
                all_weights['item_embedding'] = tf.Variable(initializer([n_items, self.emb_dim]), name='item_embedding_%s'%name_scope)
            else:
                all_weights['user_embedding'] = tf.Variable(initial_value=user_embedding,trainable=True, name='user_embedding_%s'%name_scope)
                all_weights['item_embedding'] = tf.Variable(initializer([n_items, self.emb_dim]), name='item_embedding_%s'%name_scope)
        #if there are pretrained embeddings
        else:
            print('Get Pretrained Embeddings!')
            all_weights['user_embedding'] = tf.Variable(initial_value=self.pretrain_data['user_embed'], trainable=True,
                                                        name='user_embedding_%s'%name_scope, dtype=tf.float32)
            all_weights['item_embedding'] = tf.Variable(initial_value=self.pretrain_data['item_embed'], trainable=True,
                                                        name='item_embedding_%s'%name_scope, dtype=tf.float32)
               
        #weight size
        #Format: [embedding size] + [GNN size]
        self.weight_size_list = [self.emb_dim] + self.weight_size
        print('Weight Size List')
        print(self.weight_size_list)
        
        #initial weights: graph-convolution (gc), bi-interaction (bi) and
        #transform (trans) parameters for each propagation layer
        for k in range(self.n_layers):
            all_weights['W_gc_%d' %k] = tf.Variable(
                initializer([self.weight_size_list[k], self.weight_size_list[k+1]]), name='W_gc_%d_%s' %(k,name_scope))
            all_weights['b_gc_%d' %k] = tf.Variable(
                initializer([1, self.weight_size_list[k+1]]), name='b_gc_%d_%s' %(k,name_scope))

            all_weights['W_bi_%d' %k] = tf.Variable(
                initializer([self.weight_size_list[k], self.weight_size_list[k + 1]]), name='W_bi_%d_%s' %(k,name_scope))
            all_weights['b_bi_%d' %k] = tf.Variable(
                initializer([1, self.weight_size_list[k + 1]]), name='b_bi_%d_%s' %(k,name_scope))

            #NOTE(review): the variable name uses %s for k (others use %d); harmless, same string
            all_weights['W_trans_%d' %k] = tf.Variable(
                initializer([2*self.weight_size_list[k+1], self.weight_size_list[k+1]]), name='W_trans_%s_%s' %(k,name_scope))

        return all_weights
    
    #convert coo type to sp
    def _convert_sp_mat_to_sp_tensor(self, X):
        coo = X.tocoo().astype(np.float32)
        indices = np.mat([coo.row, coo.col]).transpose()
        return tf.SparseTensor(indices, coo.data, coo.shape)

    #convert csr type to sp
    def _convert_csr_to_sparse_tensor_inputs(self, X):
        coo = X.tocoo()
        indices = np.mat([coo.row, coo.col]).transpose()
        return indices, coo.data, coo.shape
    
    #randomly generate adjacency matrix (Node Dropout, Edge Addition/Dropout, Subgraph)
    def create_embed_with_random(self):            
        self.norm_adj_s_sub1 = self.create_adj_mat_rdn(self.training_user_s, self.training_item_s, self.n_users_s, self.n_items_s)
        self.norm_adj_t_sub1 = self.create_adj_mat_rdn(self.training_user_t, self.training_item_t, self.n_users_t, self.n_items_t)
   
    #generate adjacency matrix based on the number of interactions
    def create_embed_with_interaction(self):
        user_size = min(int(self.n_users_s * self.user_ratio), int(self.n_users_t * self.user_ratio))
        item_size = min(int(self.n_items_s * self.item_ratio), int(self.n_items_t * self.item_ratio))
        
        self.user_nodes_list_s = self.randint_choice(self.n_users_s, size=user_size, replace=False)        
        self.user_nodes_list_t = self.randint_choice(self.n_users_t, size=user_size, replace=False)
        
        item_add_list_s = []
        item_drop_list_s = []
        for idx in range(item_size):
            item_add_list_s.append(self.item_items_s[idx][0])
            
            if idx == 0:
                pass
            item_drop_list_s.append(self.item_items_s[-idx][0])
        
        item_add_list_t = []
        item_drop_list_t = []
        for idx in range(item_size):
            item_add_list_t.append(self.item_items_t[idx][0])
            
            if idx == 0:
                pass
            item_drop_list_t.append(self.item_items_t[-idx][0])
        
        #source domain
        for user_idx in self.user_nodes_list_s:
            random_idx = randint(0, len(item_add_list_s)-1)
            add_item_idx = item_add_list_s[random_idx]

            self.R_s[user_idx, add_item_idx] = 1.0
            
            random_idx = randint(0, len(item_drop_list_s)-1)
            drop_item_idx = item_drop_list_s[random_idx]

            self.R_s[user_idx, drop_item_idx] = 0.0
        
        #target domain
        for user_idx in self.user_nodes_list_t:
            random_idx = randint(0, len(item_add_list_t)-1)
            add_item_idx = item_add_list_t[random_idx]

            self.R_t[user_idx, add_item_idx] = 1.0
            
            random_idx = randint(0, len(item_drop_list_t)-1)
            drop_item_idx = item_drop_list_t[random_idx]

            self.R_t[user_idx, drop_item_idx] = 0.0
            
        
        self.norm_adj_s_sub1 = self.create_adj_mat(self.n_users_s, self.n_items_s, self.R_s)
        self.norm_adj_t_sub1 = self.create_adj_mat(self.n_users_t, self.n_items_t, self.R_t)
    
    #generate adjacency matrix based on the ratings of item
    def create_embed_with_rating(self):
        user_size = min(int(self.n_users_s * self.user_ratio), int(self.n_users_t * self.user_ratio))
        item_size = min(int(self.n_items_s * self.item_ratio), int(self.n_items_t * self.item_ratio))
        
        self.user_nodes_list_s = self.randint_choice(self.n_users_s, size=user_size, replace=False)        
        self.user_nodes_list_t = self.randint_choice(self.n_users_t, size=user_size, replace=False)
        
        item_add_list_s = []
        item_drop_list_s = []
        for idx in range(item_size):
            item_add_list_s.append(self.item_score_s[idx][0])
            
            if idx == 0:
                pass
            item_drop_list_s.append(self.item_score_s[-idx][0])
        
        item_add_list_t = []
        item_drop_list_t = []
        for idx in range(item_size):
            item_add_list_t.append(self.item_score_t[idx][0])
            
            if idx == 0:
                pass
            item_drop_list_t.append(self.item_score_t[-idx][0])
        
        #source domain
        for user_idx in self.user_nodes_list_s:
            random_idx = randint(0, len(item_add_list_s)-1)
            add_item_idx = item_add_list_s[random_idx]

            self.R_s[user_idx, add_item_idx] = 1.0
            
            random_idx = randint(0, len(item_drop_list_s)-1)
            drop_item_idx = item_drop_list_s[random_idx]

            self.R_s[user_idx, drop_item_idx] = 0.0
        
        #target domain
        for user_idx in self.user_nodes_list_t:
            random_idx = randint(0, len(item_add_list_t)-1)
            add_item_idx = item_add_list_t[random_idx]

            self.R_t[user_idx, add_item_idx] = 1.0
            
            random_idx = randint(0, len(item_drop_list_t)-1)
            drop_item_idx = item_drop_list_t[random_idx]

            self.R_t[user_idx, drop_item_idx] = 0.0
            
        
        self.norm_adj_s_sub1 = self.create_adj_mat(self.n_users_s, self.n_items_s, self.R_s)
        self.norm_adj_t_sub1 = self.create_adj_mat(self.n_users_t, self.n_items_t, self.R_t)
       
    #get random number
    def randint_choice(self, high, size=None, replace=True, p=None, exclusion=None):
        a = np.arange(high)
        if exclusion is not None:
            if p is None:
                p = np.ones_like(a)
            else:
                p = np.array(p, copy=True)
            p = p.flatten()
            p[exclusion] = 0
        if p is not None:
            p = p / np.sum(p)
        sample = np.random.choice(a, size=size, replace=replace, p=p)
        return sample
    
    #gererate adjecency matrix
    def create_adj_mat(self, n_users, n_items, R):
        t1 = time()
        adj_mat = sp.dok_matrix((n_users + n_items, n_users + n_items), dtype=np.float32)
        adj_mat = adj_mat.tolil()
        R = R.tolil()

        adj_mat[:n_users, n_users:] = R
        adj_mat[n_users:, :n_users] = R.T
        adj_mat = adj_mat.todok()
        print('already create adjacency matrix', adj_mat.shape, time() - t1)

        t2 = time()

        def normalized_adj_single(adj):
            rowsum = np.array(adj.sum(1))

            d_inv = np.power(rowsum, -1).flatten()
            d_inv[np.isinf(d_inv)] = 0.
            d_mat_inv = sp.diags(d_inv)

            norm_adj = d_mat_inv.dot(adj)
            # norm_adj = adj.dot(d_mat_inv)
            print('generate single-normalized adjacency matrix.')
            return norm_adj.tocoo()

        norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))

        print('already normalize adjacency matrix', time() - t2)
        return norm_adj_mat.tocsr()
    
    #random generate augmentation graph (Node dropout, Edge Addition/Dropout, Subgraph)
    def create_adj_mat_rdn(self, users, items, n_users, n_items):
        n_nodes = n_users + n_items
        if self.aug_type in [0, 1, 2] and self.cl_type == 1:
            if self.aug_type == 0:
                print('Node dropout')
                drop_user_idx = self.randint_choice(n_users, size=int(n_users * self.user_ratio), replace=False)
                drop_item_idx = self.randint_choice(n_items, size=int(n_items * self.item_ratio), replace=False)
                indicator_user = np.ones(n_users, dtype=np.float32)
                indicator_item = np.ones(n_items, dtype=np.float32)
                indicator_user[drop_user_idx] = 0.
                indicator_item[drop_item_idx] = 0.
                diag_indicator_user = sp.diags(indicator_user)
                diag_indicator_item = sp.diags(indicator_item)
                R = sp.csr_matrix(
                    (np.ones_like(users, dtype=np.float32), (users, items)), 
                    shape=(n_users, n_items))
                R_prime = diag_indicator_user.dot(R).dot(diag_indicator_item)
                (user_np_keep, item_np_keep) = R_prime.nonzero()
                ratings_keep = R_prime.data
                tmp_adj = sp.csr_matrix((ratings_keep, (user_np_keep, item_np_keep+n_users)), shape=(n_nodes, n_nodes))
            if self.aug_type in [1, 2]:
                print('Edge or Subgraph')
                keep_idx = self.randint_choice(len(users), size=int(len(users) * self.user_ratio), replace=False)
                user_np = np.array(users)[keep_idx]
                item_np = np.array(items)[keep_idx]
                ratings = np.ones_like(user_np, dtype=np.float32)
                tmp_adj = sp.csr_matrix((ratings, (user_np, item_np+n_users)), shape=(n_nodes, n_nodes))
        else:
            print('Invalid Adjacency Creation Mode')
            
        adj_mat = tmp_adj + tmp_adj.T

        rowsum = np.array(adj_mat.sum(1))
        d_inv = np.power(rowsum, -0.5).flatten()
        d_inv[np.isinf(d_inv)] = 0.
        d_mat_inv = sp.diags(d_inv)
        norm_adj_tmp = d_mat_inv.dot(adj_mat)
        adj_matrix = norm_adj_tmp.dot(d_mat_inv)

        return adj_matrix
    
    #collaborative filtering to get recommendation score
    def get_scores(self,users,pos_items):
        scores = tf.reduce_sum(tf.multiply(users, pos_items),axis = 1)
        return scores
    
    #recommendation loss
    def create_cross_loss(self, users, pos_items,label,scores):
        regularizer = tf.nn.l2_loss(users) + tf.nn.l2_loss(pos_items)
        regularizer = regularizer/self.batch_size

        mf_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=label,logits=scores)

        emb_loss = self.decay * regularizer

        reg_loss = tf.constant(0.0, tf.float32, [1])

        return mf_loss, emb_loss, reg_loss
    
    #contrastive learning loss for the interdomain task (Task 3)
    def calc_ssl_cross_loss(self, users_emb_s, users_s, users_emb_t, users_t):
        user_emb1 = tf.nn.embedding_lookup(users_emb_s, users_s)
        user_emb2 = tf.nn.embedding_lookup(users_emb_t, users_t)

        normalize_user_emb1 = tf.nn.l2_normalize(user_emb1, 1)
        normalize_user_emb2 = tf.nn.l2_normalize(user_emb2, 1)
        normalize_all_user_emb2 = tf.nn.l2_normalize(users_emb_t, 1)
        pos_score_user = tf.reduce_sum(tf.multiply(normalize_user_emb1, normalize_user_emb2), axis=1)
        ttl_score_user = tf.matmul(normalize_user_emb1, normalize_all_user_emb2, transpose_a=False, transpose_b=True)

        pos_score_user = tf.exp(pos_score_user / self.cl_temp)
        ttl_score_user = tf.reduce_sum(tf.exp(ttl_score_user / self.cl_temp), axis=1)

        ssl_loss_user = -tf.reduce_sum(tf.log(pos_score_user / ttl_score_user))
        ssl_loss = self.cl_reg * ssl_loss_user
        return ssl_loss
    
    #contrastive learning loss for the intradomain task (Task 1, Task 2)
    def calc_ssl_loss(self, users_sub1, users_sub2, users, pos_items_1, pos_items_2, pos_items):
        #user-level intradomain task
        if self.cl_mode in ['user_level', 'all_level']:
            user_emb1 = tf.nn.embedding_lookup(users_sub1, users)
            user_emb2 = tf.nn.embedding_lookup(users_sub2, users)

            normalize_user_emb1 = tf.nn.l2_normalize(user_emb1, 1)
            normalize_user_emb2 = tf.nn.l2_normalize(user_emb2, 1)
            normalize_all_user_emb2 = tf.nn.l2_normalize(users_sub2, 1)
            pos_score_user = tf.reduce_sum(tf.multiply(normalize_user_emb1, normalize_user_emb2), axis=1)
            ttl_score_user = tf.matmul(normalize_user_emb1, normalize_all_user_emb2, transpose_a=False, transpose_b=True)

            pos_score_user = tf.exp(pos_score_user / self.cl_temp)
            ttl_score_user = tf.reduce_sum(tf.exp(ttl_score_user / self.cl_temp), axis=1)

            ssl_loss_user = -tf.reduce_sum(tf.log(pos_score_user / ttl_score_user))
        #item-level intradomain task
        if self.cl_mode in ['item_level', 'all_level']:
            item_emb1 = tf.nn.embedding_lookup(pos_items_1, pos_items)
            item_emb2 = tf.nn.embedding_lookup(pos_items_2, pos_items)

            normalize_item_emb1 = tf.nn.l2_normalize(item_emb1, 1)
            normalize_item_emb2 = tf.nn.l2_normalize(item_emb2, 1)
            normalize_all_item_emb2 = tf.nn.l2_normalize(pos_items_2, 1)
            pos_score_item = tf.reduce_sum(tf.multiply(normalize_item_emb1, normalize_item_emb2), axis=1)
            ttl_score_item = tf.matmul(normalize_item_emb1, normalize_all_item_emb2, transpose_a=False, transpose_b=True)
            
            pos_score_item = tf.exp(pos_score_item / self.cl_temp)
            ttl_score_item = tf.reduce_sum(tf.exp(ttl_score_item / self.cl_temp), axis=1)

            ssl_loss_item = -tf.reduce_sum(tf.log(pos_score_item / ttl_score_item))

        if self.cl_mode == 'user_level':
            ssl_loss = self.cl_reg * ssl_loss_user
        elif self.cl_mode == 'item_level':
            ssl_loss = self.cl_reg * ssl_loss_item
        elif self.cl_mode == 'all_level':
            ssl_loss = self.cl_reg * (ssl_loss_user + ssl_loss_item)
        else:
            print('please check your hyperparameter "cl_mode".')
            ssl_loss = 0.0
        
        return ssl_loss
    
    #training process
    def train_model(self, sess, best_hr_s, best_hr_t, norm_adj_s, norm_adj_t):
        #use for early stopping
        stopping_step = 0
        stopping_step_s = 0
        should_stop_s = False
        should_stop_t = False
        
        verbose = 10
        isonebatch = False
        
        #train for the number of epochs round
        for epoch in tqdm(range(self.args.epoch)):
            t1 = time()
            loss, loss_source, loss_target = [],[],[]
            user_input_s,item_input_s,label_s = self.data_generator_s.get_train_instance()
            user_input_t,item_input_t,label_t = self.data_generator_t.get_train_instance()
            train_len_s = len(user_input_s)
            train_len_t = len(user_input_t)
            shuffled_idx_s = np.random.permutation(np.arange(train_len_s))
            train_u_s = user_input_s[shuffled_idx_s]
            train_i_s = item_input_s[shuffled_idx_s]
            train_r_s = label_s[shuffled_idx_s]
            shuffled_idx_t = np.random.permutation(np.arange(train_len_t))
            train_u_t = user_input_t[shuffled_idx_t]
            train_i_t = item_input_t[shuffled_idx_t]
            train_r_t = label_t[shuffled_idx_t]
            n_batch_s = train_len_s // self.args.batch_size + 1
            n_batch_t = train_len_t // self.args.batch_size + 1
            n_batch_max = max(n_batch_s,n_batch_t)
            n_batch_min = min(n_batch_s,n_batch_t)
            
            #convert augmented adjacency matrices to coo matrix as input to the model
            sub_mat_s = {}
            sub_mat_t = {}
            sub_mat_s['adj_indices_sub1'], sub_mat_s['adj_values_sub1'], sub_mat_s['adj_shape_sub1'] = self._convert_csr_to_sparse_tensor_inputs(norm_adj_s)
            
            sub_mat_t['adj_indices_sub1'], sub_mat_t['adj_values_sub1'], sub_mat_t['adj_shape_sub1'] = self._convert_csr_to_sparse_tensor_inputs(norm_adj_t)
            
            # if the number of data in the source domain is more than in the target domain
            if n_batch_s>=n_batch_t:
                print('source domain single train')
                #train a batch data
                for i in range(n_batch_min,n_batch_max):
                    min_idx = i*self.args.batch_size
                    max_idx = np.min([(i+1)*self.args.batch_size,train_len_s])
                    if max_idx<(i+1)*self.args.batch_size:
                        idex = list(range(min_idx,max_idx))+list(np.random.randint(0,train_len_s,(i+1)*self.args.batch_size-max_idx))
                        train_u_batch = train_u_s[idex]
                        train_i_batch = train_i_s[idex]
                        train_r_batch = train_r_s[idex]
                    else:
                        train_u_batch = train_u_s[min_idx: max_idx]
                        train_i_batch = train_i_s[min_idx: max_idx]
                        train_r_batch = train_r_s[min_idx: max_idx]
                    
                    #input data
                    feed_dict = {self.users_s: train_u_batch, self.items_s: train_i_batch, self.label_s:train_r_batch,
                                self.node_dropout: eval(self.args.node_dropout),
                                self.mess_dropout: eval(self.args.mess_dropout),
                                self.isTraining:False}
                    
                    #input data with the augmentation graph in the source domain
                    feed_dict.update({
                        self.sub_mat_s['adj_indices_sub1']: sub_mat_s['adj_indices_sub1'],
                        self.sub_mat_s['adj_values_sub1']: sub_mat_s['adj_values_sub1'],
                        self.sub_mat_s['adj_shape_sub1']: sub_mat_s['adj_shape_sub1']                 
                    })
                    
                    #input data to the model and get the training loss
                    _, batch_loss_source= sess.run([self.opt_s, self.loss_source],
                                    feed_dict=feed_dict)
                    
                    #append training loss
                    loss_source.append(batch_loss_source) 
            else:
                print('target domain single train')
                #train a batch data
                for i in range(n_batch_min,n_batch_max):
                    min_idx = i*self.args.batch_size
                    max_idx = np.min([(i+1)*self.args.batch_size,train_len_t])
                    if max_idx<(i+1)*self.args.batch_size:
                        idex = list(range(min_idx,max_idx))+list(np.random.randint(0,train_len_t,(i+1)*self.args.batch_size-max_idx))
                        train_u_batch = train_u_t[idex]
                        train_i_batch = train_i_t[idex]
                        train_r_batch = train_r_t[idex]
                    else:
                        train_u_batch = train_u_t[min_idx: max_idx]
                        train_i_batch = train_i_t[min_idx: max_idx]
                        train_r_batch = train_r_t[min_idx: max_idx]
                    
                    #input data
                    feed_dict = {self.users_t: train_u_batch, self.items_t: train_i_batch, self.label_t:train_r_batch,
                                self.node_dropout: eval(self.args.node_dropout),
                                self.mess_dropout: eval(self.args.mess_dropout),
                                self.isTraining:False}
                    
                    #input data with the augmentation graph in the target domain
                    feed_dict.update({
                        self.sub_mat_t['adj_indices_sub1']: sub_mat_t['adj_indices_sub1'],
                        self.sub_mat_t['adj_values_sub1']: sub_mat_t['adj_values_sub1'],
                        self.sub_mat_t['adj_shape_sub1']: sub_mat_t['adj_shape_sub1']                    
                    })
                    
                    #input data to the model and get the training loss
                    _, batch_loss_target= sess.run([self.opt_t, self.loss_target],
                                    feed_dict=feed_dict)
                    #append training loss
                    loss_target.append(batch_loss_target)
            
            print('cross domain dual train')
            for i in range(n_batch_min):
                min_idx = i*self.args.batch_size
                max_idx = np.min([(i+1)*self.args.batch_size,min([train_len_s,train_len_t])])
                if max_idx<(i+1)*self.args.batch_size:
                    idex = list(range(min_idx,max_idx))+list(np.random.randint(0,min([train_len_s,train_len_t]),(i+1)*self.args.batch_size-max_idx))
                    train_u_batch_s = train_u_s[idex]
                    train_i_batch_s = train_i_s[idex]
                    train_r_batch_s = train_r_s[idex]                
                    train_u_batch_t = train_u_t[idex]
                    train_i_batch_t = train_i_t[idex]
                    train_r_batch_t = train_r_t[idex]
                else:
                    train_u_batch_s = train_u_s[min_idx: max_idx]
                    train_i_batch_s = train_i_s[min_idx: max_idx]
                    train_r_batch_s = train_r_s[min_idx: max_idx]
                    train_u_batch_t = train_u_t[min_idx: max_idx]
                    train_i_batch_t = train_i_t[min_idx: max_idx]
                    train_r_batch_t = train_r_t[min_idx: max_idx]
                
                #get overlapping user
                overlapped_user = list(set(train_u_batch_s) & set(train_u_batch_t))
                
                #input data with overlapping user
                feed_dict = {self.users_s: train_u_batch_s, self.items_s: train_i_batch_s, self.label_s:train_r_batch_s,
                            self.users_t: train_u_batch_t, self.items_t: train_i_batch_t, self.label_t:train_r_batch_t,
                            self.node_dropout: eval(self.args.node_dropout),
                            self.mess_dropout: eval(self.args.mess_dropout),
                            self.isTraining:True,
                            self.overlapped_users: overlapped_user}       
                
                #input data with the augmentation graph in the source domain and the target domain
                feed_dict.update({
                    self.sub_mat_s['adj_indices_sub1']: sub_mat_s['adj_indices_sub1'],
                    self.sub_mat_s['adj_values_sub1']: sub_mat_s['adj_values_sub1'],
                    self.sub_mat_s['adj_shape_sub1']: sub_mat_s['adj_shape_sub1'],
                    self.sub_mat_t['adj_indices_sub1']: sub_mat_t['adj_indices_sub1'],
                    self.sub_mat_t['adj_values_sub1']: sub_mat_t['adj_values_sub1'],
                    self.sub_mat_t['adj_shape_sub1']: sub_mat_t['adj_shape_sub1']
                })
                
                #run the model with data
                _, batch_loss,batch_loss_source,batch_loss_target= sess.run([self.opt, self.loss, self.loss_source, self.loss_target],
                                feed_dict=feed_dict)
                
                #append training loss
                loss.append(batch_loss)
                loss_source.append(batch_loss_source)
                loss_target.append(batch_loss_target)
            
            #get mean loss
            losses = np.mean(loss)
            loss_source = np.mean(loss_source)
            loss_target = np.mean(loss_target)
            
            #print loss
            print("Mean loss in this epoch is: {} , mean source loss is: {},mean target loss is: {}".format(losses,loss_source,loss_target))
    
            t2 = time()
    
            #------------------testing on the validation set---------------          
            #find out the best epoch with (Hit Ratio), (NDCG), (MRR)
            max_epoch_s = 0
            max_hit_s = [0.0,0.0]
            max_ndcg_s = [0.0,0.0]
            max_mrr_s = [0.0,0.0]
            
            for valid_user_list_s, data_status_s in zip(self.split_ids_s, self.split_status_s):
                valid_ratings = np.array(self.data_generator_s.validRatingList)
                valid_negatives = self.data_generator_s.validNegativeList

                hits, ndcgs, mrrs = [],[],[]
                users,items,user_gt_item = self.get_test_instance(valid_user_list_s, valid_ratings, valid_negatives)
                num_valid_batches = len(users)//self.args.batch_size
                # bar_test = ProgressBar('test_'+_data_type, max=num_test_batches+1)
                # sample_id = 0
                valid_preds = []

#                 for current_batch in tqdm(range(num_test_batches+1),desc='test_source',ascii=True):
                for current_batch in range(num_valid_batches+1):
                    # bar_test.next()
                    min_idx = current_batch*self.args.batch_size
                    max_idx = np.min([(current_batch+1)*self.args.batch_size,len(users)])
                    batch_input_users = users[min_idx:max_idx]
                    batch_input_items = items[min_idx:max_idx]
                    feed_dict = {self.users_s:batch_input_users,
                                 self.items_s:batch_input_items,
                                 self.node_dropout: [0.]*len(eval(self.layer_size)),
                                 self.mess_dropout: [0.]*len(eval(self.layer_size)),
                                 self.isTraining:True}
                    predictions, user_embedding_s, item_embedding_s = sess.run([self.scores_s,self.u_g_embeddings_s,self.i_g_embeddings_s], feed_dict=feed_dict)
                    valid_preds.extend(predictions)
                assert len(valid_preds)==len(users),'source num is not equal'

                user_item_preds = defaultdict(lambda: defaultdict(float))
                user_pred_gtItem = defaultdict(float)
                for sample_id in range(len(users)):
                    user = users[sample_id]
                    item = items[sample_id]
                    pred = valid_preds[sample_id]  # [pos_prob, neg_prob]
                    user_item_preds[user][item] = pred
                for user in user_item_preds.keys():
                    item_pred = user_item_preds[user]
                    hrs,nds,mrs=[],[],[]
                    for k in self.Ks:
                        ranklist = heapq.nlargest(k, item_pred, key=item_pred.get)
                        hr = self.getHitRatio(ranklist, user_gt_item[user])
                        ndcg = self.getNDCG(ranklist, user_gt_item[user])
                        mrr = self.getMRR(ranklist, user_gt_item[user])
                        hrs.append(hr)
                        nds.append(ndcg)
                        mrs.append(mrr)
                    hits.append(hrs)
                    ndcgs.append(nds)
                    mrrs.append(mrs)
                hr_s, ndcg_s, mrr_s = np.array(hits).mean(axis=0), np.array(ndcgs).mean(axis=0), np.array(mrrs).mean(axis=0)    
                #recommendation performance on validation set
#                 self.print_test_result(epoch, hr_s, ndcg_s, mrr_s, t2-t1, time()-t2, losses, loss_source, loss_target, 'source', data_status_s)
            t3 = time()
            
            #find out the best epoch with (Hit Rate), (NDCG), (MRR)
            max_hit_t = [0.0,0.0]
            max_ndcg_t = [0.0,0.0]
            max_mrr_t = [0.0,0.0]
            max_epoch_t = 0
            
            #get validation data and test
            for valid_user_list_t, data_status_t in zip(self.split_ids_t,self.split_status_t):
                valid_ratings = np.array(self.data_generator_t.validRatingList)
                valid_negatives = self.data_generator_t.validNegativeList

                hits, ndcgs, mrrs = [],[],[]
                users,items,user_gt_item = self.get_test_instance(valid_user_list_t, valid_ratings, valid_negatives)
                num_valid_batches = len(users)//self.args.batch_size
                valid_preds = []

                for current_batch in range(num_valid_batches+1):
                    min_idx = current_batch*self.args.batch_size
                    max_idx = np.min([(current_batch+1)*self.args.batch_size,len(users)])
                    batch_input_users = users[min_idx:max_idx]
                    batch_input_items = items[min_idx:max_idx]
                    feed_dict = {self.users_t:batch_input_users,
                                 self.items_t:batch_input_items,
                                 self.node_dropout: [0.]*len(eval(self.layer_size)),
                                 self.mess_dropout: [0.]*len(eval(self.layer_size)),
                                 self.isTraining:True}
                    predictions, user_embedding_t, item_embedding_t = sess.run([self.scores_t,self.u_g_embeddings_t,self.i_g_embeddings_t], feed_dict=feed_dict)
                    valid_preds.extend(predictions)
                assert len(valid_preds)==len(users),'target num is not equal'

                user_item_preds = defaultdict(lambda: defaultdict(float))
                user_pred_gtItem = defaultdict(float)
                for sample_id in range(len(users)):
                    user = users[sample_id]
                    item = items[sample_id]
                    pred = valid_preds[sample_id]  # [pos_prob, neg_prob]
                    user_item_preds[user][item] = pred
                for user in user_item_preds.keys():
                    item_pred = user_item_preds[user]
                    hrs,nds,mrs=[],[],[]
                    for k in self.Ks:
                        ranklist = heapq.nlargest(k, item_pred, key=item_pred.get)
                        hr = self.getHitRatio(ranklist, user_gt_item[user])
                        ndcg = self.getNDCG(ranklist, user_gt_item[user])
                        mrr = self.getMRR(ranklist, user_gt_item[user])
                        hrs.append(hr)
                        nds.append(ndcg)
                        mrs.append(mrr)
                    hits.append(hrs)
                    ndcgs.append(nds)
                    mrrs.append(mrs)
                hr_t, ndcg_t, mrr_t = np.array(hits).mean(axis=0), np.array(ndcgs).mean(axis=0), np.array(mrrs).mean(axis=0)
                #recommendation performance on validation set
#                 self.print_test_result(epoch, hr_t, ndcg_t, mrr_t, t2-t1, time()-t3, losses, loss_source, loss_target, 'target', data_status_t)
            t4 = time()
            
            #------------------testing on the testing set---------------
            print('\n'+'{:*^40}'.format('source result'))            
            for test_user_list_s, data_status_s in zip(self.split_ids_s, self.split_status_s):
                test_ratings = np.array(self.data_generator_s.ratingList)
                test_negatives = self.data_generator_s.negativeList

                hits, ndcgs, mrrs = [],[],[]
                users,items,user_gt_item = self.get_test_instance(test_user_list_s, test_ratings, test_negatives)
                num_test_batches = len(users)//self.args.batch_size
                # bar_test = ProgressBar('test_'+_data_type, max=num_test_batches+1)
                # sample_id = 0
                test_preds = []

#                 for current_batch in tqdm(range(num_test_batches+1),desc='test_source',ascii=True):
                for current_batch in range(num_test_batches+1):
                    # bar_test.next()
                    min_idx = current_batch*self.args.batch_size
                    max_idx = np.min([(current_batch+1)*self.args.batch_size,len(users)])
                    batch_input_users = users[min_idx:max_idx]
                    batch_input_items = items[min_idx:max_idx]
                    feed_dict = {self.users_s:batch_input_users,
                                 self.items_s:batch_input_items,
                                 self.node_dropout: [0.]*len(eval(self.layer_size)),
                                 self.mess_dropout: [0.]*len(eval(self.layer_size)),
                                 self.isTraining:True}
                    predictions = sess.run(self.scores_s, feed_dict=feed_dict)
                    test_preds.extend(predictions)
                assert len(test_preds)==len(users),'source num is not equal'

                user_item_preds = defaultdict(lambda: defaultdict(float))
                user_pred_gtItem = defaultdict(float)
                for sample_id in range(len(users)):
                    user = users[sample_id]
                    item = items[sample_id]
                    pred = test_preds[sample_id]  # [pos_prob, neg_prob]
                    user_item_preds[user][item] = pred
                for user in user_item_preds.keys():
                    item_pred = user_item_preds[user]
                    hrs,nds,mrs=[],[],[]
                    for k in self.Ks:
                        ranklist = heapq.nlargest(k, item_pred, key=item_pred.get)
                        hr = self.getHitRatio(ranklist, user_gt_item[user])
                        ndcg = self.getNDCG(ranklist, user_gt_item[user])
                        mrr = self.getMRR(ranklist, user_gt_item[user])
                        hrs.append(hr)
                        nds.append(ndcg)
                        mrs.append(mrr)
                    hits.append(hrs)
                    ndcgs.append(nds)
                    mrrs.append(mrs)
                test_hr_s, test_ndcg_s, test_mrr_s = np.array(hits).mean(axis=0), np.array(ndcgs).mean(axis=0), np.array(mrrs).mean(axis=0)    
                self.print_test_result(epoch, test_hr_s, test_ndcg_s, test_mrr_s, t2-t1, time()-t4, losses, loss_source, loss_target, 'source', data_status_s)
            t5 = time()
            
            print('\n'+'{:*^40}'.format('target result'))
            for test_user_list_t, data_status_t in zip(self.split_ids_t,self.split_status_t):
                test_ratings = np.array(self.data_generator_t.ratingList)
                test_negatives = self.data_generator_t.negativeList

                hits, ndcgs, mrrs = [],[],[]
                users,items,user_gt_item = self.get_test_instance(test_user_list_s, test_ratings, test_negatives)
                num_test_batches = len(users)//self.args.batch_size
                # bar_test = ProgressBar('test_'+_data_type, max=num_test_batches+1)
                # sample_id = 0
                test_preds = []

#                 for current_batch in tqdm(range(num_test_batches+1),desc='test_target',ascii=True):
                for current_batch in range(num_test_batches+1):
                    # bar_test.next()
                    min_idx = current_batch*self.args.batch_size
                    max_idx = np.min([(current_batch+1)*self.args.batch_size,len(users)])
                    batch_input_users = users[min_idx:max_idx]
                    batch_input_items = items[min_idx:max_idx]
                    feed_dict = {self.users_t:batch_input_users,
                                 self.items_t:batch_input_items,
                                 self.node_dropout: [0.]*len(eval(self.layer_size)),
                                 self.mess_dropout: [0.]*len(eval(self.layer_size)),
                                 self.isTraining:True}
                    predictions = sess.run(self.scores_t, feed_dict=feed_dict)
                    test_preds.extend(predictions)
                assert len(test_preds)==len(users),'target num is not equal'

                user_item_preds = defaultdict(lambda: defaultdict(float))
                user_pred_gtItem = defaultdict(float)
                for sample_id in range(len(users)):
                    user = users[sample_id]
                    item = items[sample_id]
                    pred = test_preds[sample_id]  # [pos_prob, neg_prob]
                    user_item_preds[user][item] = pred
                for user in user_item_preds.keys():
                    item_pred = user_item_preds[user]
                    hrs,nds,mrs=[],[],[]
                    for k in self.Ks:
                        ranklist = heapq.nlargest(k, item_pred, key=item_pred.get)
                        hr = self.getHitRatio(ranklist, user_gt_item[user])
                        ndcg = self.getNDCG(ranklist, user_gt_item[user])
                        mrr = self.getMRR(ranklist, user_gt_item[user])
                        hrs.append(hr)
                        nds.append(ndcg)
                        mrs.append(mrr)
                    hits.append(hrs)
                    ndcgs.append(nds)
                    mrrs.append(mrs)
                test_hr_t, test_ndcg_t, test_mrr_t = np.array(hits).mean(axis=0), np.array(ndcgs).mean(axis=0), np.array(mrrs).mean(axis=0)
                self.print_test_result(epoch, test_hr_t, test_ndcg_t, test_mrr_t, t2-t1, time()-t5, losses, loss_source, loss_target, 'target', data_status_t)
            t6 = time()            
            
            #reward, we use the differences as the main reward to update the DDQN model
            if hr_s[0] > max_hit_s[0] and hr_s[1] > max_hit_s[1]:
                max_hit_s = hr_s
                max_ndcg_s = ndcg_s
                max_mrr_s = mrr_s                
                
            if hr_t[0] > max_hit_t[0] and hr_t[1] > max_hit_t[1]:
                max_hit_t = hr_t
                max_ndcg_t = ndcg_t
                max_mrr_t = mrr_t
            
            #take the embeddings as state
            embeddings = np.concatenate((user_embedding_s, item_embedding_s, user_embedding_t, item_embedding_t), axis=0)
            
            self.loss_loger.append(loss)
            self.ndcg_loger.append(ndcg_s)
            self.hit_loger.append(hr_s)
            self.mrr_loger.append(mrr_s)

            self.ndcg_loger_t.append(ndcg_t)
            self.hit_loger_t.append(hr_t)
            self.mrr_loger_t.append(mrr_t)

        return embeddings, max_hit_s, max_ndcg_s, max_mrr_s, max_hit_t, max_ndcg_t, max_mrr_t
                

    #print validation or testing result
    def print_test_result(self, epoch, hr, ndcg, mrr, train_time, test_time, losses, loss_source, loss_target, domain_type, data_status):
        """Print one epoch's evaluation metrics when verbose logging is enabled.

        hr/ndcg/mrr are per-K metric lists; train_time/test_time are seconds;
        losses/loss_source/loss_target are the epoch-mean training losses.
        domain_type is accepted but not used in the message — presumably kept
        for call-site symmetry; confirm before removing. Prints nothing when
        self.args.verbose <= 0.
        """
        if self.args.verbose <= 0:
            return
        # Render each metric list as e.g. "['0.5000', '0.1234']" (4 decimals).
        fmt_list = lambda vals: str(['%.4f' % v for v in vals])
        perf_str = 'Epoch %d [%.1fs + %.1fs]: train==[%.4f=%.4f + %.4f], hit=%s, ndcg=%s, mrr=%s at %s' % \
                (epoch, train_time, test_time, losses, loss_source, loss_target,
                 fmt_list(hr), fmt_list(ndcg), fmt_list(mrr), data_status)
        print(perf_str)
    
    #build validation or testing data
    def get_test_instance(self, test_user_list, test_ratings, test_negatives):
        """Assemble (user, item) pairs for ranked (leave-one-out style) evaluation.

        For each index in test_user_list, every sampled negative item is paired
        with the user first, followed by that user's single ground-truth item,
        so each user contributes len(negatives)+1 consecutive rows.

        Returns:
            (np.ndarray of user ids, np.ndarray of item ids,
             dict mapping user id -> ground-truth item id)
        """
        user_ids, item_ids = [], []
        user_gt_item = {}
        for idx in test_user_list:
            record = test_ratings[idx]
            uid, gt_item = record[0], record[1]
            user_gt_item[uid] = gt_item
            negatives = test_negatives[idx]
            # Negatives first, then the ground-truth item, matching the
            # per-user ordering the evaluation loop expects.
            item_ids.extend(negatives)
            item_ids.append(gt_item)
            user_ids.extend([uid] * (len(negatives) + 1))
        return np.array(user_ids), np.array(item_ids), user_gt_item
    
    #HitRatio
    def getHitRatio(self, ranklist, gtItem):
        """Hit ratio for one user: 1 if the ground-truth item is in the ranked list, else 0."""
        return 1 if gtItem in ranklist else 0
    
    #MRR
    def getMRR(self, ranklist, gtItem):
        """Reciprocal rank (1-based) of the ground-truth item in the list, or 0 if absent."""
        for rank, candidate in enumerate(ranklist, start=1):
            if candidate == gtItem:
                return 1 / rank
        return 0
    
    #NDCG
    def getNDCG(self, ranklist, gtItem):
        """NDCG with a single relevant item: log(2)/log(pos+2) at the 0-based hit position, or 0 if absent."""
        for pos, candidate in enumerate(ranklist):
            if candidate == gtItem:
                # Equals 1/log2(pos+2): the standard DCG discount for one hit.
                return math.log(2) / math.log(pos + 2)
        return 0