import torch
import torch.nn as nn
import sys
import numpy as np


class Evaluator:
    """Tracks top-K ranking metrics (HR@K, NDCG@K, MRR@K) for a recommender model.

    Two evaluation protocols are supported, selected by ``hyper_params['use_fold']``:

    * sampled (``use_fold=False``): rank the single target item against 100
      uniformly sampled items the user has not interacted with.
    * fold / full ranking (``use_fold=True``): rank every item, mask out the
      user's history and the padding id 0, and score the whole 0-padded
      target list.

    ``evaluate()`` stores the per-K averages in ``cur_*`` and keeps the best
    values ever seen in ``high_*``.
    """

    def __init__(self, dataloader, func, logger, hyper_params):
        """
        dataloader: iterable yielding tuples of input tensors, one batch per step.
        func: forward wrapper, (model, *inputs) -> (pred, history, target) where
              pred is a [batch, n_items] score Tensor, history[i] is the list of
              item ids user i already interacted with, and target[i] is the list
              of ground-truth items (0-padded in the fold protocol).
              NOTE(review): an earlier docstring listed (pred, target, history);
              evaluate() actually unpacks (pred, history, target), which is what
              is documented here.
        logger: logging.Logger-like object (only .info is used).
        hyper_params: dict with keys 'Ks', 'device', 'use_fold',
              'evaluate_users', 'total_items'.
        """
        self.dataloader = dataloader
        self.func = func
        self.Ks = hyper_params['Ks']
        self.device = hyper_params['device']

        # Best per-K metrics observed across all evaluate() calls so far.
        self.high_hr = [0.0] * len(self.Ks)
        self.high_ndcg = [0.0] * len(self.Ks)
        self.high_mrr = [0.0] * len(self.Ks)

        # Per-K metrics of the most recent evaluate() call (None until then).
        self.cur_hr = None
        self.cur_ndcg = None
        self.cur_mrr = None
        self.logger = logger

        self.use_fold = hyper_params['use_fold']
        # Optional cap on how many users to score; None means "all".
        self.evaluate_users = hyper_params['evaluate_users']
        self.hyper_params = hyper_params

    def generate_result_str(self, hr, ndcg, mrr):
        """Format per-K metric lists into a three-line printable summary."""
        Ks = self.Ks
        s = ''
        for name, values in (('HR', hr), ('NDCG', ndcg), ('MRR', mrr)):
            for k, v in zip(Ks, values):
                s += name + '@' + str(k) + ':' + str(v) + ' '
            s += '\n'
        return s

    def get_high_result(self):
        """Return (high_hr, high_ndcg, high_mrr): best metrics seen so far."""
        return self.high_hr, self.high_ndcg, self.high_mrr

    def get_cur_result(self):
        """Return (cur_hr, cur_ndcg, cur_mrr) from the latest evaluate() call."""
        return self.cur_hr, self.cur_ndcg, self.cur_mrr

    def _eval_sampled(self, pred, history, target, i, sample_weights):
        """Score user i by ranking the target against 100 sampled negatives.

        sample_weights: uniform multinomial weights over item ids 0..total_items
        (hoisted out of the per-user loop by the caller; it is constant).
        """
        rated = history[i]
        # Candidate list starts with the ground-truth item at position 0.
        item_idx = [target[i][0]]

        # Sample 100 unique negative items the user has not rated.
        # NOTE(review): id 0 (padding) can be drawn as a negative — confirm
        # whether it should be excluded from sample_weights.
        while len(item_idx) < 101:
            sample_ids = torch.multinomial(
                sample_weights, num_samples=101).numpy()
            sample_ids = [
                x for x in sample_ids if x not in rated and x not in item_idx]
            item_idx.extend(sample_ids)
        item_idx = item_idx[:101]

        cur_pred = pred[i, torch.Tensor(item_idx).long()]

        # Double argsort yields each candidate's rank; position 0 holds the
        # target, so this is the target's 1-based rank among 101 candidates.
        cur_rank = torch.argsort(
            cur_pred, descending=True).argsort()[0].item() + 1

        for j, k in enumerate(self.Ks):
            if cur_rank <= k:
                self.cur_hr[j] += 1
                self.cur_ndcg[j] += 1 / np.log2(cur_rank + 1)
                self.cur_mrr[j] += 1 / cur_rank

    def _eval_fold(self, pred, history, target, i):
        """Score user i by full ranking against the 0-padded target list."""
        for j, k in enumerate(self.Ks):
            best, now_at, dcg, hits = 0.0, 0.0, 0.0, 0.0

            # scatter_ mutates pred[i] in place; re-applying the same mask on
            # later k iterations is idempotent, so sharing pred[i] is safe.
            last_pred = pred[i].scatter_(
                dim=0, index=torch.LongTensor(history[i]).to(self.device), value=-10000000.0)
            last_pred[0] = -10000000.0  # never recommend the padding item

            rec_list = list(torch.argsort(
                last_pred, descending=True)[:k].cpu().numpy())

            first_correct = sys.maxsize
            for movie in target[i]:
                movie = movie.item()
                if movie == 0:  # 0 marks end of the padded target list
                    break
                now_at += 1.0
                # Ideal DCG: first min(len(targets), k) positions all correct.
                if now_at <= k:
                    best += 1.0 / float(np.log2(now_at + 1))

                if movie not in rec_list:
                    continue

                hits += 1.0
                dcg += 1.0 / \
                    float(
                        np.log2(float(rec_list.index(movie) + 2)))
                first_correct = min(
                    first_correct, rec_list.index(movie))

            # Guard: a user with an empty target list (first entry is 0) would
            # otherwise divide by zero; they simply contribute nothing.
            if now_at > 0:
                self.cur_hr[j] += float(hits) / float(now_at)
                self.cur_ndcg[j] += float(dcg) / float(best)
                # With no hit, first_correct stays sys.maxsize and the MRR
                # contribution is effectively 0.
                self.cur_mrr[j] += 1.0 / (first_correct + 1)

    def evaluate(self, model):
        """Run one evaluation pass, updating cur_* averages and high_* bests."""
        print('Start evaluate...')
        self.logger.info('Start evaluate...')
        model.eval()
        Ks = self.Ks

        with torch.no_grad():
            checked_users = 0
            self.cur_hr = [0.0] * len(Ks)
            self.cur_ndcg = [0.0] * len(Ks)
            self.cur_mrr = [0.0] * len(Ks)

            if not self.use_fold:
                # Uniform sampling weights over all item ids; built once here
                # instead of once per user (it never changes).
                sample_weights = torch.ones(
                    self.hyper_params['total_items'] + 1)

            reached_user_cap = False
            for inputs in self.dataloader:
                input_list = [i.to(self.device) for i in inputs]

                # Forward pass through the caller-supplied wrapper.
                pred, history, target = self.func(model, *input_list)
                pred = pred.detach()

                for i in range(pred.shape[0]):
                    if not self.use_fold:
                        self._eval_sampled(
                            pred, history, target, i, sample_weights)
                    else:
                        self._eval_fold(pred, history, target, i)

                    checked_users += 1
                    if self.evaluate_users is not None and checked_users >= self.evaluate_users:
                        reached_user_cap = True
                        break

                if reached_user_cap:
                    break

            # Average over users; guard against an empty evaluation run.
            if checked_users > 0:
                for i in range(len(Ks)):
                    self.cur_hr[i] /= checked_users
                    self.cur_ndcg[i] /= checked_users
                    self.cur_mrr[i] /= checked_users

            for i in range(len(Ks)):
                self.high_hr[i] = max(self.high_hr[i], self.cur_hr[i])
                self.high_ndcg[i] = max(self.high_ndcg[i], self.cur_ndcg[i])
                self.high_mrr[i] = max(self.high_mrr[i], self.cur_mrr[i])

            cur_str = self.generate_result_str(
                self.cur_hr, self.cur_ndcg, self.cur_mrr)

            print(cur_str)
            self.logger.info(
                '----------------------EVALUATE RESULT----------------------')
            self.logger.info(cur_str)

            high_str = self.generate_result_str(
                self.high_hr, self.high_ndcg, self.high_mrr)
            print(high_str)
            self.logger.info(
                '----------------------HIGHEST RESULT----------------------')
            self.logger.info(high_str)
