from multiprocessing import Pool

import nltk
import os
import random
from nltk.translate.bleu_score import SmoothingFunction
from utils.forData import pre_process
from abc import abstractmethod
import numpy as np


class Metrics:
    """Abstract base class for text-generation evaluation metrics.

    Subclasses implement ``get_score`` / ``reset``; this base only carries
    a display name for reporting.
    """

    def __init__(self, name='Metric'):
        self.name = name

    def get_name(self):
        """Return the metric's display name."""
        return self.name

    def set_name(self, name):
        """Replace the metric's display name."""
        self.name = name

    @abstractmethod
    def get_score(self):
        """Compute and return the metric value (subclass responsibility)."""
        pass

    @abstractmethod
    def reset(self):
        """Reload / reset the data the metric is evaluated on (subclass responsibility)."""
        pass


class BLEU(Metrics):
    """BLEU / Self-BLEU metric over tokenized corpora.

    ``test_text`` holds hypothesis sequences, ``real_text`` the reference
    corpus; both are lists of token lists (set via ``__init__`` or ``reset``).
    """

    def __init__(self, name=None, test_text=None, real_text=None, gram=3, portion=1, if_use=False):
        # isinstance is the idiomatic (and subclass-friendly) type check
        assert isinstance(gram, (int, list)), 'Gram format error!'
        super(BLEU, self).__init__('%s-%s' % (name, gram))

        self.if_use = if_use
        self.test_text = test_text
        self.real_text = real_text
        # normalize to a list of n-gram orders
        self.gram = [gram] if isinstance(gram, int) else gram
        self.sample_size = 200  # BLEU scores remain nearly unchanged for self.sample_size >= 200
        self.reference = None
        self.is_first = True
        self.portion = portion  # how many portions to use in the evaluation, default to use the whole test dataset

    def get_score(self, is_fast=False, given_gram=None):
        """
        Get BLEU scores.
        :param is_fast: Fast mode (parallel evaluation with a process pool)
        :param given_gram: Calculate specific n-gram BLEU score; None = all of ``self.gram``
        :return: 0 when disabled; a single score for ``given_gram``; else a list of scores
        """
        if not self.if_use:
            return 0
        if self.is_first:
            # prime the shuffled reference sample on first use
            self.get_reference()
            self.is_first = False
        if is_fast:
            return self.get_bleu_fast(given_gram)
        return self.get_bleu(given_gram)

    def reset(self, test_text=None, real_text=None):
        """Swap in new hypothesis/reference corpora (a falsy argument keeps the old value)."""
        self.test_text = test_text if test_text else self.test_text
        self.real_text = real_text if real_text else self.real_text

    def get_reference(self):
        """Return (and cache) a freshly shuffled ``portion`` of the reference corpus."""
        reference = self.real_text.copy()

        # randomly choose a portion of test data
        # In-place shuffle of the shallow copy; the original list order is untouched
        random.shuffle(reference)
        reference = reference[:int(self.portion * len(reference))]
        self.reference = reference
        return reference

    def _avg_bleu(self, reference, ngram):
        """Average sentence-BLEU of the first ``sample_size`` hypotheses for one n-gram order."""
        weight = tuple(1. / ngram for _ in range(ngram))
        scores = [self.cal_bleu(reference, hypothesis, weight)
                  for hypothesis in self.test_text[:self.sample_size]]
        return round(sum(scores) / len(scores), 3)

    def get_bleu(self, given_gram=None):
        """Sequential BLEU; see ``get_score`` for the return convention."""
        if given_gram is not None:  # for single gram
            return self._avg_bleu(self.get_reference(), given_gram)
        # for multiple gram: re-sample the reference for each order,
        # matching the original per-gram shuffling behavior
        return [self._avg_bleu(self.get_reference(), ngram) for ngram in self.gram]

    @staticmethod
    def cal_bleu(reference, hypothesis, weight):
        """Smoothed sentence-level BLEU for a single hypothesis."""
        return nltk.translate.bleu_score.sentence_bleu(reference, hypothesis, weight,
                                                       smoothing_function=SmoothingFunction().method1)

    def get_bleu_fast(self, given_gram=None):
        """Parallel BLEU; one shared reference sample for all n-gram orders."""
        reference = self.get_reference()
        if given_gram is not None:  # for single gram
            return self.get_bleu_parallel(ngram=given_gram, reference=reference)
        # for multiple gram
        return [self.get_bleu_parallel(ngram=ngram, reference=reference) for ngram in self.gram]

    def get_bleu_parallel(self, ngram, reference):
        """Process-pool version of ``_avg_bleu``.

        The context manager guarantees the pool is torn down even if a
        worker raises (the original leaked the pool on exceptions).
        """
        weight = tuple(1. / ngram for _ in range(ngram))
        with Pool(os.cpu_count()) as pool:
            pending = [pool.apply_async(self.cal_bleu, args=(reference, hypothesis, weight))
                       for hypothesis in self.test_text[:self.sample_size]]
            scores = [task.get() for task in pending]
        return round(sum(scores) / len(scores), 3)


# def approximate(d):
#     unit = 0.25
#     flag = True
#     if d < 0:
#         flag = False
#         d = -d
#     quotient = int(d / unit)
#     remainder = d % unit
#
#     if flag:
#         if remainder < unit / 2:
#             return unit * quotient
#         else:
#             return unit * (quotient + 1)
#     else:
#         if remainder < unit / 2:
#             return -(unit * quotient)
#         else:
#             return -(unit * (quotient + 1))
#
#
# def pre_process(path):
#     data = np.load(path, allow_pickle=True)
#
#     data_T = data.T
#     data_T[3] = np.round(data_T[3])
#
#     data_T[5] = np.round(data_T[5])
#     for pitch in range(0, len(data_T[5])):
#         for sentence in range(0, len(data_T[5][pitch])):
#             if data_T[5][pitch][sentence] < 0:
#                 data_T[5][pitch][sentence] = 0
#
#     for pitch in range(0, len(data_T[4])):
#         for sentence in range(0, len(data_T[4][pitch])):
#             temp = approximate(data_T[4][pitch][sentence])
#             if temp <= 0.25:
#                 temp = 0.25
#             data_T[4][pitch][sentence] = temp
#
#     data = data_T.T
#     return data


# out_data=pre_process('/home/b8313/coding/music/melody-generator-gan/src/sangle_save/genera_0.npy')


def get_from_zero_list(test_text):
    """Shift every sequence by its own integer mean so values start near zero.

    :param test_text: 2-D array-like of numbers, shape (n_sequences, seq_len)
    :return: list of lists; each row minus that row's integer-truncated mean
    """
    # np.int was deprecated in NumPy 1.20 and removed in 1.24 -- the builtin
    # int dtype is the supported equivalent and preserves the truncation.
    row_means = np.mean(test_text, axis=1, dtype=int)

    # np.asarray keeps this working for plain nested lists as well as ndarrays
    return [(np.asarray(row) - mean).tolist()
            for row, mean in zip(test_text, row_means)]




# aa=get_from_zero_list(real_text)





if __name__ == '__main__':
    downloadbase = r'C:\Users\masaikk\Downloads'

    # Two pre-processed generated-melody dumps: the first is scored against
    # its own ground-truth column, the second serves as the Self-BLEU sample.
    out_data = pre_process(
        r'/home/b8313/coding/music/music_essay/outputdata/pureGRU/otherGRU/genera_gru_0.npy')
    out_data2 = pre_process(
        r'/home/b8313/coding/music/music_essay/outputdata/pureGRU/otherGRU/genera_gru_5.npy')

    # NOTE(review): column 3 appears to hold generated sequences and column 0
    # the real ones -- confirm against pre_process's output layout.
    test_text = out_data.T[3].T
    real_text = out_data.T[0].T
    test_text_for_self = out_data2.T[3].T

    blue = BLEU('BLEU', gram=[2, 3, 4, 5], if_use=True)
    music_bleu = BLEU('music-BLEU', gram=[2, 3, 4, 5], if_use=True)
    self_bleu = BLEU('Self-BLEU', gram=[2, 3, 4], if_use=True)

    music_bleu_test = get_from_zero_list(real_text)

    blue.reset(test_text=test_text.tolist(), real_text=real_text.tolist())
    music_bleu.reset(test_text=get_from_zero_list(test_text),
                     real_text=get_from_zero_list(real_text))
    self_bleu.reset(test_text=test_text.tolist(), real_text=test_text_for_self.tolist())

    print('bleu :{}'.format(blue.get_score()))
    print('self bleu :{}'.format(self_bleu.get_score()))

    print('music bleu :{}'.format(music_bleu.get_score()))