# -*- coding: utf-8 -*-
# @Author: lidongdong
# @time  : 19-1-25 上午11:36
# @file  : metric_utils.py

"""
metric utils:
this module is used to evaluate the current model performance
"""

from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import corpus_bleu
import numpy as np

# --- Sentence-level BLEU demo ---
# Fixes over the original:
#   1. `print score` was Python 2 statement syntax — a SyntaxError under
#      Python 3 and inconsistent with the `print(score)` call below.
#   2. `['this', 'is' 'test']` was missing a comma: implicit string
#      concatenation produced the single token 'istest' instead of the
#      intended two tokens 'is', 'test'.

# Two pre-tokenized reference translations and one candidate sentence.
reference = [['this', 'is', 'a', 'test'], ['this', 'is', 'test']]
candidate = ['this', 'is', 'a', "sturang", "metric"]
# weights=(0.5, 0.5): score uses only 1-grams and 2-grams, equally weighted.
score = sentence_bleu(reference, candidate, weights=(0.5, 0.5))
print(score)

# --- Corpus-level BLEU demo ---
# `references` holds one list of reference translations per candidate.
# With the missing comma restored, every inner token list has length 4
# ("<pad>" pads the 3-token reference), so np.asarray builds a regular
# (2, 2, 4) array instead of a ragged object array.
references = np.asarray([
    [['this', 'is', 'a', 'test'], ['this', 'is', 'test', "<pad>"]],
    [['this', 'is', 'a', 'test'], ['this', 'is', 'test', "<pad>"]],
])
candidates = np.asarray([['this', 'is', 'a'], ['this', 'is', 'a']])
score = corpus_bleu(references, candidates, weights=(0.5, 0.5))
print(score)
