import nltk
from nltk.translate.bleu_score import sentence_bleu
import jieba # 使用jieba进行分词

def bleu_score_evaluation(generated_summary, reference_summary):
    """Compute the sentence-level BLEU score of a generated summary
    against a single reference summary.

    Both texts are segmented with jieba (Chinese word segmentation)
    before scoring.

    Args:
        generated_summary: Candidate (hypothesis) summary string.
        reference_summary: Reference summary string.

    Returns:
        float: BLEU score in [0.0, 1.0].
    """
    # NOTE: no nltk.download() needed here — sentence_bleu is pure
    # Python and uses no NLTK corpora ('punkt' is only required by
    # nltk.word_tokenize, which this function does not call).

    def jieba_tokenize(text):
        # jieba.cut yields tokens; sentence_bleu expects token *lists*.
        # Passing a space-joined string would make NLTK iterate it
        # character by character, corrupting the score.
        return list(jieba.cut(text))

    hyp_tokens = jieba_tokenize(generated_summary)
    ref_tokens = jieba_tokenize(reference_summary)

    # sentence_bleu signature is (references, hypothesis): the first
    # argument is a list of tokenized reference sentences, the second
    # the tokenized hypothesis. The original call had them reversed.
    return sentence_bleu([ref_tokens], hyp_tokens)


