
import jieba
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
import re
from googlemadlad400_3b_mt import Translator
from qwen3_1_7b import translate_text
from modelscope.msdatasets import MsDataset
from tqdm import tqdm

# Load the WMT Chinese-to-English newstest split and pull the raw arrow
# columns out of the ModelScope wrapper: column 0 is the Chinese source,
# column 1 is the English reference.
ds = MsDataset.load('iic/WMT-Chinese-to-English-Machine-Translation-newstest', subset_name='default', split='test')
temp_zh = ds.ds_instance.data.table.columns[0].chunks[0]  # Chinese sources
temp_en = ds.ds_instance.data.table.columns[1].chunks[0]  # English references

# Running totals of the per-sentence 1- to 4-gram BLEU scores.
sum_bleu1, sum_bleu2, sum_bleu3, sum_bleu4 = 0, 0, 0, 0


translator = Translator()  # googlemadlad400-3b-mt
# translator = TextTranslator()  # Helsinki-NLP opus-mt-mul-en

# Hoist loop invariants: compile the tag-stripping regex once and reuse a
# single smoothing function instead of re-creating both every iteration.
think_pattern = re.compile(r'<think>.*?</think>', flags=re.DOTALL)
smoother = SmoothingFunction().method1

for idx, source in enumerate(tqdm(temp_zh, desc="Processing")):
    print("中文原文:", str(source))
    target = str(temp_en[idx])
    print("英文原文:", target)
    # text = translate_text(str(source))  # qwen3-1.7b
    text = translator.translate(str(source), "2en")  # googlemadlad400-3b-mt
    # text = translator.translate(str(source))  # Helsinki-NLP opus-mt-mul-en
    # Strip any <think>...</think> reasoning block the model may emit.
    inference = think_pattern.sub('', text).strip()
    print("翻译文本:", inference)

    # Tokenize reference and hypothesis with jieba. (Both are English here;
    # jieba largely falls back to whitespace/ASCII segmentation for Latin
    # text, so it doubles as a simple tokenizer — TODO confirm this is the
    # intended tokenization for English BLEU.)
    target_fenci = ' '.join(jieba.cut(target))
    inference_fenci = ' '.join(jieba.cut(inference))

    # sentence_bleu expects a list of tokenized references and one tokenized
    # candidate, e.g. reference = [['this', 'is', 'a', 'duck']].
    reference = [target_fenci.split()]   # gold translation(s)
    candidate = inference_fenci.split()  # model output

    # NOTE(review): each weight vector isolates a single n-gram order, so
    # these are *individual* n-gram precisions despite the "Cumulate" labels
    # printed below (cumulative 2-gram BLEU would be weights=(0.5, 0.5, 0, 0)).
    score1 = sentence_bleu(reference, candidate, weights=(1, 0, 0, 0), smoothing_function=smoother)
    sum_bleu1 += score1
    score2 = sentence_bleu(reference, candidate, weights=(0, 1, 0, 0), smoothing_function=smoother)
    sum_bleu2 += score2
    score3 = sentence_bleu(reference, candidate, weights=(0, 0, 1, 0), smoothing_function=smoother)
    sum_bleu3 += score3
    score4 = sentence_bleu(reference, candidate, weights=(0, 0, 0, 1), smoothing_function=smoother)
    sum_bleu4 += score4

    print("********************************")
    print('Cumulate 1-gram :%f' % score1)
    print('Cumulate 2-gram :%f' % score2)
    print('Cumulate 3-gram :%f' % score3)
    print('Cumulate 4-gram :%f' % score4)
    print("********************************")


# Report the average per-sentence BLEU score over the whole test set.
num_sentences = len(temp_zh)

print("########################################")

print("**********average**************")
print('Cumulate 1-gram :%f' % (sum_bleu1 / num_sentences))
print('Cumulate 2-gram :%f' % (sum_bleu2 / num_sentences))
print('Cumulate 3-gram :%f' % (sum_bleu3 / num_sentences))
print('Cumulate 4-gram :%f' % (sum_bleu4 / num_sentences))
print("***********average**************")
