# from transformers import LlamaTokenizerFast, Llama2ForConditionalGeneration
# Required libraries
import torch
from configparser import ConfigParser
from transformers import MarianMTModel, MarianTokenizer
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

class TranslateHelsinki():
    """Singleton wrapper around a local Helsinki-NLP (MarianMT) zh->en model.

    The model directory is read from the ``helsinki_path`` key in the
    ``[default]`` section of the config file passed to the constructor.
    The tokenizer and model are loaded exactly once, no matter how many
    times the class is instantiated.
    """

    _instance = None      # cached singleton instance

    _PATH_MODEL = None    # filesystem path of the pretrained model directory
    _tokenizer = None     # MarianTokenizer, loaded once
    _model = None         # MarianMTModel, loaded once

    def __new__(cls, path_config):
        # Classic singleton: allocate only on the first call. Python will
        # invoke __init__ on the returned instance afterwards; the guard in
        # __init__ makes repeated initialization a no-op, so there is no
        # need to call __init__ explicitly here (the original did, which
        # ran initialization through a redundant second path).
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, path_config):
        # Idempotent: skip entirely once the model has been loaded.
        # (Use `is not None`, not `!= None` — PEP 8.)
        if self._tokenizer is not None and self._model is not None:
            return
        # Read the model path from the config file.
        config = ConfigParser()
        config.read(path_config, encoding='utf-8')
        self._PATH_MODEL = config.get('default', 'helsinki_path')
        print("helsinki_path from:", self._PATH_MODEL)
        print("init model Helsinki")
        # Load the pretrained translation model and its tokenizer.
        self._tokenizer = MarianTokenizer.from_pretrained(self._PATH_MODEL)
        self._model = MarianMTModel.from_pretrained(self._PATH_MODEL)

    def _translate_one(self, text):
        """Translate a single string and return the decoded English output.

        Shared by testHelsinkiZh2En / testTranslate / translateList, which
        previously duplicated this tokenize -> generate -> decode sequence.
        """
        inputs = self._tokenizer(text, return_tensors="pt")
        # Beam search (4 beams) for better quality on short phrases.
        output_ids = self._model.generate(
            **inputs, max_length=128, num_beams=4, early_stopping=True)
        # Decode and strip special tokens (<pad>, </s>, ...).
        return self._tokenizer.decode(output_ids[0], skip_special_tokens=True)

    def testHelsinkiZh2En(self):
        """Smoke test: translate one Chinese phrase and print the result.

        NOTE(review): the original named the input ``english_text`` and
        printed the labels swapped ("英文原文" for the Chinese input);
        fixed to match the zh->en direction and the other test methods.
        """
        chinese_text = "青桔柠檬百香果茶"
        translated_output = self._translate_one(chinese_text)
        print(f"中文原文: {chinese_text}")
        print(f"英文翻译: {translated_output}")

    def testTranslate(self):
        """Smoke test: translate a small batch of dessert names, printing each."""
        zh_list = ['柠檬芝士蛋糕', '巧克力树莓卷', '巴斯克芝士蛋糕', '蛋黄肉松青团', '双皮奶']
        for zh in zh_list:
            translated_output = self._translate_one(zh)
            print(f"中文原文: {zh}")
            print(f"英文翻译: {translated_output}")
            print("---------------------------------------")

    def translateList(self, zh_list):
        """Translate each Chinese string in *zh_list*.

        Prints every (source, translation) pair and returns the list of
        English translations, in input order.
        """
        en_list = []
        for zh in zh_list:
            translated_output = self._translate_one(zh)
            en_list.append(translated_output)
            print(f"中文原文: {zh}")
            print(f"英文翻译: {translated_output}")
            print("---------------------------------------")
        return en_list



if __name__ == '__main__':
    # Demo: constructing the class twice yields the same singleton instance,
    # so the model is only loaded on the first construction.
    first = TranslateHelsinki('../files/config.inf')
    first.testTranslate()
    first.testHelsinkiZh2En()
    second = TranslateHelsinki('../files/config.inf')
    second.testHelsinkiZh2En()