# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
#
# # 指定本地模型和tokenizer的路径
# model_path = "/data/Helsinki-NLPopus-mt-mul-en"
#
# # 加载本地的tokenizer和model
# try:
#     tokenizer = AutoTokenizer.from_pretrained(model_path)
#     print("Tokenizer loaded successfully.")
# except Exception as e:
#     print(f"Failed to load tokenizer: {e}")
#     exit(1)
#
# try:
#     model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
#     print("Model loaded successfully.")
# except Exception as e:
#     print(f"Failed to load model: {e}")
#     exit(1)
#
# # 输入文本需要加上"translate Chinese to English: "前缀
# input_text = "你是谁？,你呢个做什么？"
# inputs = tokenizer(input_text, return_tensors="pt", padding=True)
#
# # 打印输入张量以进行调试
# print(f"Input tokens: {inputs}")
#
# # 进行推理，调整生成参数
# outputs = model.generate(**inputs, max_length=50, num_beams=4, early_stopping=True)
#
# # 解码输出结果
# translated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
# print(f"Translated text: '{translated_text}'")


from transformers import AutoTokenizer, AutoModelForSeq2SeqLM


class TextTranslator:
    """Translate text with a locally stored Hugging Face seq2seq model.

    Loads the tokenizer and model once at construction time so repeated
    calls to :meth:`translate` reuse them.
    """

    def __init__(self, model_path="/data/Helsinki-NLPopus-mt-mul-en"):
        """Load tokenizer and model from ``model_path``.

        Parameters
        ----------
        model_path : str
            Filesystem path (or hub id) passed to ``from_pretrained``.

        Raises
        ------
        Exception
            Re-raises whatever ``from_pretrained`` raises, after printing
            a diagnostic message identifying which component failed.
        """
        self.model_path = model_path
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(model_path)
            print("Tokenizer loaded successfully.")
        except Exception as e:
            print(f"Failed to load tokenizer: {e}")
            raise

        try:
            self.model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
            print("Model loaded successfully.")
        except Exception as e:
            print(f"Failed to load model: {e}")
            raise

    def translate(self, input_text, max_length=50, num_beams=4, early_stopping=True,
                  verbose=False):
        """Translate ``input_text`` and return the decoded string.

        Parameters
        ----------
        input_text : str
            Text to translate.
        max_length : int
            Maximum length of the generated sequence.
        num_beams : int
            Beam-search width for generation.
        early_stopping : bool
            Stop beam search early when all beams are finished.
        verbose : bool
            When True, print the tokenized inputs for debugging.
            (Previously this debug dump was printed unconditionally.)

        Returns
        -------
        str
            The translated text with special tokens stripped.
        """
        inputs = self.tokenizer(input_text, return_tensors="pt", padding=True)

        # Debug dump of the input tensors is now opt-in instead of always-on.
        if verbose:
            print(f"Input tokens: {inputs}")

        # Run inference; generate() handles no-grad mode internally.
        outputs = self.model.generate(**inputs, max_length=max_length, num_beams=num_beams,
                                      early_stopping=early_stopping)

        # Decode the first (best) beam back to text.
        translated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return translated_text


# Example usage
if __name__ == "__main__":
    model_dir = "/data/Helsinki-NLPopus-mt-mul-en"
    sample_text = "你是谁？,你呢个做什么？"

    engine = TextTranslator(model_dir)
    translation = engine.translate(sample_text)
    print(f"Translated text: '{translation}'")









