import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Initialize the translation model and tokenizer.
# model_name = "Helsinki-NLP/opus-mt-zh-en"  # Chinese-to-English pretrained model (Hub ID)
# NOTE(review): hard-coded local Windows path — presumably a local copy of the
# opus-mt-zh-en checkpoint above; won't load on other machines. TODO: make configurable.
model_name="D:/mygitisoft/python-ai/pytorch/cn2en/"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Chinese source sentences (dialogue prompts) and their reply sentences,
# both of which get machine-translated to English below.
input_sentences = [
    "你好啊",
    "你多大了",
    "我想去吃午饭",
    "你给我讲个故事",
    "您好，我叫王鑫，很高兴认识你",
]
output_sentences = [
    "我很好",
    "我今年18岁了",
    "正好，我们一起去",
    "从前有座山，山上有座庙，庙里有两个和尚",
    "我也很高兴认识你",
]

def _translate(sentence: str) -> str:
    """Translate one Chinese sentence to English with the loaded seq2seq model.

    Runs under torch.no_grad() since this is pure inference — avoids
    building an autograd graph for every generate() call.
    """
    inputs = tokenizer(sentence, return_tensors="pt")
    with torch.no_grad():
        output = model.generate(**inputs)
    return tokenizer.decode(output[0], skip_special_tokens=True)


# Translate the input sentences to English
translated_inputs = []
for sentence in input_sentences:
    translated_input = _translate(sentence)
    translated_inputs.append(translated_input)
    print(f"Translated Input: {translated_input}")

# Translate the output (reply) sentences to English
translated_outputs = []
for sentence in output_sentences:
    translated_output = _translate(sentence)
    translated_outputs.append(translated_output)
    print(f"Translated Output: {translated_output}")