
from transformers import AutoTokenizer,  AutoModelForCausalLM
from transformers import pipeline, set_seed
import torch
# Local path to the GPT-2 checkpoint; reused everywhere below so the
# location is defined in exactly one place.
model_name = r'I:\models\gt2'

# Load the tokenizer and model once and share them with the pipeline.
# (The original code passed the path to pipeline() AND called
# from_pretrained() again, reading the whole checkpoint from disk twice.)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()  # inference only: disable dropout etc.

# Text-generation pipeline built on the already-loaded model/tokenizer.
generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
# Smoke-test generation: 5 sampled continuations capped at 30 total tokens.
ot = generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5)

def generate_response(input_text, max_new_tokens=50):
    """Generate a model continuation for *input_text* and return it as a string.

    Args:
        input_text: Prompt string fed to the language model. Prompts longer
            than 512 tokens are truncated before generation.
        max_new_tokens: Upper bound on tokens generated *beyond* the prompt.
            (The original code relied on ``generate()``'s default
            ``max_length=20`` — a limit on prompt + output combined — so any
            non-trivial prompt produced an almost-empty, silently truncated
            response.)

    Returns:
        The decoded response text, with special tokens (e.g. GPT-2's
        ``<|endoftext|>``) stripped. Note the returned text includes the
        prompt, as is standard for decoder-only ``generate()`` output.
    """
    inputs = tokenizer.encode(input_text, return_tensors='pt',
                              max_length=512, truncation=True)
    # Generate the response; no gradients needed for inference.
    with torch.no_grad():
        outputs = model.generate(inputs, max_new_tokens=max_new_tokens)
    # skip_special_tokens keeps markers like <|endoftext|> out of the result.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

generate_response('这款商品支持退货吗？')