from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model from the Hugging Face Model Hub
tokenizer = AutoTokenizer.from_pretrained("ihgn/similar-questions")
model = AutoModelForSeq2SeqLM.from_pretrained("ihgn/similar-questions")
# Configure the generation parameters
generation_config = {
    "max_length": 512,
    "num_beams": 1,
    "top_k": 50,
    "top_p": 0.92,
    "do_sample": True,
    "num_return_sequences": 1
}
# Generate text using the configured parameters
input_text = "Your input text goes here."
input_ids = tokenizer.encode(input_text, return_tensors="pt")
generated_ids = model.generate(input_ids, **generation_config)
generated_text = tokenizer.decode(generated_ids.squeeze(), skip_special_tokens=True)
# Print the generated text
print(generated_text)
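
Equivalently, the high-level `pipeline` helper can load the checkpoint and handle tokenization in one step. A minimal sketch; the generation keyword arguments are passed through to `generate` and mirror the config above:

from transformers import pipeline

# Load the same checkpoint through the text2text-generation pipeline
generator = pipeline("text2text-generation", model="ihgn/similar-questions")
result = generator("Your input text goes here.", max_length=512, do_sample=True, top_k=50, top_p=0.92)
print(result[0]["generated_text"])

To sample several candidate questions in one call, raise `num_return_sequences` while sampling is enabled and decode the whole batch. This sketch reuses the `tokenizer`, `model`, and `input_ids` objects defined above; the count of three is arbitrary:

# Sample multiple candidate questions in a single generate() call
candidates = model.generate(
    input_ids,
    max_length=512,
    do_sample=True,
    top_k=50,
    top_p=0.92,
    num_return_sequences=3,  # arbitrary; any value works while do_sample=True
)
for text in tokenizer.batch_decode(candidates, skip_special_tokens=True):
    print(text)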