import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load a FLAN-T5 small checkpoint fine-tuned for dialogue summarization (SAMSum).
tokenizer = AutoTokenizer.from_pretrained("mrm8488/flan-t5-small-finetuned-samsum")
model = AutoModelForSeq2SeqLM.from_pretrained("mrm8488/flan-t5-small-finetuned-samsum")

def summarize(dialogue, max_words):
    # Tokenize the conversation and generate a summary capped at the slider value.
    input_ids = tokenizer(dialogue, return_tensors="pt").input_ids
    outputs = model.generate(input_ids, max_length=int(max_words))
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

conversation = gr.Textbox(lines=2, placeholder="Conversations Here...")
iface = gr.Interface(
    fn=summarize,
    inputs=[conversation, gr.Slider(10, 100, label="Max summary length")],
    outputs="text",
)
iface.launch()
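# Optional: a minimal sanity check of the summarizer outside the web UI. The sample
# dialogue and the max length of 50 are illustrative assumptions, not from the source.
# Since iface.launch() blocks in a plain script, run a call like this before launching
# (left commented out here):
# print(summarize("Amanda: I baked cookies. Do you want some?\nJerry: Sure, thanks!", 50))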