import gradio as gr
from transformers import T5ForConditionalGeneration, T5Tokenizer
from textwrap import fill

# Load the fine-tuned model and tokenizer
last_checkpoint = "Jyotiyadav/model2.0"
finetuned_model = T5ForConditionalGeneration.from_pretrained(last_checkpoint)
tokenizer = T5Tokenizer.from_pretrained(last_checkpoint)

# Define the inference function
def answer_question(question):
    # Prepend the instruction prompt used during fine-tuning and tokenize
    inputs = ["Please answer this question: " + question]
    inputs = tokenizer(inputs, return_tensors="pt")

    # Generate the answer and decode it back to text
    outputs = finetuned_model.generate(**inputs)
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Wrap the answer for better display
    return fill(answer, width=80)

# Create the Gradio interface
iface = gr.Interface(
    fn=answer_question,
    inputs="text",
    outputs="text",
    title="LLM Flan-T5 - Store Sales Prediction (Time Series Forecasting)",
    description="We have utilised the Flan-T5 model for time series forecasting.",
    examples=[
        ["For store number 1 in the city of Quito, with products from various categories such as AUTOMOTIVE, during a 0 on 2017-8-16, with no, cluster 13, and WTI crude oil price at $46.8, what were the total sales on that day?"],
        ["For store number 1 in the city of Quito, with products from various categories such as BABY CARE, during a 0 on 2017-8-16, with no, cluster 13, and WTI crude oil price at $46.8, what were the total sales on that day?"],
        ["For store number 1 in the city of Quito, with products from various categories such as BEAUTY, during a 0 on 2017-8-16, with promotions, cluster 13, and WTI crude oil price at $46.8, what were the total sales on that day?"],
        ["For store number 1 in the city of Quito, with products from various categories such as HOME CARE, during a 0 on 2017-8-16, with promotions, cluster 13, and WTI crude oil price at $46.8, what were the total sales on that day?"],
    ],
)

# Launch the Gradio interface
iface.launch()
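
# Optional sanity check (a sketch, not part of the original app): the same model can be
# queried directly without the Gradio UI. The explicit max_new_tokens is an assumption,
# added because generate() otherwise falls back to a short default length limit. Since
# iface.launch() above blocks, the snippet is left commented out; run it separately (or
# before launch()) if needed. The example question is taken from the examples list above.
#
# question = ("For store number 1 in the city of Quito, with products from various "
#             "categories such as BEAUTY, during a 0 on 2017-8-16, with promotions, "
#             "cluster 13, and WTI crude oil price at $46.8, "
#             "what were the total sales on that day?")
# batch = tokenizer(["Please answer this question: " + question], return_tensors="pt")
# prediction = finetuned_model.generate(**batch, max_new_tokens=64)
# print(tokenizer.decode(prediction[0], skip_special_tokens=True))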