import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from datasets import load_dataset

# Load model and tokenizer
model_name = "google/flan-t5-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Load CSV data (replace with your dataset logic)
def load_data():
    # split="train" already returns the train split, so don't index it again with ["train"]
    dataset = load_dataset("LR36/BehavioralAnalysis", split="train")  # Replace with your dataset name
    return dataset

# Generate text (inference)
def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Fine-tune button (simplified example)
def fine_tune():
    dataset = load_data()
    # Add your training logic here (see previous examples)
    return "Fine-tuning complete! (Note: Models reset when the Space stops.)"

# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# FLAN-T5 Demo")

    with gr.Tab("Generate Text"):
        prompt = gr.Textbox(label="Input Prompt")
        generate_btn = gr.Button("Generate")
        output = gr.Textbox(label="Output")
        generate_btn.click(fn=generate_text, inputs=prompt, outputs=output)

    with gr.Tab("Fine-Tune"):
        train_btn = gr.Button("Train on CSV Data")
        train_output = gr.Textbox(label="Training Status")
        train_btn.click(fn=fine_tune, outputs=train_output)

demo.launch()
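
# --- Hedged sketch of what the "Add your training logic here" step in fine_tune()
# could look like. This is a minimal illustration, not the original author's code.
# It reuses the `tokenizer` and `model` defined above and assumes the dataset has
# "prompt" and "response" text columns; adjust both names to the real CSV schema.
from transformers import (
    DataCollatorForSeq2Seq,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

def preprocess(batch):
    # Tokenize inputs and targets; "prompt"/"response" are assumed column names.
    model_inputs = tokenizer(batch["prompt"], truncation=True, max_length=512)
    labels = tokenizer(text_target=batch["response"], truncation=True, max_length=128)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

def train_on_dataset(dataset):
    # Map the raw rows to tokenized features, dropping the original text columns.
    tokenized = dataset.map(preprocess, batched=True, remove_columns=dataset.column_names)
    args = Seq2SeqTrainingArguments(
        output_dir="flan-t5-finetuned",
        per_device_train_batch_size=8,
        num_train_epochs=1,
        logging_steps=10,
        save_strategy="no",  # Space storage is ephemeral; weights reset on restart
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=args,
        train_dataset=tokenized,
        data_collator=DataCollatorForSeq2Seq(tokenizer, model=model),
    )
    trainer.train()

# In fine_tune(), you could then call: train_on_dataset(dataset)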