# Hugging Face Space: FLAN-T5 demo (text generation + fine-tune stub).
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from datasets import load_dataset
import pandas as pd
# Load model and tokenizer once at module import time so every request
# reuses the same in-memory instances (flan-t5-small is CPU-friendly).
model_name = "google/flan-t5-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# Load CSV data (replace with your dataset logic)
def load_data():
    """Load the training split of the behavioral-analysis dataset.

    Returns:
        datasets.Dataset: the "train" split, ready for iteration/training.
    """
    # BUG FIX: load_dataset(..., split="train") already returns the train
    # split as a Dataset. Indexing the result again with ["train"] would
    # look up a *column* named "train" and raise a KeyError.
    return load_dataset("LR36/BehavioralAnalysis", split="train")  # Replace with your dataset name
# Generate text (inference)
def generate_text(prompt):
    """Run seq2seq inference on *prompt* and return the decoded output.

    Args:
        prompt: Input string; tokenized with truncation at 512 tokens.

    Returns:
        str: The generated continuation with special tokens stripped.
    """
    encoded = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=512,
    )
    generated_ids = model.generate(**encoded, max_new_tokens=100)
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
# Fine-tune button (simplified example)
def fine_tune():
    """Fetch the training data and report status (training loop is a stub).

    Returns:
        str: A status message for the UI. Note that any fine-tuned weights
        live only in memory and are lost when the Space stops.
    """
    _dataset = load_data()  # fetched but unused until training logic is added
    # Add your training logic here (see previous examples)
    return "Fine-tuning complete! (Note: Models reset when Space stops.)"
# Gradio UI: one tab for inference, one for the fine-tuning stub.
with gr.Blocks() as demo:
    gr.Markdown("# FLAN-T5 Demo")

    with gr.Tab("Generate Text"):
        prompt = gr.Textbox(label="Input Prompt")
        generate_btn = gr.Button("Generate")
        output = gr.Textbox(label="Output")
        # Wire the button to the inference function.
        generate_btn.click(fn=generate_text, inputs=prompt, outputs=output)

    with gr.Tab("Fine-Tune"):
        train_btn = gr.Button("Train on CSV Data")
        train_output = gr.Textbox(label="Training Status")
        # fine_tune takes no inputs; it only reports a status string.
        train_btn.click(fn=fine_tune, outputs=train_output)

demo.launch()