import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("TuringsSolutions/LegalGemmaV1", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("TuringsSolutions/LegalGemmaV1", trust_remote_code=True)
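
# Optional: load the weights in half precision on a GPU to reduce memory use.
# This is a sketch, not part of the original app; device_map="auto" requires
# the accelerate package to be installed.
# model = AutoModelForCausalLM.from_pretrained(
#     "TuringsSolutions/LegalGemmaV1",
#     trust_remote_code=True,
#     torch_dtype="auto",
#     device_map="auto",
# )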
def predict(prompt, temperature, max_tokens):
    # Tokenize the prompt and generate a completion. do_sample=True is
    # required for the temperature setting to have any effect.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=int(max_tokens),  # Gradio sliders return floats
        temperature=temperature,
        do_sample=True,
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    return response
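
# Quick sanity check of predict() without the UI (illustrative prompt only;
# uncomment to test locally before wiring up the interface):
# print(predict("Summarize the discovery phase of a civil case.", 0.7, 50))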
# Create the Gradio interface
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature"),
        gr.Slider(minimum=10, maximum=200, value=50, step=10, label="Number of Output Tokens"),
    ],
    outputs="text",
    title="Gemma 2 2B Law Case Management Model",
    description="A model to assist with law case management. Adjust the temperature and number of output tokens as needed.",
)

# Launch the Gradio app
iface.launch()
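
# Alternative launch sketch (standard Gradio options, assumed rather than
# taken from the original app): queue requests so concurrent users are
# handled in order, and expose a temporary public link.
# iface.queue().launch(share=True)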