import os
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr

# Model name and Hugging Face token
MODEL_NAME = "Pisethan/sangapac-math"
TOKEN = os.getenv("HF_API_TOKEN")

if not TOKEN:
    raise ValueError("Hugging Face API token not found. Set it as an environment variable (HF_API_TOKEN).")
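# Note: on a Hugging Face Space, HF_API_TOKEN can be added as a secret in the
# Space settings; secrets are exposed to the app as environment variables.
# For a local run, something like the following works (shell; the token value
# below is a placeholder):
#   export HF_API_TOKEN=hf_xxx
#   python app.py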
# Load model and tokenizer
try:
    # `token` replaces the deprecated `use_auth_token` argument in recent transformers releases
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=TOKEN)
    model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, token=TOKEN)
    generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
except Exception as e:
    generator = None
    print(f"Error loading model or tokenizer: {e}")
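# Alternative (sketch, not used above): the pipeline can also be built in one
# step from the model id, in which case it downloads the tokenizer and model itself:
#   generator = pipeline("text2text-generation", model=MODEL_NAME, token=TOKEN)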
def predict(input_text):
    """Generate a solution for the given math problem and return it as plain text and as JSON."""
    if generator is None:
        return "Model not loaded properly.", {"Error": "Model not loaded properly."}
    try:
        # Generate output with beam search
        result = generator(input_text, max_length=256, num_beams=5, early_stopping=True)
        generated_text = result[0]["generated_text"]

        simple_result = f"Generated Solution:\n{generated_text}"
        detailed_result = {
            "Input": input_text,
            "Generated Solution": generated_text,
        }
        return simple_result, detailed_result
    except Exception as e:
        return "An error occurred.", {"Error": str(e)}
# Gradio interface
sample_inputs = [
    ["1 + 1 = ?"],
    ["(5 + 3) × 2 = ?"],
    ["12 ÷ 4 = ?"],
    ["Solve for x: x + 5 = 10"],
]
interface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=2, placeholder="Enter a math problem..."),
    outputs=[
        gr.Textbox(label="Simple Output"),
        gr.JSON(label="Detailed JSON Output"),
    ],
    title="Sangapac Math Model",
    description=(
        "A model that solves math problems and provides step-by-step solutions. "
        "Examples include Arithmetic, Multiplication, Division, Algebra, and Geometry problems."
    ),
    examples=sample_inputs,
)

interface.launch()
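# Quick check without the web UI (sketch): call predict() directly, e.g.
#   print(predict("12 ÷ 4 = ?")[0])
# By default launch() serves the app locally; interface.launch(share=True)
# creates a temporary public Gradio link when running outside a Space.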