|
|
|
import gradio as gr |
|
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
|
import torch |
|
|
|
model_name = "deepseek-ai/DeepSeek-V2-Lite"

# Load the tokenizer and model; trust_remote_code is required because DeepSeek-V2
# ships custom modeling code. bfloat16 keeps memory use down, but inference on CPU
# is slow; call .cuda() instead if a GPU is available.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name, trust_remote_code=True, torch_dtype=torch.bfloat16
).cpu()

# Reuse the model's own generation config and pad with the EOS token so that
# generate() has a valid pad token id.
model.generation_config = GenerationConfig.from_pretrained(model_name)
model.generation_config.pad_token_id = model.generation_config.eos_token_id
|
|
|
def math_inference(input_text):
    # Tokenize the question, generate an answer, and decode it back to text.
    inputs = tokenizer(input_text, return_tensors="pt")
    # max_new_tokens caps the length of the generated answer.
    output = model.generate(**inputs, max_new_tokens=512)
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
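
# Quick sanity check outside the web UI (hypothetical example question):
# print(math_inference("What is 12 * 17?"))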
|
|
|
|
|
# gr.Textbox uses label (not prompt); recent Gradio versions also drop the
# Interface layout argument, and components stack vertically by default.
iface = gr.Interface(
    fn=math_inference,
    inputs=gr.Textbox(label="Input math question"),
    outputs=gr.Textbox(label="Math answer"),
    title="Math Solver",
)
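
# launch() starts a local web server (http://127.0.0.1:7860 by default) and prints
# the URL; pass share=True for a temporary public link when running remotely.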
|
|
|
|
|
iface.launch() |
|
|