import gradio as gr
from texify.inference import batch_inference
from texify.model.model import load_model
from texify.model.processor import load_processor
from PIL import Image
title="""🙋🏻♂️Welcome to🌟Tonic's👨🏻🔬Texify"""
description="""You can upload a picture with a math formula and this model will return latex formulas. Texify is a multimodal input model. You can use this Space to test out the current model [vikp/texify2](https://huggingface.co/vikp/texify2) You can also use vikp/texify2🚀 by cloning this space. Simply click here: [Duplicate Space](https://huggingface.co/spaces/Tonic1/texify?duplicate=true)
Join us: TeamTonic is always making cool demos! Join our active builder's community on Discord: [Discord](https://discord.gg/nXx5wbX9) On Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On Github: [Polytonic](https://github.com/tonic-ai) & contribute to [PolyGPT](https://github.com/tonic-ai/polygpt-alpha) You can also join the [texify community here](https://discord.gg/zJSDQJWDe8). Big thanks to Vik Paruchuri for the invite and Huggingface for the Community Grant. Your special attentions are much appreciated.
"""
# Load the Texify model and processor once at startup
model = load_model()
processor = load_processor()
def process_image(img):
    # With type="pil" the Gradio input is already a PIL image; convert only if a NumPy array slips through
    if not isinstance(img, Image.Image):
        img = Image.fromarray(img)
    results = batch_inference([img], model, processor)
    return "\n".join(results) if isinstance(results, list) else results
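
# Sketch (an assumption, not part of the original Space): a small helper for trying
# process_image on a local file without launching the Gradio UI.
# "example_formula.png" is a hypothetical path used only for illustration.
def demo_cli(path="example_formula.png"):
    """Run Texify on a local image file and print the resulting LaTeX."""
    with Image.open(path) as img:
        print(process_image(img.convert("RGB")))
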
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title=title,
    description=description,
)
if __name__ == "__main__":
    iface.launch()