# galactica-125m / app.py
import torch
import gradio as gr
from transformers import AutoTokenizer, OPTForCausalLM

# Load the Galactica 125M tokenizer and model. Note that "${PWD}" is not
# expanded inside Python strings, so use plain relative paths instead.
tokenizer = AutoTokenizer.from_pretrained("facebook/galactica-125m")
model = OPTForCausalLM.from_pretrained(
    "facebook/galactica-125m", cache_dir="cache", offload_folder="offload"
)
def process_text(text):
    """Generate a continuation of the input text with Galactica."""
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    outputs = model.generate(input_ids)
    return tokenizer.decode(outputs[0])
# gr.inputs / gr.outputs were removed in Gradio 3+; use the components directly.
input_text = gr.Textbox(placeholder="Enter text to process")
output_text = gr.Textbox()

interface = gr.Interface(fn=process_text, inputs=input_text, outputs=output_text, title="Text Processing Demo")
interface.launch()