# app.py — Gradio Space wrapping apple/OpenELM-270M-Instruct for text generation.
import gradio as gr
from gradio import Interface, Textbox, Markdown,Slider
from transformers import AutoModelForCausalLM , AutoTokenizer
import torch
# Load the 270M-parameter OpenELM instruct model once at import time so every
# request reuses it. trust_remote_code is required: the model ships custom
# modeling code on the Hub.
openelm_270m_instruct = AutoModelForCausalLM.from_pretrained("apple/OpenELM-270M-Instruct", trust_remote_code=True)
# OpenELM does not bundle a tokenizer; the Llama-2 tokenizer is loaded instead
# (presumably per the model card's recommendation — confirm).
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-hf")
def generate(newQuestion, num):
    """Generate a continuation of *newQuestion* with OpenELM-270M-Instruct.

    Args:
        newQuestion: Prompt text entered in the UI.
        num: Maximum total sequence length (prompt + generated tokens).
            Arrives as a string from the Textbox, so it is coerced with int().

    Returns:
        The decoded output text (prompt included), special tokens stripped.
    """
    # Let the tokenizer produce the batched (1, seq_len) tensor directly
    # instead of hand-wrapping input_ids in torch.tensor(...).unsqueeze(0).
    input_ids = tokenizer(newQuestion, return_tensors="pt")["input_ids"]
    # Inference only — disable autograd bookkeeping to save memory.
    with torch.no_grad():
        output_ids = openelm_270m_instruct.generate(
            input_ids,
            max_length=int(num),
            # Neither the model nor the tokenizer defines a pad token;
            # 0 is used as a stand-in so generate() does not warn/fail.
            pad_token_id=0,
        )
    return tokenizer.decode(
        output_ids[0].tolist(),
        skip_special_tokens=True,
    )
# Markdown shown under the title via the Interface `description` argument.
developer_info = """
this space is developed by Ahmadreza Anaami \n
feel free to set via Api key too \n
apple/OpenELM-270M-Instruct
"""
def greet(name, num):
    """Gradio callback: forward the prompt and length budget to generate()."""
    answer = generate(name, num)
    return answer
# Build the UI pieces up front so the Interface call stays readable.
prompt_box = Textbox(label="Enter Text Here:", type="text")
length_box = Textbox(label="number of generated tokens:", type="text")
answer_box = Textbox(label="generated answer:")

iface = gr.Interface(
    fn=greet,
    inputs=[prompt_box, length_box],
    outputs=[answer_box],
    title="OpenELM-270M-Instruct",
    description=developer_info,
    css="""
    /* Style the developer info section (optional) */
    #dev-info {
        font-size: 0.8rem;
        color: #888; /* Adjust color as desired */
        margin-top: 1rem;
        text-align: center;
    }
    /* Style the input area (optional) */
    .gr-input text {
        padding: 10px;
        border-radius: 5px;
        font-size: 1rem;
    }
    /* Style the score label (optional) */
    .gr-output.gr-slider label {
        font-weight: bold;
    }
    """,
)

iface.launch()