# openbiollm / app.py
# Hugging Face Space app; last updated by suchinth08, commit 4779b49 (994 bytes).
# (Hub web-UI residue from the original paste converted to this comment so the
# file parses as Python.)
"""Gradio demo that serves the aaditya/Llama3-OpenBioLLM-8B causal LM."""
import gradio as gr
from huggingface_hub import login  # NOTE(review): imported but never called — confirm whether gated-repo auth is required
from transformers import AutoModelForCausalLM, AutoTokenizer

# Single source of truth for the checkpoint, so tokenizer and model stay in sync.
MODEL_ID = "aaditya/Llama3-OpenBioLLM-8B"

# Load the tokenizer and model exactly once at startup.
# The original file additionally built an unused `pipeline("text-generation", ...)`
# for the same checkpoint, which loaded the 8B weights into memory a second
# time; that redundant load (and the triplicated transformers imports) is removed.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
# Define a function to generate text
def generate_text(prompt, max_new_tokens=50):
    """Generate a continuation of ``prompt`` with the OpenBioLLM model.

    Args:
        prompt: Input text to continue.
        max_new_tokens: Number of NEW tokens to generate (default 50).
            The original used ``max_length=50``, which counts the prompt
            tokens too, so prompts near 50 tokens produced little or no
            output; ``max_new_tokens`` always yields a continuation.

    Returns:
        The decoded text (prompt plus continuation) with special tokens
        stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Wire the generator into a Gradio UI: one text box in, generated text out.
_ui_config = dict(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="OpenBioLLM Model",
    description="Enter a prompt to generate text using the OpenBioLLM model.",
)
iface = gr.Interface(**_ui_config)

# Start the web server (blocks until the app is stopped).
iface.launch()