# Llama_chat / app.py
# (Hugging Face Space scrape residue — kept as a comment so the file stays
# valid Python: author abdulmalek9, commit message "f", revision 695e908,
# raw / history / blame view, 397 Bytes)
import gradio as gr
def greet(name):
    """Parse *name* as an integer, add 10, and return the result as a string.

    Despite the parameter name, the Gradio textbox input is expected to be a
    numeric string (e.g. "5" -> "15").  Non-numeric input is reported back to
    the user instead of letting an unhandled ValueError bubble up into the UI.
    """
    try:
        return str(int(name) + 10)
    except (TypeError, ValueError):
        # int("abc") raises ValueError; int(None) raises TypeError.
        return f"Invalid input: {name!r} is not an integer"
# Load model directly
# NOTE(review): this block originally ran *after* iface.launch().  launch()
# blocks in script mode, so the import and both from_pretrained() calls were
# unreachable until the server shut down.  Moved before launch so they run.
from transformers import AutoTokenizer, AutoModelForCausalLM

# NOTE(review): meta-llama/Llama-2-13b-chat-hf is a gated ~26 GB checkpoint;
# this requires HF auth and substantial RAM — confirm the Space hardware.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf")

# NOTE(review): `greet` does not use `tokenizer`/`model` yet, so loading the
# 13B model is pure startup cost until chat logic is wired in — TODO confirm.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# launch() blocks; keep it as the final statement of the script.
# (Removed a trailing no-op print('') that followed it.)
iface.launch()