# dolphin-mistral / app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model once at startup so all requests share one copy.
model_name = "cognitivecomputations/dolphin-2.8-mistral-7b-v02"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
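
# NOTE: a 7B model loaded in fp32 needs roughly 28 GB of memory, more than the
# free Spaces tier provides. On GPU hardware the usual fix is a half-precision
# load; a hedged sketch (torch and accelerate are assumptions here, not
# confirmed by this Space's files):
#
#   import torch
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name, torch_dtype=torch.float16, device_map="auto"
#   )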

def generate_text(prompt):
    # Tokenize the prompt and move it to the same device as the model.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # max_new_tokens caps only the generated continuation; the original
    # max_length=200 also counted the prompt tokens against the limit.
    outputs = model.generate(**inputs, max_new_tokens=200)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
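
# The Dolphin series is chat-tuned (ChatML format), so bare prompts may
# underperform properly formatted chat turns. A sketch of a chat-aware
# variant, assuming this checkpoint's tokenizer ships a chat template (worth
# verifying; generate_chat is a hypothetical helper, not part of the demo):
def generate_chat(prompt):
    messages = [{"role": "user", "content": prompt}]
    # apply_chat_template wraps the turn in the model's expected chat format
    # and appends the assistant header so generation continues as a reply.
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(input_ids, max_new_tokens=200)
    # Decode only the newly generated tokens, dropping the echoed prompt.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
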
iface = gr.Interface(
fn=generate_text,
inputs="text",
outputs="text",
title="Dolphin Mistral Demo",
description="Enter a prompt and get a generated response."
)
iface.launch()
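
# A Space like this also needs a requirements.txt next to app.py. The actual
# file isn't shown here; a plausible minimal version (an assumption, not the
# recorded file) would be:
#
#   gradio
#   transformers
#   torch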