# app.py: Gradio demo for the TeachingPadawan teacher model
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the teacher model and its tokenizer.
# Other models previously tried here: "0x0mom/nous_gemma_r1",
# "cognitivecomputations/dolphin-2_6-phi-2", "Dizzykong/gpt2-medium-commands".
model_name = "microsoft/Orca-2-7b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
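
# Note: Orca-2-7b is a 7B-parameter model, so full-precision CPU loading is
# slow and memory-hungry. A minimal sketch, assuming a CUDA-capable GPU and
# the `accelerate` package are available, would load it in half precision:
#
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name, torch_dtype=torch.float16, device_map="auto"
#   )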
def generate_response(prompt):
    # Tokenize the prompt and generate a completion with the teacher model.
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        # max_new_tokens bounds only the generated continuation;
        # max_length would count the prompt tokens as well.
        outputs = model.generate(**inputs, max_new_tokens=100)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
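
# For less deterministic output, a sampled variant could be used instead
# (temperature and top_p below are illustrative values, not tuned for this model):
#
#   outputs = model.generate(**inputs, max_new_tokens=100,
#                            do_sample=True, temperature=0.7, top_p=0.9)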
interface = gr.Interface(
    fn=generate_response,
    # gr.inputs.Textbox was deprecated and removed; use gr.Textbox directly.
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Text Generation with Orca-2-7b",
    description="This model generates responses based on the input prompt. Try it out!",
)
if __name__ == "__main__":
    interface.launch()
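    # When running outside Hugging Face Spaces, passing share=True would also
    # expose a temporary public URL: interface.launch(share=True)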