# QLORA-Phi2 / app.py
import gradio as gr
from inference import run_inference
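# `run_inference` is defined in inference.py (not shown here). For reference,
# a minimal sketch of what it presumably does -- load the 4-bit base model
# plus the QLoRA adapter via PEFT and generate a completion. This is an
# assumption about inference.py, not its actual contents, and the adapter
# path is hypothetical:
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
#   from peft import PeftModel
#
#   bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
#   base_model = AutoModelForCausalLM.from_pretrained(
#       "microsoft/phi-2", quantization_config=bnb_config, device_map="auto"
#   )
#   model = PeftModel.from_pretrained(base_model, "<qlora-adapter-dir>")
#
#   def run_inference(prompt):
#       inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
#       output_ids = model.generate(**inputs, max_new_tokens=256)
#       return tokenizer.decode(output_ids[0], skip_special_tokens=True)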
#####
# Title and description
title = "<center><strong><font size='8'>📎 Phi-2 Chat (Trained on OpenAssistant/oasst1 Dataset with QLoRA) 📎</font></strong></center>"
description_chat = """### Chat with Phi-2"""

# Input / output widgets, created up front and rendered inside the layout below
text_input = gr.Text(label="Enter text")
text_output = gr.Text(label="Generated Response")

css = "h1 { text-align: center } .about { text-align: justify; padding-left: 10%; padding-right: 10%; }"
with gr.Blocks(css=css, title="Phi-2 Chat") as demo:
    with gr.Row():
        with gr.Column(scale=1):
            # Title
            gr.Markdown(title)

    with gr.Tab("Chat with Phi-2"):
        # Input / output text panels
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                text_input.render()
            with gr.Column(scale=1):
                text_output.render()

        # Submit & Clear
        with gr.Row():
            with gr.Column():
                run_chat_with_phi2_button = gr.Button("Chat with Phi-2", variant="primary")
                clear_btn = gr.Button("Clear", variant="secondary")
                gr.Markdown(description_chat)
        gr.Examples(
            examples=[
                "What are Large Language Models?",
                "Can you write a short introduction about the relevance of the term monopsony in economics? Please use examples related to potential monopsonies in the labour market and cite relevant research.",
                "I want to start doing astrophotography as a hobby, any suggestions what could I do?",
            ],
            inputs=[text_input],
            outputs=text_output,
            fn=run_inference,
            cache_examples=True,  # example responses are generated once at startup
            examples_per_page=4,
        )
        run_chat_with_phi2_button.click(
            run_inference,
            inputs=[text_input],
            outputs=text_output,
        )
    ###################################################################################################################
    def clear():
        # Reset both the prompt box and the generated response.
        return None, None

    clear_btn.click(clear, outputs=[text_input, text_output])
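# Queue incoming requests so generations from concurrent users run in order
# rather than overlapping on the single loaded model, then start the app.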
demo.queue()
demo.launch()