import gradio as gr
from styles import MODEL_SELECTION_CSS
from js import GET_LOCAL_STORAGE, UPDATE_LEFT_BTNS_STATE, UPDATE_PLACEHOLDERS
from templates import templates
# channels.txt lists one conversation channel (history) name per line
with open("channels.txt", "r") as chl_file:
    channels = [ch for ch in chl_file.read().split("\n") if ch.strip() != ""]

channel_btns = []
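
# ---------------------------------------------------------------------------
# NOTE (assumption): `examples`, `ex_btns`, `fill_up_placeholders`, and `args`
# are referenced below but are not defined in this excerpt. Minimal stand-ins
# are sketched here so the file runs on its own; the full app presumably
# supplies its own example prompts, argument parsing, and template helper.
# ---------------------------------------------------------------------------
from types import SimpleNamespace

examples = []   # example prompts shown in the initial popup
ex_btns = []    # buttons created for those examples

args = SimpleNamespace(serper_api_key=None)  # presumably read from CLI/env in the full app

def fill_up_placeholders(txt):
    # Hypothetical stand-in: show the chosen template text and clear the prompt box.
    return (
        gr.update(value=txt, visible=True),  # template_md
        gr.update(visible=False),            # placeholder_txt1
        gr.update(visible=False),            # placeholder_txt2
        gr.update(visible=False),            # placeholder_txt3
        "",                                  # instruction_txtbox
    )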
with gr.Blocks(css=MODEL_SELECTION_CSS, theme='gradio/soft') as demo:
    with gr.Column(visible=False) as chat_view:
        idx = gr.State(0)
        chat_state = gr.State()
        local_data = gr.JSON({}, visible=False)
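
        # Two-pane layout: a narrow history sidebar on the left and the chat
        # area on the right.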
        with gr.Row():
            with gr.Column(scale=1, min_width=180):
                gr.Markdown("GradioChat", elem_id="left-top")

                with gr.Column(elem_id="left-pane"):
                    chat_back_btn = gr.Button("Back", elem_id="chat-back-btn")

                    with gr.Accordion("Histories", elem_id="chat-history-accordion", open=False):
                        channel_btns.append(gr.Button(channels[0], elem_classes=["custom-btn-highlight"]))

                        for channel in channels[1:]:
                            channel_btns.append(gr.Button(channel, elem_classes=["custom-btn"]))
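
            # Right pane: initial example popup, floating aux buttons, the chat
            # itself, prompt templates, and the control panel.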
            with gr.Column(scale=8, elem_id="right-pane"):
                with gr.Column(
                    elem_id="initial-popup", visible=False
                ) as example_block:
                    with gr.Row(scale=1):
                        with gr.Column(elem_id="initial-popup-left-pane"):
                            gr.Markdown("GradioChat", elem_id="initial-popup-title")
                            gr.Markdown("Making the community's best AI chat models available to everyone.")

                        with gr.Column(elem_id="initial-popup-right-pane"):
                            gr.Markdown("Chat UI is now open sourced on Hugging Face Hub")
                            gr.Markdown("check out the [↗ repository](https://huggingface.co/spaces/chansung/test-multi-conv)")

                    with gr.Column(scale=1):
                        gr.Markdown("Examples")
                        with gr.Row():
                            for example in examples:
                                ex_btns.append(gr.Button(example, elem_classes=["example-btn"]))
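
                # Stop / Regen / Clean buttons overlaid on the chat area.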
with gr.Column(elem_id="aux-btns-popup", visible=True):
with gr.Row():
stop = gr.Button("Stop", elem_classes=["aux-btn"])
regenerate = gr.Button("Regen", interactive=False, elem_classes=["aux-btn"])
clean = gr.Button("Clean", elem_classes=["aux-btn"])
with gr.Accordion("Context Inspector", elem_id="aux-viewer", open=False):
context_inspector = gr.Textbox(
"",
elem_id="aux-viewer-inspector",
label="",
lines=30,
max_lines=50,
)
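
                # Main chat widgets: the conversation display and the prompt textbox.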
                chatbot = gr.Chatbot(elem_id='chatbot')
                instruction_txtbox = gr.Textbox(placeholder="Ask anything", label="", elem_id="prompt-txt")
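
                # Prompt templates: clicking an example routes its text through
                # fill_up_placeholders to populate the placeholder textboxes.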
with gr.Accordion("Example Templates", open=False):
template_txt = gr.Textbox(visible=False)
template_md = gr.Markdown(label="Chosen Template", visible=False, elem_classes="template-txt")
with gr.Row():
placeholder_txt1 = gr.Textbox(label="placeholder #1", visible=False, interactive=True)
placeholder_txt2 = gr.Textbox(label="placeholder #2", visible=False, interactive=True)
placeholder_txt3 = gr.Textbox(label="placeholder #3", visible=False, interactive=True)
for template in templates:
with gr.Tab(template['title']):
gr.Examples(
template['template'],
inputs=[template_txt],
outputs=[template_md, placeholder_txt1, placeholder_txt2, placeholder_txt3, instruction_txtbox],
run_on_click=True,
fn=fill_up_placeholders,
)
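
                # Control panel: global context, internet search, and generation
                # settings for responses and summaries.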
with gr.Accordion("Control Panel", open=False) as control_panel:
with gr.Column():
with gr.Column():
gr.Markdown("#### Global context")
with gr.Accordion("global context will persist during conversation, and it is placed at the top of the prompt", open=False):
global_context = gr.Textbox(
"global context",
lines=5,
max_lines=10,
interactive=True,
elem_id="global-context"
)
gr.Markdown("#### Internet search")
with gr.Row():
internet_option = gr.Radio(choices=["on", "off"], value="off", label="mode")
serper_api_key = gr.Textbox(
value= "" if args.serper_api_key is None else args.serper_api_key,
placeholder="Get one by visiting serper.dev",
label="Serper api key"
)
gr.Markdown("#### GenConfig for **response** text generation")
with gr.Row():
res_temp = gr.Slider(0.0, 2.0, 0, step=0.1, label="temp", interactive=True)
res_topp = gr.Slider(0.0, 2.0, 0, step=0.1, label="top_p", interactive=True)
res_topk = gr.Slider(20, 1000, 0, step=1, label="top_k", interactive=True)
res_rpen = gr.Slider(0.0, 2.0, 0, step=0.1, label="rep_penalty", interactive=True)
res_mnts = gr.Slider(64, 8192, 0, step=1, label="new_tokens", interactive=True)
res_beams = gr.Slider(1, 4, 0, step=1, label="beams")
res_cache = gr.Radio([True, False], value=0, label="cache", interactive=True)
res_sample = gr.Radio([True, False], value=0, label="sample", interactive=True)
res_eosid = gr.Number(value=0, visible=False, precision=0)
res_padid = gr.Number(value=0, visible=False, precision=0)
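
                        # Generation settings for conversation summaries (kept hidden in the UI).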
                        with gr.Column(visible=False):
                            gr.Markdown("#### GenConfig for **summary** text generation")
                            with gr.Row():
                                sum_temp = gr.Slider(0.0, 2.0, 0, step=0.1, label="temp", interactive=True)
                                sum_topp = gr.Slider(0.0, 2.0, 0, step=0.1, label="top_p", interactive=True)
                                sum_topk = gr.Slider(20, 1000, 0, step=1, label="top_k", interactive=True)
                                sum_rpen = gr.Slider(0.0, 2.0, 0, step=0.1, label="rep_penalty", interactive=True)
                                sum_mnts = gr.Slider(64, 8192, 0, step=1, label="new_tokens", interactive=True)
                                sum_beams = gr.Slider(1, 8, 0, step=1, label="beams", interactive=True)
                                sum_cache = gr.Radio([True, False], value=0, label="cache", interactive=True)
                                sum_sample = gr.Radio([True, False], value=0, label="sample", interactive=True)
                                sum_eosid = gr.Number(value=0, visible=False, precision=0)
                                sum_padid = gr.Number(value=0, visible=False, precision=0)
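
                    # Context management: how many recent turns to keep, plus a
                    # (hidden) prompt used to summarize older conversation.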
                    with gr.Column():
                        gr.Markdown("#### Context managements")
                        with gr.Row():
                            ctx_num_lconv = gr.Slider(2, 10, 3, step=1, label="number of recent talks to keep", interactive=True)
                            ctx_sum_prompt = gr.Textbox(
                                "summarize our conversations. what have we discussed about so far?",
                                label="design a prompt to summarize the conversations",
                                visible=False
                            )
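
# The js helpers imported above (GET_LOCAL_STORAGE, UPDATE_LEFT_BTNS_STATE,
# UPDATE_PLACEHOLDERS) are not referenced in this excerpt; the full app
# presumably wires them into event handlers before launching.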
demo.launch()