pseudotensor committed on
Commit
876e36c
1 Parent(s): 09063a8

Update with h2oGPT hash 5bf309e905fe1b771229dd0f61b0582ff883b41d

Browse files
Files changed (1) hide show
  1. app.py +14 -11
app.py CHANGED
@@ -648,6 +648,14 @@ body.dark{background:linear-gradient(#0d0d0d,#333333);}"""
648
 
649
  Chatbot._postprocess_chat_messages = _postprocess_chat_messages
650
 
 
 
 
 
 
 
 
 
651
  demo = gr.Blocks(theme=gr.themes.Soft(**colors_dict), css=css_code, title="h2oGPT", analytics_enabled=False)
652
  callback = gr.CSVLogger()
653
  # css_code = 'body{background-image:url("https://h2o.ai/content/experience-fragments/h2o/us/en/site/header/master/_jcr_content/root/container/header_copy/logo.coreimg.svg/1678976605175/h2o-logo.svg");}'
@@ -737,7 +745,7 @@ body.dark{background:linear-gradient(#0d0d0d,#333333);}"""
737
  lines=4, label=instruction_label,
738
  placeholder=kwargs['placeholder_instruction'],
739
  )
740
- with gr.Row(): # .style(equal_height=False, equal_width=False):
741
  submit = gr.Button(value='Submit').style(full_width=False, size='sm')
742
  stop_btn = gr.Button(value="Stop").style(full_width=False, size='sm')
743
  with gr.Row():
@@ -841,7 +849,7 @@ body.dark{background:linear-gradient(#0d0d0d,#333333);}"""
841
  n_gpus = torch.cuda.device_count()
842
  n_gpus_list = [str(x) for x in list(range(-1, n_gpus))]
843
  with gr.Column():
844
- with gr.Row(scale=1):
845
  with gr.Column(scale=50):
846
  model_choice = gr.Dropdown(model_options_state.value[0], label="Choose Model",
847
  value=kwargs['base_model'])
@@ -860,7 +868,7 @@ body.dark{background:linear-gradient(#0d0d0d,#333333);}"""
860
  model_used = gr.Textbox(label="Current Model", value=kwargs['base_model'])
861
  lora_used = gr.Textbox(label="Current LORA", value=kwargs['lora_weights'],
862
  visible=kwargs['show_lora'])
863
- with gr.Row(scale=1):
864
  with gr.Column(scale=50):
865
  new_model = gr.Textbox(label="New Model HF name/path")
866
  new_lora = gr.Textbox(label="New LORA HF name/path", visible=kwargs['show_lora'])
@@ -869,7 +877,7 @@ body.dark{background:linear-gradient(#0d0d0d,#333333);}"""
869
  add_lora_button = gr.Button("Add new LORA name", visible=kwargs['show_lora'])
870
  col_model2 = gr.Column(visible=False)
871
  with col_model2:
872
- with gr.Row(scale=1):
873
  with gr.Column(scale=50):
874
  model_choice2 = gr.Dropdown(model_options_state.value[0], label="Choose Model 2",
875
  value=no_model_str)
@@ -944,13 +952,7 @@ body.dark{background:linear-gradient(#0d0d0d,#333333);}"""
944
  None,
945
  None,
946
  None,
947
- _js="""() => {
948
- if (document.querySelectorAll('.dark').length) {
949
- document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
950
- } else {
951
- document.querySelector('body').classList.add('dark');
952
- }
953
- }""",
954
  api_name="dark",
955
  )
956
 
@@ -1414,6 +1416,7 @@ body.dark{background:linear-gradient(#0d0d0d,#333333);}"""
1414
  stop_btn.click(lambda: None, None, None,
1415
  cancels=[submit_event_nochat, submit_event, submit_event2, submit_event3],
1416
  queue=False, api_name='stop').then(clear_torch_cache)
 
1417
 
1418
  demo.queue(concurrency_count=1)
1419
  favicon_path = "h2o-logo.svg"
 
648
 
649
  Chatbot._postprocess_chat_messages = _postprocess_chat_messages
650
 
651
+ dark_js = """() => {
652
+ if (document.querySelectorAll('.dark').length) {
653
+ document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark'));
654
+ } else {
655
+ document.querySelector('body').classList.add('dark');
656
+ }
657
+ }"""
658
+
659
  demo = gr.Blocks(theme=gr.themes.Soft(**colors_dict), css=css_code, title="h2oGPT", analytics_enabled=False)
660
  callback = gr.CSVLogger()
661
  # css_code = 'body{background-image:url("https://h2o.ai/content/experience-fragments/h2o/us/en/site/header/master/_jcr_content/root/container/header_copy/logo.coreimg.svg/1678976605175/h2o-logo.svg");}'
 
745
  lines=4, label=instruction_label,
746
  placeholder=kwargs['placeholder_instruction'],
747
  )
748
+ with gr.Row():
749
  submit = gr.Button(value='Submit').style(full_width=False, size='sm')
750
  stop_btn = gr.Button(value="Stop").style(full_width=False, size='sm')
751
  with gr.Row():
 
849
  n_gpus = torch.cuda.device_count()
850
  n_gpus_list = [str(x) for x in list(range(-1, n_gpus))]
851
  with gr.Column():
852
+ with gr.Row():
853
  with gr.Column(scale=50):
854
  model_choice = gr.Dropdown(model_options_state.value[0], label="Choose Model",
855
  value=kwargs['base_model'])
 
868
  model_used = gr.Textbox(label="Current Model", value=kwargs['base_model'])
869
  lora_used = gr.Textbox(label="Current LORA", value=kwargs['lora_weights'],
870
  visible=kwargs['show_lora'])
871
+ with gr.Row():
872
  with gr.Column(scale=50):
873
  new_model = gr.Textbox(label="New Model HF name/path")
874
  new_lora = gr.Textbox(label="New LORA HF name/path", visible=kwargs['show_lora'])
 
877
  add_lora_button = gr.Button("Add new LORA name", visible=kwargs['show_lora'])
878
  col_model2 = gr.Column(visible=False)
879
  with col_model2:
880
+ with gr.Row():
881
  with gr.Column(scale=50):
882
  model_choice2 = gr.Dropdown(model_options_state.value[0], label="Choose Model 2",
883
  value=no_model_str)
 
952
  None,
953
  None,
954
  None,
955
+ _js=dark_js,
 
 
 
 
 
 
956
  api_name="dark",
957
  )
958
 
 
1416
  stop_btn.click(lambda: None, None, None,
1417
  cancels=[submit_event_nochat, submit_event, submit_event2, submit_event3],
1418
  queue=False, api_name='stop').then(clear_torch_cache)
1419
+ demo.load(None,None,None,_js=dark_js)
1420
 
1421
  demo.queue(concurrency_count=1)
1422
  favicon_path = "h2o-logo.svg"