zetavg committed on
Commit
72ff821
1 Parent(s): c620e0b

ui updates

Browse files
llama_lora/globals.py CHANGED
@@ -1,7 +1,12 @@
 
 
 
1
  from typing import Any, Dict, List, Optional, Tuple, Union
2
 
3
 
4
  class Global:
 
 
5
  base_model: str = ""
6
  data_dir: str = ""
7
  load_8bit: bool = False
@@ -15,3 +20,32 @@ class Global:
15
  ui_subtitle: str = "Toolkit for examining and fine-tuning LLaMA models using low-rank adaptation (LoRA)."
16
  ui_show_sys_info: bool = True
17
  ui_dev_mode: bool = False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import subprocess
3
+
4
  from typing import Any, Dict, List, Optional, Tuple, Union
5
 
6
 
7
  class Global:
8
+ version = None
9
+
10
  base_model: str = ""
11
  data_dir: str = ""
12
  load_8bit: bool = False
 
20
  ui_subtitle: str = "Toolkit for examining and fine-tuning LLaMA models using low-rank adaptation (LoRA)."
21
  ui_show_sys_info: bool = True
22
  ui_dev_mode: bool = False
23
+
24
+
25
def get_package_dir():
    """Return the absolute path of the directory containing this module.

    Used as the working directory when querying git for the current
    commit hash.
    """
    return os.path.dirname(os.path.abspath(__file__))
29
+
30
+
31
def get_git_commit_hash():
    """Return the full git commit hash of the project checkout, or None.

    Runs ``git rev-parse HEAD`` with the package directory as the working
    directory. Any failure (git not installed, not a git checkout, ...)
    is reported via print() and None is returned, so callers can treat
    the version as optional.
    """
    try:
        # Pass cwd= to subprocess instead of os.chdir()-ing around the
        # call: chdir mutates process-global state, is race-prone, and
        # needed a second nested try/finally in the original just to
        # restore the working directory.
        output = subprocess.check_output(
            ['git', 'rev-parse', 'HEAD'],
            cwd=get_package_dir(),
        )
        return output.strip().decode('utf-8')
    except Exception as e:
        # Best-effort: the commit hash is purely informational, so never
        # let a failure here crash module import.
        print(f"Cannot get git commit hash: {e}")
46
+
47
+
48
# Module initialization: detect the git commit of the running checkout
# (best-effort; get_git_commit_hash() returns None on any failure).
commit_hash = get_git_commit_hash()

if commit_hash:
    # Expose only the abbreviated 8-character hash; the UI shows it as
    # the app version string.
    Global.version = commit_hash[:8]
llama_lora/ui/inference_ui.py CHANGED
@@ -173,7 +173,8 @@ def inference_ui():
173
  with gr.Row():
174
  with gr.Column():
175
  with gr.Column(elem_id="inference_prompt_box"):
176
- variable_0 = gr.Textbox(lines=2, label="Prompt", placeholder="Tell me about alpecas and llamas.")
 
177
  variable_1 = gr.Textbox(lines=2, label="", visible=False)
178
  variable_2 = gr.Textbox(lines=2, label="", visible=False)
179
  variable_3 = gr.Textbox(lines=2, label="", visible=False)
@@ -186,15 +187,16 @@ def inference_ui():
186
  preview_prompt = gr.Textbox(
187
  show_label=False, interactive=False, elem_id="inference_preview_prompt")
188
 
189
- with gr.Column():
190
- with gr.Row():
191
- generate_btn = gr.Button(
192
- "Generate", variant="primary", label="Generate", elem_id="inference_generate_btn",
193
- )
194
- stop_btn = gr.Button(
195
- "Stop", variant="stop", label="Stop Iterating", elem_id="inference_stop_btn")
196
 
197
- with gr.Column():
 
198
  temperature = gr.Slider(
199
  minimum=0.01, maximum=1.99, value=0.1, step=0.01,
200
  label="Temperature",
@@ -236,6 +238,15 @@ def inference_ui():
236
  elem_id="inference_stream_output",
237
  value=True
238
  )
 
 
 
 
 
 
 
 
 
239
  with gr.Column():
240
  inference_output = gr.Textbox(
241
  lines=12, label="Output", elem_id="inference_output")
@@ -265,7 +276,8 @@ def inference_ui():
265
  max_new_tokens,
266
  stream_output,
267
  ],
268
- outputs=inference_output
 
269
  )
270
  stop_btn.click(fn=None, inputs=None, outputs=None,
271
  cancels=[generate_event])
@@ -330,7 +342,7 @@ def inference_ui():
330
  placement: 'right',
331
  delay: [500, 0],
332
  animation: 'scale-subtle',
333
- content: 'This is the input that will actually be sent to the language model.',
334
  });
335
  });
336
 
@@ -385,7 +397,7 @@ def inference_ui():
385
 
386
  }, 100);
387
 
388
- // Show/hide generate and save button base on the state.
389
  setTimeout(function () {
390
  // Make the '#inference_output > .wrap' element appear
391
  document.getElementById("inference_stop_btn").click();
 
173
  with gr.Row():
174
  with gr.Column():
175
  with gr.Column(elem_id="inference_prompt_box"):
176
+ variable_0 = gr.Textbox(
177
+ lines=2, label="Prompt", placeholder="Tell me about alpecas and llamas.")
178
  variable_1 = gr.Textbox(lines=2, label="", visible=False)
179
  variable_2 = gr.Textbox(lines=2, label="", visible=False)
180
  variable_3 = gr.Textbox(lines=2, label="", visible=False)
 
187
  preview_prompt = gr.Textbox(
188
  show_label=False, interactive=False, elem_id="inference_preview_prompt")
189
 
190
+ # with gr.Column():
191
+ # with gr.Row():
192
+ # generate_btn = gr.Button(
193
+ # "Generate", variant="primary", label="Generate", elem_id="inference_generate_btn",
194
+ # )
195
+ # stop_btn = gr.Button(
196
+ # "Stop", variant="stop", label="Stop Iterating", elem_id="inference_stop_btn")
197
 
198
+ # with gr.Column():
199
+ with gr.Accordion("Options", open=True, elem_id="inference_options_accordion"):
200
  temperature = gr.Slider(
201
  minimum=0.01, maximum=1.99, value=0.1, step=0.01,
202
  label="Temperature",
 
238
  elem_id="inference_stream_output",
239
  value=True
240
  )
241
+
242
+ with gr.Column():
243
+ with gr.Row():
244
+ generate_btn = gr.Button(
245
+ "Generate", variant="primary", label="Generate", elem_id="inference_generate_btn",
246
+ )
247
+ stop_btn = gr.Button(
248
+ "Stop", variant="stop", label="Stop Iterating", elem_id="inference_stop_btn")
249
+
250
  with gr.Column():
251
  inference_output = gr.Textbox(
252
  lines=12, label="Output", elem_id="inference_output")
 
276
  max_new_tokens,
277
  stream_output,
278
  ],
279
+ outputs=inference_output,
280
+ api_name="inference"
281
  )
282
  stop_btn.click(fn=None, inputs=None, outputs=None,
283
  cancels=[generate_event])
 
342
  placement: 'right',
343
  delay: [500, 0],
344
  animation: 'scale-subtle',
345
+ content: 'This is the prompt that will be sent to the language model.',
346
  });
347
  });
348
 
 
397
 
398
  }, 100);
399
 
400
+ // Show/hide generate and stop button based on the state.
401
  setTimeout(function () {
402
  // Make the '#inference_output > .wrap' element appear
403
  document.getElementById("inference_stop_btn").click();
llama_lora/ui/main_page.py CHANGED
@@ -25,10 +25,15 @@ def main_page():
25
  inference_ui()
26
  with gr.Tab("Fine-tuning"):
27
  finetune_ui()
 
 
 
 
28
  if Global.ui_show_sys_info:
29
- gr.Markdown(f"""
30
- <small>Data dir: `{Global.data_dir}`</small>
31
- """)
 
32
  main_page_blocks.load(_js=f"""
33
  function () {{
34
  {popperjs_core_code()}
@@ -121,6 +126,23 @@ def main_page_custom_css():
121
  border-top-right-radius: 0;
122
  }
123
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
  #dataset_plain_text_input_variables_separator textarea,
125
  #dataset_plain_text_input_and_output_separator textarea,
126
  #dataset_plain_text_data_separator textarea {
 
25
  inference_ui()
26
  with gr.Tab("Fine-tuning"):
27
  finetune_ui()
28
+ info = []
29
+ if Global.version:
30
+ info.append(f"LLaMA-LoRA `{Global.version}`")
31
+ info.append(f"Base model: `{Global.base_model}`")
32
  if Global.ui_show_sys_info:
33
+ info.append(f"Data dir: `{Global.data_dir}`")
34
+ gr.Markdown(f"""
35
+ <small>{"&nbsp;&nbsp;·&nbsp;&nbsp;".join(info)}</small>
36
+ """)
37
  main_page_blocks.load(_js=f"""
38
  function () {{
39
  {popperjs_core_code()}
 
126
  border-top-right-radius: 0;
127
  }
128
 
129
+ #inference_options_accordion {
130
+ padding: 0;
131
+ }
132
+ #inference_options_accordion > .label-wrap {
133
+ user-select: none;
134
+ padding: var(--block-padding);
135
+ margin-bottom: 0;
136
+ }
137
+ #inference_options_accordion > *:last-child > .form {
138
+ border-left: 0;
139
+ border-right: 0;
140
+ border-bottom: 0;
141
+ border-top-left-radius: 0;
142
+ border-top-right-radius: 0;
143
+ box-shadow: none;
144
+ }
145
+
146
  #dataset_plain_text_input_variables_separator textarea,
147
  #dataset_plain_text_input_and_output_separator textarea,
148
  #dataset_plain_text_data_separator textarea {