gsarti committed on
Commit
3a616f6
β€’
1 Parent(s): cd7e21b

Add Phi3 preset

Browse files
Files changed (2) hide show
  1. app.py +24 -8
  2. presets.py +10 -0
app.py CHANGED
@@ -22,7 +22,7 @@ from contents import (
22
  from gradio_highlightedtextbox import HighlightedTextbox
23
  from gradio_modal import Modal
24
  from presets import (
25
- set_chatml_preset,
26
  set_cora_preset,
27
  set_default_preset,
28
  set_mbart_mmt_preset,
@@ -244,10 +244,14 @@ with gr.Blocks(css=custom_css) as demo:
244
  "Preset for the <a href='https://huggingface.co/gsarti/cora_mgen' target='_blank'>CORA Multilingual QA</a> model.\nUses special templates for inputs."
245
  )
246
  with gr.Column():
247
- chatml_template = gr.Button("Qwen ChatML", variant="secondary")
248
  gr.Markdown(
249
- "Preset for models using the <a href='https://github.com/MicrosoftDocs/azure-docs/blob/main/articles/ai-services/openai/includes/chat-markup-language.md' target='_blank'>ChatML conversational template</a>.\nUses <code><|im_start|></code>, <code><|im_end|></code> special tokens."
250
  )
 
 
 
 
251
  with gr.Row(equal_height=True):
252
  with gr.Column(scale=1):
253
  mbart_mmt_template = gr.Button(
@@ -289,7 +293,7 @@ with gr.Blocks(css=custom_css) as demo:
289
  )
290
  gr.Markdown(
291
  "Preset for models using the <a href='https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2' target='_blank'>Mistral Instruct template</a>.\nUses <code>[INST]...[/INST]</code> special tokens."
292
- )
293
  gr.Markdown("## βš™οΈ PECoRe Parameters")
294
  with gr.Row(equal_height=True):
295
  with gr.Column():
@@ -592,8 +596,8 @@ with gr.Blocks(css=custom_css) as demo:
592
 
593
  check_enable_large_models.input(
594
  lambda checkbox, *buttons: [gr.Button(interactive=checkbox) for _ in buttons],
595
- inputs=[check_enable_large_models, zephyr_preset, towerinstruct_template, gemma_template, mistral_instruct_template],
596
- outputs=[zephyr_preset, towerinstruct_template, gemma_template, mistral_instruct_template],
597
  )
598
 
599
  outputs_to_reset = [
@@ -646,11 +650,23 @@ with gr.Blocks(css=custom_css) as demo:
646
  outputs=[model_name_or_path, input_template, output_template, tokenizer_kwargs],
647
  ).success(preload_model, inputs=load_model_args, cancels=load_model_event)
648
 
649
- chatml_template.click(**reset_kwargs).then(
650
- set_chatml_preset,
 
 
 
 
 
 
 
 
 
 
 
651
  outputs=[
652
  model_name_or_path,
653
  input_template,
 
654
  contextless_input_template,
655
  special_tokens_to_keep,
656
  generation_kwargs,
 
22
  from gradio_highlightedtextbox import HighlightedTextbox
23
  from gradio_modal import Modal
24
  from presets import (
25
+ set_phi3_preset,
26
  set_cora_preset,
27
  set_default_preset,
28
  set_mbart_mmt_preset,
 
244
  "Preset for the <a href='https://huggingface.co/gsarti/cora_mgen' target='_blank'>CORA Multilingual QA</a> model.\nUses special templates for inputs."
245
  )
246
  with gr.Column():
247
+ phi3_preset = gr.Button("Phi-3", variant="secondary", interactive=False)
248
  gr.Markdown(
249
+ "Preset for the <a href='https://huggingface.co/microsoft/Phi-3-mini-4k-instruct' target='_blank'>Phi-3</a> conversational model.\nUses <code><|user|></code>, <code><|system|></code>, <code><|assistant|></code> and <code><|end|></code> special tokens."
250
  )
251
+ #chatml_template = gr.Button("Qwen ChatML", variant="secondary")
252
+ #gr.Markdown(
253
+ # "Preset for models using the <a href='https://github.com/MicrosoftDocs/azure-docs/blob/main/articles/ai-services/openai/includes/chat-markup-language.md' target='_blank'>ChatML conversational template</a>.\nUses <code><|im_start|></code>, <code><|im_end|></code> special tokens."
254
+ #)
255
  with gr.Row(equal_height=True):
256
  with gr.Column(scale=1):
257
  mbart_mmt_template = gr.Button(
 
293
  )
294
  gr.Markdown(
295
  "Preset for models using the <a href='https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2' target='_blank'>Mistral Instruct template</a>.\nUses <code>[INST]...[/INST]</code> special tokens."
296
+ )
297
  gr.Markdown("## βš™οΈ PECoRe Parameters")
298
  with gr.Row(equal_height=True):
299
  with gr.Column():
 
596
 
597
  check_enable_large_models.input(
598
  lambda checkbox, *buttons: [gr.Button(interactive=checkbox) for _ in buttons],
599
+ inputs=[check_enable_large_models, phi3_preset, zephyr_preset, towerinstruct_template, gemma_template, mistral_instruct_template],
600
+ outputs=[phi3_preset, zephyr_preset, towerinstruct_template, gemma_template, mistral_instruct_template],
601
  )
602
 
603
  outputs_to_reset = [
 
650
  outputs=[model_name_or_path, input_template, output_template, tokenizer_kwargs],
651
  ).success(preload_model, inputs=load_model_args, cancels=load_model_event)
652
 
653
+ # chatml_template.click(**reset_kwargs).then(
654
+ # set_chatml_preset,
655
+ # outputs=[
656
+ # model_name_or_path,
657
+ # input_template,
658
+ # contextless_input_template,
659
+ # special_tokens_to_keep,
660
+ # generation_kwargs,
661
+ # ],
662
+ # ).success(preload_model, inputs=load_model_args, cancels=load_model_event)
663
+
664
+ phi3_preset.click(**reset_kwargs).then(
665
+ set_phi3_preset,
666
  outputs=[
667
  model_name_or_path,
668
  input_template,
669
+ decoder_input_output_separator,
670
  contextless_input_template,
671
  special_tokens_to_keep,
672
  generation_kwargs,
presets.py CHANGED
@@ -92,6 +92,16 @@ def set_mistral_instruct_preset():
92
  '{\n\t"max_new_tokens": 50\n}', # generation_kwargs
93
  )
94
 
 
 
 
 
 
 
 
 
 
 
95
  def update_code_snippets_fn(
96
  input_current_text: str,
97
  input_context_text: str,
 
92
  '{\n\t"max_new_tokens": 50\n}', # generation_kwargs
93
  )
94
 
95
def set_phi3_preset():
    """Preset values for the microsoft/Phi-3-mini-4k-instruct chat model.

    Returns a tuple of (model_name_or_path, input_template,
    decoder_input_output_separator, input_current_text_template,
    special_tokens_to_keep, generation_kwargs) in the same order used by
    the other ``set_*_preset`` helpers in this module.
    """
    # Phi-3 chat format: <|system|>/<|user|>/<|assistant|> turns, each closed by <|end|>.
    # The system prompt is baked into the templates up front so the UI templates
    # only expose {context} and {current} placeholders.
    contextual_template = (
        "<|system|>\n{system_prompt}<|end|>\n"
        "<|user|>\n{context}\n\n{current}<|end|>\n"
        "<|assistant|>"
    ).replace("{system_prompt}", SYSTEM_PROMPT)
    contextless_template = (
        "<|system|>\n{system_prompt}<|end|>\n"
        "<|user|>\n{current}<|end|>\n"
        "<|assistant|>"
    ).replace("{system_prompt}", SYSTEM_PROMPT)
    return (
        "microsoft/Phi-3-mini-4k-instruct",  # model_name_or_path
        contextual_template,  # input_template
        "\n",  # decoder_input_output_separator
        contextless_template,  # input_current_text_template
        ["<|system|>", "<|end|>", "<|assistant|>", "<|user|>"],  # special_tokens_to_keep
        '{\n\t"max_new_tokens": 50\n}',  # generation_kwargs
    )
+
105
  def update_code_snippets_fn(
106
  input_current_text: str,
107
  input_context_text: str,