leonardlin committed
Commit 1a6b000 (1 parent: d259634)

Update app.py

Files changed (1): app.py (+4, -6)
app.py CHANGED
@@ -9,8 +9,7 @@ from threading import Thread
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
 # Model
-model_name = "TinyLlama/TinyLlama-1.1B-Chat-v0.3"
-model_name = "mistralai/Mistral-7B-Instruct-v0.1"
+model_name = "augmxnt/shisa-7b-v1"
 
 # UI Settings
 title = "Shisa 7B"
@@ -18,15 +17,14 @@ description = "Test out Shisa 7B in either English or Japanese."
 placeholder = "Type Here / ここに入力してください"
 examples = [
     "What's the best ramen in Tokyo?",
-    "東京でおすすめのラーメン屋さんを教えていただけますか。",
+    "あなたは熱狂的なポケモンファンです。",
     "東京でおすすめのラーメン屋ってどこ?",
 ]
 
 # LLM Settings
-system_prompt = 'You are a helpful, friendly assistant.'
+system_prompt = 'あなたは役に立つアシスタントです。'
 chat_history = [{"role": "system", "content": system_prompt}]
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-tokenizer.chat_template = "{%- for idx in range(0, messages|length) -%}\n{%- if messages[idx]['role'] == 'user' -%}\n{%- if idx > 1 -%}\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\n{%- else -%}\n{{- messages[idx]['content'] + ' [/INST]' -}}\n{%- endif -%}\n{% elif messages[idx]['role'] == 'system' %}\n{{- '[INST] <<SYS>>\\n' + messages[idx]['content'] + '\\n<</SYS>>\\n\\n' -}}\n{%- elif messages[idx]['role'] == 'assistant' -%}\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\n{% endif %}\n{% endfor %}\n"
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
     torch_dtype=torch.bfloat16,
@@ -78,6 +76,6 @@ chat_interface = gr.ChatInterface(
 # https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI/blob/main/app.py#L219 - we use this with construction b/c Gradio barfs on autoreload otherwise
 with gr.Blocks() as demo:
     chat_interface.render()
-    gr.Markdown("You can try asking this question in English, formal Japanese, and informal Japanese. You might need to ask it to reply informally with something like もっと友達みたいに話そうよ。あんまり堅苦しくなくて。to get informal replies. We limit output to 200 tokens.")
+    gr.Markdown("You can try asking this question in Japanese or English. We limit output to 200 tokens.")
 
 demo.queue().launch()
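The removed tokenizer.chat_template override reproduced Llama-2-style [INST]/<<SYS>> prompt formatting by hand; it can be dropped presumably because augmxnt/shisa-7b-v1 ships its own chat template in its tokenizer config. A minimal sketch of how the bundled template gets applied (the message contents are taken from the app; everything else is standard transformers API):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("augmxnt/shisa-7b-v1")

# apply_chat_template renders the message list with whatever Jinja template
# the tokenizer ships, so no manual [INST]/<<SYS>> string-building is needed.
messages = [
    {"role": "system", "content": "あなたは役に立つアシスタントです。"},  # "You are a helpful assistant."
    {"role": "user", "content": "東京でおすすめのラーメン屋ってどこ?"},  # "Where's a good ramen shop in Tokyo?"
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)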
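The unchanged imports in the hunk context (from threading import Thread, TextIteratorStreamer) point at the usual non-blocking streaming setup: model.generate runs in a background thread while the UI iterates over tokens as they are produced. A sketch of that pattern, assuming the 200-token cap stated in the app's own gr.Markdown note (the helper name stream_reply is illustrative, not from the app):

import torch
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_name = "augmxnt/shisa-7b-v1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)

def stream_reply(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # skip_prompt=True makes the streamer yield only newly generated text,
    # not an echo of the input prompt.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=200)
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    partial = ""
    for chunk in streamer:  # blocks until the generate thread emits new tokens
        partial += chunk
        yield partial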
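The with gr.Blocks() as demo: wrapper follows the construction linked in the code comment: build the gr.ChatInterface up front, then render() it inside a Blocks context so extra components such as the trailing gr.Markdown can sit below the chat, and so Gradio's autoreload does not barf. A stripped-down sketch of that pattern (the echo handler is a placeholder for the real streaming callback):

import gradio as gr

def respond(message, history):
    # placeholder handler; the real app streams tokens from the model here
    return f"echo: {message}"

chat_interface = gr.ChatInterface(
    fn=respond,
    title="Shisa 7B",
    examples=["What's the best ramen in Tokyo?"],
)

with gr.Blocks() as demo:
    chat_interface.render()
    gr.Markdown("You can try asking this question in Japanese or English.")

demo.queue().launch()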