Vokturz committed
Commit 1ba133e
Parent: ff369a5

update default model list

Files changed (1): src/app.py (+11 -11)
src/app.py CHANGED
@@ -10,8 +10,16 @@ from huggingface_hub import login
 st.set_page_config(page_title='Can you run it? LLM version', layout="wide", initial_sidebar_state="expanded")
 
 model_list = [
-    "mistralai/Mistral-7B-Instruct-v0.1",
+    "mistral-community/Mistral-7B-v0.2",
     "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "mistral-community/Mixtral-8x22B-v0.1",
+    "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+    "CohereForAI/c4ai-command-r-plus",
+    "CohereForAI/c4ai-command-r-v01",
+    "hpcai-tech/grok-1",
+    "NexaAIDev/Octopus-v2",
+    "HuggingFaceH4/zephyr-7b-gemma-v0.1",
+    "HuggingFaceH4/starchat2-15b-v0.1",
     "deepseek-ai/deepseek-coder-6.7b-instruct",
     "deepseek-ai/deepseek-coder-1.3b-base",
     "microsoft/phi-2",
@@ -20,17 +28,9 @@ model_list = [
     "codellama/CodeLlama-13b-hf",
     "codellama/CodeLlama-34b-hf",
     "Phind/Phind-CodeLlama-34B-v2",
-    "WizardLM/WizardCoder-Python-34B-V1.0",
     "TheBloke/Llama-2-7B-fp16",
     "TheBloke/Llama-2-13B-fp16",
     "TheBloke/Llama-2-70B-fp16",
-    "Gryphe/MythoMax-L2-13b",
-    "uukuguy/speechless-llama2-hermes-orca-platypus-wizardlm-13b",
-    "lmsys/vicuna-7b-v1.5",
-    "lmsys/vicuna-13b-v1.5-16k",
-    "lmsys/longchat-7b-v1.5-32k",
-    "tiiuae/falcon-7B-Instruct",
-    "tiiuae/falcon-7B",
     "tiiuae/falcon-40B",
     "tiiuae/falcon-40B-Instruct",
     "tiiuae/falcon-180B",
@@ -182,8 +182,8 @@ _memory_table.columns = ['dtype', 'Variable', 'Number of GPUs']
 col1, col2 = st.columns([1,1.3])
 
 if gpu_vendor == "Apple":
-    col.warning("""For M1/M2 Apple chips, PyTorch uses [Metal Performance Shaders (MPS)](https://huggingface.co/docs/accelerate/usage_guides/mps) as backend.\\
-                Remember that Apple M1/M2 chips share memory between CPU and GPU.""", icon="⚠️")
+    col.warning("""For M1/M2/M3 Apple chips, PyTorch uses [Metal Performance Shaders (MPS)](https://huggingface.co/docs/accelerate/usage_guides/mps) as backend.\\
+                Remember that Apple M1/M2/M3 chips share memory between CPU and GPU.""", icon="⚠️")
 with col1:
     st.write(f"#### [{model_name}](https://huggingface.co/{model_name}) ({custom_ceil(memory_table.iloc[3,0],1):.1f}B)")
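A note on the reworded warning in the last hunk: on Apple silicon, PyTorch reaches the GPU through the MPS backend, and because M1/M2/M3 chips use unified memory, model weights draw from the same pool as the rest of the system. A minimal sketch of device selection under that assumption follows; the pick_device helper is illustrative and not part of this commit.

import torch

def pick_device() -> torch.device:
    # On Apple M1/M2/M3 chips, the GPU is reached through the MPS backend;
    # CPU and GPU share one unified memory pool, so "GPU memory" is a
    # slice of system RAM rather than dedicated VRAM.
    if torch.backends.mps.is_available():
        return torch.device("mps")
    if torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")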
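The st.write context line formats the parameter count with custom_ceil(memory_table.iloc[3,0], 1), a helper defined outside this diff. A plausible reading, sketched here purely as an assumption, is a ceiling at a fixed number of decimal places so the displayed size never understates the model.

import math

# Assumption: custom_ceil rounds x *up* at `digits` decimal places,
# e.g. custom_ceil(6.74, 1) -> 6.8, so a 6.74B-parameter model is
# shown as "6.8B" rather than "6.7B".
def custom_ceil(x: float, digits: int = 0) -> float:
    factor = 10 ** digits
    return math.ceil(x * factor) / factor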