Vokturz committed on
Commit
5c9514a
1 Parent(s): d37299b

added a list of some models

Browse files
Files changed (2) hide show
  1. requirements.txt +1 -0
  2. src/app.py +31 -1
requirements.txt CHANGED
@@ -3,4 +3,5 @@ transformers @ git+https://github.com/huggingface/transformers
3
  huggingface_hub
4
  pandas
5
  plotly
 
6
  einops==0.6.1
 
3
  huggingface_hub
4
  pandas
5
  plotly
6
+ streamlit-datalist
7
  einops==0.6.1
src/app.py CHANGED
@@ -1,4 +1,5 @@
1
  import streamlit as st
 
2
  import pandas as pd
3
  from utils import extract_from_url, get_model, calculate_memory
4
  import plotly.express as px
@@ -7,6 +8,33 @@ import gc
7
 
8
  st.set_page_config(page_title='Can you run it? LLM version', layout="wide", initial_sidebar_state="expanded")
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  st.title("Can you run it? LLM version")
11
 
12
  percentage_width_main = 80
@@ -77,7 +105,9 @@ with col.expander("Information", expanded=True):
77
  st.latex(r"\text{Memory}_\text{LoRa} \approx \text{Model Size} + \left(\text{ \# trainable Params}_\text{Billions}\times\frac{16}{8} \times 4\right) \times 1.2")
78
 
79
  access_token = st.sidebar.text_input("Access token")
80
- model_name = st.sidebar.text_input("Model name", value="mistralai/Mistral-7B-v0.1")
 
 
81
  if not model_name:
82
  st.info("Please enter a model name")
83
  st.stop()
 
1
  import streamlit as st
2
+ from streamlit_datalist import stDatalist
3
  import pandas as pd
4
  from utils import extract_from_url, get_model, calculate_memory
5
  import plotly.express as px
 
8
 
9
  st.set_page_config(page_title='Can you run it? LLM version', layout="wide", initial_sidebar_state="expanded")
10
 
11
+ model_list = [
12
+ "mistralai/Mistral-7B-v0.1",
13
+ "mistralai/Mistral-7B-Instruct-v0.1",
14
+ "ehartford/samantha-mistral-7b",
15
+ "SkunkworksAI/Mistralic-7B-1",
16
+ "microsoft/phi-1_5",
17
+ "PY007/TinyLlama-1.1B-intermediate-step-480k-1T",
18
+ "codellama/CodeLlama-7b-hf",
19
+ "codellama/CodeLlama-13b-hf",
20
+ "codellama/CodeLlama-34b-hf",
21
+ "Phind/Phind-CodeLlama-34B-v2",
22
+ "WizardLM/WizardCoder-Python-34B-V1.0",
23
+ "TheBloke/Llama-2-7B-fp16",
24
+ "TheBloke/Llama-2-13B-fp16",
25
+ "TheBloke/Llama-2-70B-fp16",
26
+ "Gryphe/MythoMax-L2-13b",
27
+ "uukuguy/speechless-llama2-hermes-orca-platypus-wizardlm-13b",
28
+ "lmsys/vicuna-7b-v1.5",
29
+ "lmsys/vicuna-13b-v1.5-16k",
30
+ "lmsys/longchat-7b-v1.5-32k",
31
+ "tiiuae/falcon-7B-Instruct",
32
+ "tiiuae/falcon-7B",
33
+ "tiiuae/falcon-40B",
34
+ "tiiuae/falcon-40B-Instruct",
35
+ "tiiuae/falcon-180B",
36
+ "tiiuae/falcon-180B-Chat",
37
+ ]
38
  st.title("Can you run it? LLM version")
39
 
40
  percentage_width_main = 80
 
105
  st.latex(r"\text{Memory}_\text{LoRa} \approx \text{Model Size} + \left(\text{ \# trainable Params}_\text{Billions}\times\frac{16}{8} \times 4\right) \times 1.2")
106
 
107
  access_token = st.sidebar.text_input("Access token")
108
+ #model_name = st.sidebar.text_input("Model name", value="mistralai/Mistral-7B-v0.1")
109
+ with st.sidebar.container():
110
+ model_name = stDatalist("Model name (Press Enter to apply)", model_list, index=0)
111
  if not model_name:
112
  st.info("Please enter a model name")
113
  st.stop()