Vokturz committed
Commit e8be103 · 1 Parent(s): fddae32

solve a minor bug

Files changed (1)
  1. src/app.py +16 -14
src/app.py CHANGED
@@ -52,6 +52,19 @@ def get_name(index):
 
 gpu_specs = get_gpu_specs()
 
+_, col, _ = st.columns([1,3,1])
+with col.expander("Information", expanded=True):
+    st.markdown("""- GPU information comes from [TechPowerUp GPU Specs](https://www.techpowerup.com/gpu-specs/)
+- Mainly based on [Model Memory Calculator by hf-accelerate](https://huggingface.co/spaces/hf-accelerate/model-memory-usage)
+using `transformers` library
+- Inference is calculated following [EleutherAI Transformer Math 101](https://blog.eleuther.ai/transformer-math/),
+where is estimated as """)
+
+    st.latex(r"""\text{Memory}_\text{Inference} \approx \text{Model Size} \times 1.2""")
+    st.markdown("""- For LoRa Fine-tuning, I'm asuming a **16-bit** dtype of trainable parameters. The formula (in terms of GB) is""")
+    st.latex(r"\text{Memory}_\text{LoRa} \approx \text{Model Size} + \left(\text{ \# trainable Params}_\text{Billions}\times\frac{16}{8} \times 4\right) \times 1.2")
+    st.markdown("- You can understand `int4` as models in `GPTQ-4bit`, `AWQ-4bit` or `Q4_0 GGUF/GGML` formats")
+
 access_token = st.sidebar.text_input("Access token")
 model_name = st.sidebar.text_input("Model name", value="mistralai/Mistral-7B-v0.1")
 if not model_name:
@@ -84,6 +97,9 @@ min_ram = gpu_info['RAM (GB)'].min()
 max_ram = gpu_info['RAM (GB)'].max()
 ram = st.sidebar.slider("Filter by RAM (GB)", min_ram, max_ram, (10.0, 40.0), step=0.5)
 gpu_info = gpu_info[gpu_info["RAM (GB)"].between(ram[0], ram[1])]
+if len(gpu_info) == 0:
+    st.sidebar.error(f"**{gpu_vendor}** has no GPU in that RAM range")
+    st.stop()
 gpu = st.sidebar.selectbox("GPU", gpu_info['Product Name'].index.tolist(), format_func=lambda x : gpu_specs.iloc[x]['Product Name'])
 gpu_spec = gpu_specs.iloc[gpu]
 gpu_spec.name = 'INFO'
@@ -95,20 +111,6 @@ st.sidebar.dataframe(gpu_spec.T.astype(str))
 memory_table = pd.DataFrame(st.session_state[model_name]).set_index('dtype')
 memory_table['LoRA Fine-Tuning (GB)'] = (memory_table["Total Size (GB)"] +
                                          (memory_table["Parameters (Billion)"]* lora_pct/100 * (16/8)*4)) * 1.2
-
-_, col, _ = st.columns([1,3,1])
-with col.expander("Information", expanded=True):
-    st.markdown("""- GPU information comes from [TechPowerUp GPU Specs](https://www.techpowerup.com/gpu-specs/)
-- Mainly based on [Model Memory Calculator by hf-accelerate](https://huggingface.co/spaces/hf-accelerate/model-memory-usage)
-using `transformers` library
-- Inference is calculated following [EleutherAI Transformer Math 101](https://blog.eleuther.ai/transformer-math/),
-where is estimated as """)
-
-    st.latex(r"""\text{Memory}_\text{Inference} \approx \text{Model Size} \times 1.2""")
-    st.markdown("""- For LoRa Fine-tuning, I'm asuming a **16-bit** dtype of trainable parameters. The formula (in terms of GB) is""")
-    st.latex(r"\text{Memory}_\text{LoRa} \approx \text{Model Size} + \left(\text{ \# trainable Params}_\text{Billions}\times\frac{16}{8} \times 4\right) \times 1.2")
-    st.markdown("- You can understand `int4` as models in `GPTQ-4bit`, `AWQ-4bit` or `Q4_0 GGUF/GGML` formats")
-
 
 _memory_table = memory_table.copy()
 memory_table = memory_table.round(2).T
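
For reference, a minimal standalone sketch of the two estimates the relocated "Information" expander describes, using the same arithmetic as the memory_table computation in the diff (the function names, the 14 GB model size, and the 2% trainable fraction below are illustrative assumptions, not code from src/app.py):

# Illustrative sketch of the memory estimates documented in the expander;
# names and example values are assumptions, not part of the committed code.

def inference_memory_gb(model_size_gb: float) -> float:
    # Inference estimate per EleutherAI Transformer Math 101: model size x 1.2
    return model_size_gb * 1.2

def lora_finetune_memory_gb(model_size_gb: float, params_billion: float,
                            lora_pct: float) -> float:
    # Mirrors the diff's memory_table formula: the trainable fraction of the
    # parameters (lora_pct percent) is kept in 16-bit (16/8 bytes each) with a
    # factor of 4 for gradients/optimizer state, then a 1.2 overhead factor
    # is applied to the whole sum, as in the code above.
    trainable_billion = params_billion * lora_pct / 100
    return (model_size_gb + trainable_billion * (16 / 8) * 4) * 1.2

# Example: a 7B-parameter model stored in float16 (~14 GB), 2% trainable params
print(round(inference_memory_gb(14.0), 2))                 # 16.8
print(round(lora_finetune_memory_gb(14.0, 7.0, 2.0), 2))   # (14 + 0.14*2*4) * 1.2 = 18.14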