Vokturz committed on
Commit
7e1cbde
1 Parent(s): 8f4ec63

fixed mem_LORA's formula

Files changed (1)
  1. src/app.py +1 -1
src/app.py CHANGED
@@ -116,7 +116,7 @@ with col.expander("Information", expanded=True):
 
     st.latex(r"""\text{Memory}_\text{Inference} \approx \text{Model Size} \times 1.2""")
     st.markdown("""- For LoRa Fine-tuning, I'm asuming a **16-bit** dtype of trainable parameters. The formula (in terms of GB) is""")
-    st.latex(r"\text{Memory}_\text{LoRa} \approx \text{Model Size} + \left(\text{ \# trainable Params}_\text{Billions}\times\frac{16}{8} \times 4\right) \times 1.2")
+    st.latex(r"\text{Memory}_\text{LoRa} \approx \left(\text{Model Size} + \text{ \# trainable Params}_\text{Billions}\times\frac{16}{8} \times 4\right) \times 1.2")
 
     access_token = st.sidebar.text_input("Access token")
 
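The fix moves the opening parenthesis so that the 1.2 overhead factor scales the whole sum (model weights plus trainable LoRA parameters), consistent with the inference formula above. A minimal sketch of the numerical difference, using a hypothetical 13 GB model with 0.1 B trainable parameters (illustrative values, not from the repo):

```python
# Compare the old and corrected LoRA memory estimates (in GB).
# Model size and trainable-parameter count below are hypothetical examples.

def mem_lora_old(model_size_gb: float, trainable_params_b: float) -> float:
    # Old formula: the 1.2 factor only scaled the LoRA-parameter term.
    return model_size_gb + (trainable_params_b * (16 / 8) * 4) * 1.2

def mem_lora_fixed(model_size_gb: float, trainable_params_b: float) -> float:
    # Fixed formula: the 1.2 factor scales the entire sum,
    # matching Memory_Inference ≈ Model Size × 1.2.
    return (model_size_gb + trainable_params_b * (16 / 8) * 4) * 1.2

model_size_gb = 13.0       # e.g. a ~7B-parameter model in 16-bit precision
trainable_params_b = 0.1   # 0.1 billion trainable LoRA parameters

print(mem_lora_old(model_size_gb, trainable_params_b))    # 13.96
print(mem_lora_fixed(model_size_gb, trainable_params_b))  # 16.56
```

With these example numbers, the corrected formula reports about 2.6 GB more, because the overhead factor now applies to the base model weights as well.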