MatteoScript committed on
Commit
3ee8c80
1 Parent(s): bc631cf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -6
app.py CHANGED
@@ -57,7 +57,7 @@ def init_state() :
57
  st.session_state.repetion_penalty = 1
58
 
59
  if "rag_enabled" not in st.session_state :
60
- st.session_state.rag_enabled = False
61
 
62
  if "chat_bot" not in st.session_state :
63
  st.session_state.chat_bot = "Mixtral 8x7B v0.1"
@@ -80,7 +80,6 @@ def sidebar() :
80
  st.session_state.chat_bot = st.sidebar.radio('Seleziona Modello:', [key for key, value in CHAT_BOTS.items() ])
81
  st.session_state.temp = st.slider(label="Creatività", min_value=0.0, max_value=1.0, step=0.1, value=0.9)
82
  st.session_state.max_tokens = st.slider(label="Lunghezza Output", min_value = 64, max_value=2048, step= 32, value=512)
83
- st.session_state.repetion_penalty = st.slider(label="Penalità Ripetizione", min_value=0., max_value=1., step=0.1, value=1. )
84
 
85
  with st.sidebar:
86
  retrieval_settings()
@@ -127,12 +126,16 @@ def stream_handler(chat_stream, placeholder) :
127
  tokens_per_second = total_tokens_processed // elapsed_time
128
  len_response = (len(prompt.split()) + len(full_response.split())) * 1.25
129
  col1, col2, col3 = st.columns(3)
130
-
131
  with col1 :
132
- st.write(f"**{tokens_per_second} token/secondi**")
133
-
134
  with col2 :
135
  st.write(f"**{int(len_response)} tokens generati**")
 
 
 
 
136
 
137
  return full_response
138
 
@@ -161,4 +164,4 @@ if prompt := st.chat_input("Chatta con BonsiAI..."):
161
 
162
  st.session_state.history.append([prompt, full_response])
163
  st.session_state.messages.append({"role": "assistant", "content": full_response})
164
- st.success('Generazione Completata', icon="✅")
 
57
  st.session_state.repetion_penalty = 1
58
 
59
  if "rag_enabled" not in st.session_state :
60
+ st.session_state.rag_enabled = True
61
 
62
  if "chat_bot" not in st.session_state :
63
  st.session_state.chat_bot = "Mixtral 8x7B v0.1"
 
80
  st.session_state.chat_bot = st.sidebar.radio('Seleziona Modello:', [key for key, value in CHAT_BOTS.items() ])
81
  st.session_state.temp = st.slider(label="Creatività", min_value=0.0, max_value=1.0, step=0.1, value=0.9)
82
  st.session_state.max_tokens = st.slider(label="Lunghezza Output", min_value = 64, max_value=2048, step= 32, value=512)
 
83
 
84
  with st.sidebar:
85
  retrieval_settings()
 
126
  tokens_per_second = total_tokens_processed // elapsed_time
127
  len_response = (len(prompt.split()) + len(full_response.split())) * 1.25
128
  col1, col2, col3 = st.columns(3)
129
+
130
  with col1 :
131
+ st.write(f"**{elapsed_time} secondi**")
132
+
133
  with col2 :
134
  st.write(f"**{int(len_response)} tokens generati**")
135
+
136
+ with col3 :
137
+ st.write(f"**{tokens_per_second} token/secondi**")
138
+
139
 
140
  return full_response
141
 
 
164
 
165
  st.session_state.history.append([prompt, full_response])
166
  st.session_state.messages.append({"role": "assistant", "content": full_response})
167
+ st.success('Generazione Completata')