mohcineelharras commited on
Commit
90e375f
1 Parent(s): 34fe6d3

timers, lighter football laws file

Browse files
Files changed (5) hide show
  1. README.md +2 -2
  2. app.py +13 -1
  3. data/football_laws.pdf +0 -3
  4. data/football_laws.txt +28 -0
  5. test.py +0 -15
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  title: Llama Index Docs Spaces
3
- emoji: 🌍
4
  colorFrom: purple
5
  colorTo: indigo
6
  sdk: streamlit
@@ -9,5 +9,5 @@ app_file: app.py
9
  pinned: false
10
  ---
11
 
12
- How to pick chunks that are pertinent ?
13
  How to stream response word by word ?
 
1
  ---
2
  title: Llama Index Docs Spaces
3
+ emoji: 📚
4
  colorFrom: purple
5
  colorTo: indigo
6
  sdk: streamlit
 
9
  pinned: false
10
  ---
11
 
12
+ How to pick chunks that are pertinent, ie help the model respond ?
13
  How to stream response word by word ?
app.py CHANGED
@@ -3,6 +3,7 @@
3
  import streamlit as st
4
  #import torch
5
  import os
 
6
  import logging
7
  import sys
8
  from llama_index.callbacks import CallbackManager, LlamaDebugHandler
@@ -189,7 +190,7 @@ with st.sidebar:
189
  tab1, tab2, tab3 = st.tabs(["LLM only", "LLM RAG QA with database", "One single document Q&A"])
190
 
191
  # -----------------------------------LLM only---------------------------------------------
192
-
193
  with tab1:
194
  st.title("💬 LLM only")
195
  prompt = st.text_area(
@@ -197,6 +198,7 @@ with tab1:
197
  placeholder="How do miners contribute to the security of the blockchain ?",
198
  )
199
  if prompt:
 
200
  contextual_prompt = st.session_state.memory + "\n" + prompt
201
  response = llm.complete(prompt,max_tokens=100, temperature=0, top_p=0.95, top_k=10)
202
  text_response = response
@@ -205,6 +207,8 @@ with tab1:
205
  st.session_state.memory = f"Prompt: {contextual_prompt}\nResponse:\n {text_response}"
206
  with open("short_memory.txt", 'w') as file:
207
  file.write(st.session_state.memory)
 
 
208
 
209
  # -----------------------------------LLM Q&A-------------------------------------------------
210
 
@@ -216,6 +220,7 @@ with tab2:
216
  placeholder="Who is Mohcine ?",
217
  )
218
  if prompt:
 
219
  contextual_prompt = st.session_state.memory + "\n" + prompt
220
  response = query_engine.query(contextual_prompt)
221
  text_response = response.response
@@ -228,15 +233,19 @@ with tab2:
228
  dict_source_i.update({"Text":node.node.text})
229
  st.write("Source n°"+str(i+1), dict_source_i)
230
  break
 
 
231
  st.session_state.memory = f"Prompt: {contextual_prompt}\nResponse:\n {text_response}"
232
  with open("short_memory.txt", 'w') as file:
233
  file.write(st.session_state.memory)
234
 
235
 
 
236
  # -----------------------------------Upload File Q&A-----------------------------------------
237
 
238
  with tab3:
239
  st.title("📝 One single document Q&A with Llama Index using local open llms")
 
240
  # if st.button('Reinitialize Query Engine', key='reinit_engine'):
241
  # del query_engine_upload_doc
242
  # st.write("Query engine reinitialized.")
@@ -271,6 +280,7 @@ with tab3:
271
  pass
272
 
273
  if uploaded_file and question and api_server_info:
 
274
  contextual_prompt = st.session_state.memory + "\n" + question
275
  response = query_engine_upload_doc.query(contextual_prompt)
276
  text_response = response.response
@@ -285,6 +295,8 @@ with tab3:
285
  dict_source_i = node.node.metadata
286
  dict_source_i.update({"Text":node.node.text})
287
  st.write("Source n°"+str(i+1), dict_source_i)
 
 
288
  #st.write("Source n°"+str(i))
289
  #st.write("Meta Data :", node.node.metadata)
290
  #st.write("Text :", node.node.text)
 
3
  import streamlit as st
4
  #import torch
5
  import os
6
+ import time
7
  import logging
8
  import sys
9
  from llama_index.callbacks import CallbackManager, LlamaDebugHandler
 
190
  tab1, tab2, tab3 = st.tabs(["LLM only", "LLM RAG QA with database", "One single document Q&A"])
191
 
192
  # -----------------------------------LLM only---------------------------------------------
193
+ #st.title("Demo of an offline Doc chatter")
194
  with tab1:
195
  st.title("💬 LLM only")
196
  prompt = st.text_area(
 
198
  placeholder="How do miners contribute to the security of the blockchain ?",
199
  )
200
  if prompt:
201
+ start_time_tab1 = time.time()
202
  contextual_prompt = st.session_state.memory + "\n" + prompt
203
  response = llm.complete(prompt,max_tokens=100, temperature=0, top_p=0.95, top_k=10)
204
  text_response = response
 
207
  st.session_state.memory = f"Prompt: {contextual_prompt}\nResponse:\n {text_response}"
208
  with open("short_memory.txt", 'w') as file:
209
  file.write(st.session_state.memory)
210
+ elapsed_time = time.time() - start_time_tab1
211
+ st.write(f"**Elapsed time**: {elapsed_time:.2f} seconds")
212
 
213
  # -----------------------------------LLM Q&A-------------------------------------------------
214
 
 
220
  placeholder="Who is Mohcine ?",
221
  )
222
  if prompt:
223
+ start_time_tab2 = time.time()
224
  contextual_prompt = st.session_state.memory + "\n" + prompt
225
  response = query_engine.query(contextual_prompt)
226
  text_response = response.response
 
233
  dict_source_i.update({"Text":node.node.text})
234
  st.write("Source n°"+str(i+1), dict_source_i)
235
  break
236
+ elapsed_time = time.time() - start_time_tab2
237
+ st.write(f"**Elapsed time**: {elapsed_time:.2f} seconds")
238
  st.session_state.memory = f"Prompt: {contextual_prompt}\nResponse:\n {text_response}"
239
  with open("short_memory.txt", 'w') as file:
240
  file.write(st.session_state.memory)
241
 
242
 
243
+
244
  # -----------------------------------Upload File Q&A-----------------------------------------
245
 
246
  with tab3:
247
  st.title("📝 One single document Q&A with Llama Index using local open llms")
248
+ st.write("For demonstration purposes, it is preferable to load light files to test with")
249
  # if st.button('Reinitialize Query Engine', key='reinit_engine'):
250
  # del query_engine_upload_doc
251
  # st.write("Query engine reinitialized.")
 
280
  pass
281
 
282
  if uploaded_file and question and api_server_info:
283
+ start_time_tab3 = time.time()
284
  contextual_prompt = st.session_state.memory + "\n" + question
285
  response = query_engine_upload_doc.query(contextual_prompt)
286
  text_response = response.response
 
295
  dict_source_i = node.node.metadata
296
  dict_source_i.update({"Text":node.node.text})
297
  st.write("Source n°"+str(i+1), dict_source_i)
298
+ elapsed_time = time.time() - start_time_tab3
299
+ st.write(f"**Elapsed time**: {elapsed_time:.2f} seconds")
300
  #st.write("Source n°"+str(i))
301
  #st.write("Meta Data :", node.node.metadata)
302
  #st.write("Text :", node.node.text)
data/football_laws.pdf DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:5670b3acb2944237a081390cba8231c34488dd02891708802e00e6ab2bebdc49
3
- size 2502068
 
 
 
 
data/football_laws.txt ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This file was generated with chatgpt for demonstration purposes
2
+ The following are some rules of football, perhaps the most well-known ones.
3
+ Field of Play: Rectangular, with distinct lines. Length: 90-120m, Width: 45-90m.
4
+ The Ball: Spherical, circumference 68-70 cm, weight 410-450g at start.
5
+ Number of Players: Two teams of 11 players (including a goalkeeper). Minimum 7 players required to continue.
6
+ Player’s Equipment: Basic equipment includes jersey, shorts, socks, shin guards, and footwear.
7
+ Referee: Enforces the Laws of the Game; decisions are final.
8
+ Assistant Referees: Assist the referee; mainly in offside and throw-in decisions.
9
+ Duration of the Match: Two halves of 45 minutes each, plus added time for stoppages.
10
+ Start and Restart of Play: Coin toss decides kickoff and goal choice. Kickoff after goals and at half-time.
11
+ Ball In and Out of Play: Out of play when it fully crosses goal lines/end lines; restarts depend on how it went out.
12
+ Determining the Outcome of a Match: Winner determined by more goals scored. Ties often allowed.
13
+ Offside Rule: Player is offside if nearer to the opponent's goal line than the ball and second-last opponent.
14
+ Fouls and Misconduct: Direct and indirect free kicks and penalty kicks awarded for specific offences.
15
+ Free Kicks: Direct (goal can be scored directly) and indirect (requires another player to touch the ball first).
16
+ Penalty Kicks: Awarded for fouls within the penalty area; taken from the penalty spot.
17
+ Throw-in: Awarded when the ball crosses a sideline; must be thrown in with both hands.
18
+ Goal Kick: Awarded when the attacking team last touches the ball before it crosses the goal line.
19
+ Corner Kick: Awarded to attacking team when ball crosses the goal line off a defender.
20
+ Handball: Deliberate ball handling (except goalkeeper in own penalty area) is penalized.
21
+ Yellow & Red Cards: Caution (yellow) and send-off (red) for misconduct or rule violations.
22
+ Offences and Sanctions: Rules for penalizing various forms of foul play.
23
+ The Technical Area: Defines area where coaches and substitutes sit; includes behavior guidelines.
24
+ Video Assistant Referee (VAR): Used to review decisions on goals, penalties, direct red cards, and mistaken identity.
25
+ Goalkeeper Restrictions: Can't handle the ball outside the penalty area; limitations on handling back-passes.
26
+ Substitutions: Limited number allowed per match; procedure for player's entry and exit.
27
+ Respect: Emphasis on sportsmanship, respect for opponents, and fair play.
28
+
test.py DELETED
@@ -1,15 +0,0 @@
1
- import streamlit as st
2
-
3
- def update_text_area():
4
- # Your code to update the value of the text area
5
- st.session_state.text = "Updated text"
6
-
7
- if "text" not in st.session_state:
8
- st.session_state["text"] = ""
9
-
10
- text_area = st.text_area("Enter text", value=st.session_state["text"], height=200)
11
-
12
- # Button to trigger the callback function
13
- if st.button("Update text"):
14
- update_text_area()
15
-