AstraBert committed
Commit fa20ff3 • 1 Parent(s): 96b6df4
Files changed (3)
  1. __pycache__/utils.cpython-310.pyc +0 -0
  2. app.py +49 -33
  3. utils.py +1 -2
__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.94 kB)
 
app.py CHANGED
@@ -4,54 +4,69 @@ import time
 from utils import *
 
 vectordb = ""
-
+histr = [[None, "Hi, I'm **everything-rag**🤖.\nI'm here to assist you and let you chat with _your_ pdfs!\nCheck [my website](https://astrabert.github.io/everything-rag/) for troubleshooting and documentation reference\nHave fun!😊"]]
 
 def print_like_dislike(x: gr.LikeData):
     print(x.index, x.value, x.liked)
 
 def add_message(history, message):
-    if len(message["files"]) > 0:
-        history.append((message["files"], None))
-    if message["text"] is not None and message["text"] != "":
-        history.append((message["text"], None))
+    global histr
+    if history is not None:
+        if len(message["files"]) > 0:
+            history.append((message["files"], None))
+            histr.append([message["files"], None])
+        if message["text"] is not None and message["text"] != "":
+            history.append((message["text"], None))
+            histr.append([message["text"], None])
+    else:
+        history = histr
+        add_message(history, message)
     return history, gr.MultimodalTextbox(value=None, interactive=False)
 
 
 def bot(history):
     global vectordb
     global tsk
-    if type(history[-1][0]) != tuple:
-        if vectordb == "":
-            pipe = pipeline(tsk, tokenizer=tokenizer, model=model)
-            response = pipe(history[-1][0])[0]
-            response = response["generated_text"]
-            history[-1][1] = ""
-            for character in response:
-                history[-1][1] += character
-                time.sleep(0.05)
-                yield history
-        else:
-            try:
-                response = just_chatting(model=model, tokenizer=tokenizer, query=history[-1][0], vectordb=vectordb, chat_history=[convert_none_to_str(his) for his in history])["answer"]
+    global histr
+    if history is not None:
+        if type(history[-1][0]) != tuple:
+            if vectordb == "":
+                pipe = pipeline(tsk, tokenizer=tokenizer, model=model)
+                response = pipe(history[-1][0])[0]
+                response = response["generated_text"]
+                histr[-1][1] = response
                 history[-1][1] = ""
                 for character in response:
                     history[-1][1] += character
                     time.sleep(0.05)
                     yield history
-            except Exception as e:
-                response = f"Sorry, the error '{e}' occurred while generating the response; check [troubleshooting documentation](https://astrabert.github.io/everything-rag/#troubleshooting) for more"
-    if type(history[-1][0]) == tuple:
-        filelist = []
-        for i in history[-1][0]:
-            filelist.append(i)
-        finalpdf = merge_pdfs(filelist)
-        vectordb = create_a_persistent_db(finalpdf, os.path.dirname(finalpdf)+"_localDB", os.path.dirname(finalpdf)+"_embcache")
-        response = "VectorDB was successfully created, now you can ask me anything about the document you uploaded!😊"
-        history[-1][1] = ""
-        for character in response:
-            history[-1][1] += character
-            time.sleep(0.05)
-            yield history
+            else:
+                try:
+                    response = just_chatting(task=tsk, model=model, tokenizer=tokenizer, query=history[-1][0], vectordb=vectordb, chat_history=[convert_none_to_str(his) for his in history])["answer"]
+                    history[-1][1] = ""
+                    histr[-1][1] = response
+                    for character in response:
+                        history[-1][1] += character
+                        time.sleep(0.05)
+                        yield history
+                except Exception as e:
+                    response = f"Sorry, the error '{e}' occurred while generating the response; check [troubleshooting documentation](https://astrabert.github.io/everything-rag/#troubleshooting) for more"
+        if type(history[-1][0]) == tuple:
+            filelist = []
+            for i in history[-1][0]:
+                filelist.append(i)
+            finalpdf = merge_pdfs(filelist)
+            vectordb = create_a_persistent_db(finalpdf, os.path.dirname(finalpdf)+"_localDB", os.path.dirname(finalpdf)+"_embcache")
+            response = "VectorDB was successfully created, now you can ask me anything about the document you uploaded!😊"
+            histr[-1][1] = response
+            history[-1][1] = ""
+            for character in response:
+                history[-1][1] += character
+                time.sleep(0.05)
+                yield history
+    else:
+        history = histr
+        bot(history)
 
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot(
@@ -68,8 +83,9 @@ with gr.Blocks() as demo:
     bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
 
     chatbot.like(print_like_dislike, None, None)
-    gr.ClearButton(chatbot)
+
     demo.queue()
+
 if __name__ == "__main__":
     demo.launch()
 
91
 
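When the last message is a tuple of uploaded files, `bot` merges them into a single PDF and indexes it. The `merge_pdfs` helper it calls is defined in utils.py on top of `pypdf.PdfMerger` (imported in the diff below). A hedged sketch of what such a helper can look like; the output filename and location are illustrative, not necessarily the repo's:

```python
import os
from pypdf import PdfMerger

def merge_pdfs(filelist):
    # Concatenate the uploaded PDFs so only one document has to be
    # chunked and embedded into the vector DB.
    merger = PdfMerger()
    for path in filelist:
        merger.append(path)
    # Illustrative output path: next to the first input file.
    finalpdf = os.path.join(os.path.dirname(filelist[0]), "merged.pdf")
    merger.write(finalpdf)
    merger.close()
    return finalpdf
```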
utils.py CHANGED
@@ -10,7 +10,6 @@ from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain.chains import ConversationalRetrievalChain
 import os
 from pypdf import PdfMerger
-from argparse import ArgumentParser
 
 
 mod = "microsoft/phi-1_5"
@@ -115,7 +114,7 @@ def just_chatting(
         model=model,
         tokenizer=tokenizer,
         max_new_tokens = 2048,
-        repetition_penalty = float(10),
+        repetition_penalty = float(1.2),
     )
 
     local_llm = HuggingFacePipeline(pipeline=pipe)
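The `repetition_penalty` change is the functional core of the utils.py edit: `transformers` applies this value as a multiplicative penalty to the logits of tokens that have already appeared, so `float(10)` all but bans any token from recurring, while values slightly above `1.0` (such as `1.2`) merely discourage loops. A standalone sketch of the pipeline setup around the changed lines, assuming the same `microsoft/phi-1_5` checkpoint that utils.py loads:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

mod = "microsoft/phi-1_5"  # same checkpoint utils.py uses
tokenizer = AutoTokenizer.from_pretrained(mod)
model = AutoModelForCausalLM.from_pretrained(mod)

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=2048,
    repetition_penalty=1.2,  # mild penalty; 10 suppressed nearly all token reuse
)

print(pipe("Retrieval-augmented generation works by")[0]["generated_text"])
```

A pipeline built this way is what `HuggingFacePipeline(pipeline=pipe)` wraps, letting LangChain's `ConversationalRetrievalChain` drive the local model.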