Asaad Almutareb commited on
Commit
ffcbf6b
β€’
1 Parent(s): 289adee

updated lc imports

Browse files
Files changed (3) hide show
  1. app.py +20 -5
  2. qa.py +14 -14
  3. requirements.txt +1 -0
app.py CHANGED
@@ -36,6 +36,12 @@ def create_gradio_interface(qa:RetrievalQAWithSourcesChain):
36
  result = qa({"query": query, "history": history, "question": question})
37
  return result
38
 
 
 
 
 
 
 
39
  css="""
40
  #col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
41
  """
@@ -43,19 +49,28 @@ def create_gradio_interface(qa:RetrievalQAWithSourcesChain):
43
  title = """
44
  <div style="text-align: center;max-width: 1920px;">
45
  <h1>Chat with your Documentation</h1>
46
- <p style="text-align: center;">This is a privately hosten Docs AI Buddy, <br />
47
- It will help you with any question regarding the documentation of Ray ;)</p>
48
  </div>
49
  """
50
 
 
 
 
 
 
 
 
 
 
 
51
 
52
-
53
- with gr.Blocks(css=css) as demo:
54
  with gr.Column(min_width=900, elem_id="col-container"):
55
  gr.HTML(title)
56
- chatbot = gr.Chatbot([], elem_id="chatbot")
57
  #with gr.Row():
58
  # clear = gr.Button("Clear")
 
59
 
60
  with gr.Row():
61
  question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")
 
36
  result = qa({"query": query, "history": history, "question": question})
37
  return result
38
 
39
+ def vote(data: gr.LikeData):
40
+ if data.liked:
41
+ print("You upvoted this response: ")
42
+ else:
43
+ print("You downvoted this response: ")
44
+
45
  css="""
46
  #col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
47
  """
 
49
  title = """
50
  <div style="text-align: center;max-width: 1920px;">
51
  <h1>Chat with your Documentation</h1>
52
+ <p style="text-align: center;">This is a privately hosted Docs AI Buddy ;)</p>
 
53
  </div>
54
  """
55
 
56
+ head_style = """
57
+ <style>
58
+ @media (min-width: 1536px)
59
+ {
60
+ .gradio-container {
61
+ min-width: var(--size-full) !important;
62
+ }
63
+ }
64
+ </style>
65
+ """
66
 
67
+ with gr.Blocks(title="DocsBuddy AI πŸ€΅πŸ»β€β™‚οΈ", head=head_style) as demo:
 
68
  with gr.Column(min_width=900, elem_id="col-container"):
69
  gr.HTML(title)
70
+ chatbot = gr.Chatbot([], elem_id="chatbot", label="DocuBuddy πŸ€΅πŸ»β€β™‚οΈ")
71
  #with gr.Row():
72
  # clear = gr.Button("Clear")
73
+ chatbot.like(vote, None, None)
74
 
75
  with gr.Row():
76
  question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")
qa.py CHANGED
@@ -6,7 +6,7 @@ import logging
6
  import os
7
  from dotenv import load_dotenv
8
 
9
- import time
10
 
11
  #boto3 for S3 access
12
  import boto3
@@ -14,10 +14,10 @@ from botocore import UNSIGNED
14
  from botocore.client import Config
15
 
16
  # HF libraries
17
- from langchain_community.llms import HuggingFaceHub
18
- from langchain_community.embeddings import HuggingFaceHubEmbeddings
19
  # vectorstore
20
- from langchain_community.vectorstores import Chroma
21
  from langchain_community.vectorstores import FAISS
22
  import zipfile
23
 
@@ -34,7 +34,7 @@ from langchain.globals import set_verbose
34
  # caching
35
  from langchain.globals import set_llm_cache
36
  # We can do the same thing with a SQLite cache
37
- from langchain.cache import SQLiteCache
38
 
39
 
40
  # template for prompt
@@ -70,17 +70,17 @@ if os.path.exists('.langchain.sqlite'):
70
  llm_model_name = "mistralai/Mistral-7B-Instruct-v0.1"
71
 
72
  # changed name from model_id to llm as is common
73
- llm = HuggingFaceHub(repo_id=llm_model_name, model_kwargs={
74
- # "temperature":0.1,
75
- "max_new_tokens":1024,
76
- "repetition_penalty":1.2,
77
- # "streaming": True,
78
- "return_full_text":False
79
- })
80
 
81
  # initialize Embedding config
82
  embedding_model_name = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
83
- embeddings = HuggingFaceHubEmbeddings(repo_id=embedding_model_name)
84
 
85
  set_llm_cache(SQLiteCache(database_path=".langchain.sqlite"))
86
 
@@ -93,7 +93,7 @@ with zipfile.ZipFile(VS_DESTINATION, 'r') as zip_ref:
93
  zip_ref.extractall('./vectorstore/')
94
 
95
  FAISS_INDEX_PATH='./vectorstore/lc-faiss-multi-qa-mpnet'
96
- db = FAISS.load_local(FAISS_INDEX_PATH, embeddings)
97
 
98
  # use the cached embeddings instead of embeddings to speed up re-retrieval
99
  # db = Chroma(persist_directory="./vectorstore", embedding_function=embeddings)
 
6
  import os
7
  from dotenv import load_dotenv
8
 
9
+ #import time
10
 
11
  #boto3 for S3 access
12
  import boto3
 
14
  from botocore.client import Config
15
 
16
  # HF libraries
17
+ from langchain_huggingface import HuggingFaceEndpoint
18
+ from langchain_huggingface.embeddings import HuggingFaceEmbeddings
19
  # vectorstore
20
+ #from langchain_community.vectorstores import Chroma
21
  from langchain_community.vectorstores import FAISS
22
  import zipfile
23
 
 
34
  # caching
35
  from langchain.globals import set_llm_cache
36
  # We can do the same thing with a SQLite cache
37
+ from langchain_community.cache import SQLiteCache
38
 
39
 
40
  # template for prompt
 
70
  llm_model_name = "mistralai/Mistral-7B-Instruct-v0.1"
71
 
72
  # changed name from model_id to llm as is common
73
+ llm = HuggingFaceEndpoint(
74
+ repo_id=llm_model_name,
75
+ temperature=0.1,
76
+ max_new_tokens=1024,
77
+ repetition_penalty=1.2,
78
+ return_full_text=False,
79
+ )
80
 
81
  # initialize Embedding config
82
  embedding_model_name = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
83
+ embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name)
84
 
85
  set_llm_cache(SQLiteCache(database_path=".langchain.sqlite"))
86
 
 
93
  zip_ref.extractall('./vectorstore/')
94
 
95
  FAISS_INDEX_PATH='./vectorstore/lc-faiss-multi-qa-mpnet'
96
+ db = FAISS.load_local(FAISS_INDEX_PATH, embeddings, allow_dangerous_deserialization=True)
97
 
98
  # use the cached embeddings instead of embeddings to speed up re-retrieval
99
  # db = Chroma(persist_directory="./vectorstore", embedding_function=embeddings)
requirements.txt CHANGED
@@ -3,6 +3,7 @@ chromadb
3
  faiss-cpu
4
  langchain
5
  langchain-community
 
6
  python-dotenv
7
  bs4
8
  gradio
 
3
  faiss-cpu
4
  langchain
5
  langchain-community
6
+ langchain-huggingface
7
  python-dotenv
8
  bs4
9
  gradio