Chris4K committed
Commit 3caf785
Parent: 5b0506f

Update app.py

Files changed (1): app.py +36 -93
app.py CHANGED
@@ -1,16 +1,10 @@
-
-import os
-os.environ["TOKENIZERS_PARALLELISM"] = "false"
-
 #####################################
-## BitsAndBytes
+##
 #####################################
 
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
-#from langchain.llms import HuggingFaceHub
 from langchain_community.llms import HuggingFaceHub
 
-model_name = "bn22/Mistral-7B-Instruct-v0.1-sharded"
 
 ###### other models:
 # "Trelis/Llama-2-7b-chat-hf-sharded-bf16"
@@ -18,30 +12,13 @@ model_name = "bn22/Mistral-7B-Instruct-v0.1-sharded"
 # "HuggingFaceH4/zephyr-7b-beta"
 
 # function for loading 4-bit quantized model
-def load_model(model_name: str):
+def load_model():
 
     model = HuggingFaceHub(
         repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
         model_kwargs={"max_length": 1048, "temperature":0.2, "max_new_tokens":256, "top_p":0.95, "repetition_penalty":1.0},
     )
-
-    """
-    :param model_name: Name or path of the model to be loaded.
-    :return: Loaded quantized model.
 
-    bnb_config = BitsAndBytesConfig(
-        load_in_4bit=True,
-        bnb_4bit_use_double_quant=True,
-        bnb_4bit_quant_type="nf4",
-        bnb_4bit_compute_dtype=torch.bfloat16
-    )
-
-    model = AutoModelForCausalLM.from_pretrained(
-        model_name,
-        load_in_4bit=True,
-        torch_dtype=torch.bfloat16,
-        quantization_config=bnb_config
-    )"""
    return model
 
 ##################################################
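The 4-bit BitsAndBytes path deleted in this hunk had been parked inside a docstring and never ran as written: it references torch, which the file does not import, and it sets load_in_4bit both directly and via the config. A minimal runnable sketch of that quantized-loading technique, under the hypothetical name load_model_4bit, would be:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

def load_model_4bit(model_name: str):
    # NF4 4-bit quantization with double quantization; compute in bfloat16
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    # quantization_config already implies 4-bit loading, so the redundant
    # load_in_4bit argument to from_pretrained is dropped here
    return AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=bnb_config,
        torch_dtype=torch.bfloat16,
    )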
@@ -55,8 +32,6 @@ from langchain_community.document_loaders import WebBaseLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import Chroma
 
-#from langchain_openai import OpenAIEmbeddings, ChatOpenAI
-#from langchain.embeddings import HuggingFaceBgeEmbeddings
 from langchain_community.embeddings import HuggingFaceBgeEmbeddings
 from langchain.vectorstores.faiss import FAISS
 
@@ -69,6 +44,22 @@ from langchain.chains.combine_documents import create_stuff_documents_chain
 
 load_dotenv()
 
+def get_vectorstore():
+    '''
+    Returns a Chroma vector store, reloaded from disk, that holds the
+    embeddings of the previously indexed text chunks.
+    '''
+    model = "BAAI/bge-base-en-v1.5"
+    encode_kwargs = {
+        "normalize_embeddings": True
+    }  # set True to compute cosine similarity
+    embeddings = HuggingFaceBgeEmbeddings(
+        model_name=model, encode_kwargs=encode_kwargs, model_kwargs={"device": "cpu"}
+    )
+    # load from disk
+    vector_store = Chroma(persist_directory="/home/user/.cache/chroma_db", embedding_function=embeddings)
+    return vector_store
+
 def get_vectorstore_from_url(url):
     # get the text in document form
     loader = WebBaseLoader(url)
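The intended round trip between these two helpers, as a sketch (the URL and query are illustrative, and it assumes get_vectorstore_from_url builds its embeddings with the same BGE model): get_vectorstore_from_url() crawls a page and persists its chunks into the Chroma directory, while get_vectorstore() reopens that directory later without re-crawling.

# index once, persisting to /home/user/.cache/chroma_db
get_vectorstore_from_url("https://example.com")
# later, or in another process: reload the persisted store and query it
vs = get_vectorstore()
docs = vs.similarity_search("What does the page describe?", k=3)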
@@ -94,34 +85,12 @@ def get_vectorstore_from_url(url):
 
     #vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
     vector_store = Chroma.from_documents(document_chunks, embeddings, persist_directory="/home/user/.cache/chroma_db")
-
-
-
-
-    #######
-    # create a vectorstore from the chunks
 
     return vector_store
 
 
-
-
-
-def get_context_retriever_chain(vector_store):
-
-    # specify model huggingface mode name
-    model_name = "anakin87/zephyr-7b-alpha-sharded"
-    # model_name = "bn22/Mistral-7B-Instruct-v0.1-sharded"
-
-    ###### other models:
-    # "Trelis/Llama-2-7b-chat-hf-sharded-bf16"
-    # "bn22/Mistral-7B-Instruct-v0.1-sharded"
-    # "HuggingFaceH4/zephyr-7b-beta"
-
-    # function for loading 4-bit quantized model
-
-
-    llm = load_model(model_name)
+def get_context_retriever_chain(vector_store):
+
+    llm = load_model()
 
     retriever = vector_store.as_retriever()
 
@@ -137,7 +106,7 @@ def get_context_retriever_chain(vector_store):
 
 def get_conversational_rag_chain(retriever_chain):
 
-    llm = load_model(model_name)
+    llm = load_model()
 
     prompt = ChatPromptTemplate.from_messages([
       ("system", "Du bist eine freundlicher Mitarbeiterin Namens Susie und arbeitest in einenm Call Center. Du beantwortest basierend auf dem Context. Benutze nur den Inhalt des Context. Füge wenn möglich die Quelle hinzu. Antworte mit: Ich bin mir nicht sicher. Wenn die Antwort nicht aus dem Context hervorgeht. Antworte auf Deutsch, bitte? CONTEXT:\n\n{context}"),
@@ -149,44 +118,19 @@ def get_conversational_rag_chain(retriever_chain):
 
     return create_retrieval_chain(retriever_chain, stuff_documents_chain)
 
-#def get_response(user_input):
-# retriever_chain = get_context_retriever_chain(st.session_state.vector_store)
-# conversation_rag_chain = get_conversational_rag_chain(retriever_chain)
-#
-# response = conversation_rag_chain.invoke({
-# "chat_history": st.session_state.chat_history,
-# "input": user_query
-# })
-
-    return response
-
-
 
 ###################
 
 ###################
 import gradio as gr
 
-##from langchain_core.runnables.base import ChatPromptValue
-#from torch import tensor
 
-# Create Gradio interface
-#vector_store = None # Set your vector store here
 chat_history = [] # Set your chat history here
 
 # Define your function here
 def get_response(user_input):
 
-    # Define the prompt as a ChatPromptValue object
-    #user_input = ChatPromptValue(user_input)
-
-    # Convert the prompt to a tensor
-    #input_ids = user_input.tensor
-
-
-    #vs = get_vectorstore_from_url(user_url, all_domain)
-    vs = get_vectorstore_from_url("https://huggingface.co/Chris4K")
-    # print("------ here 22 " )
+    vs = get_vectorstore()
     chat_history =[]
     retriever_chain = get_context_retriever_chain(vs)
     conversation_rag_chain = get_conversational_rag_chain(retriever_chain)
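End to end, the pieces above chain together as in this minimal sketch (the question is illustrative; create_retrieval_chain returns a dict whose "answer" key carries the generation):

vs = get_vectorstore()
retriever_chain = get_context_retriever_chain(vs)
rag_chain = get_conversational_rag_chain(retriever_chain)
response = rag_chain.invoke({"chat_history": [], "input": "Wer ist Chris4K?"})
print(response["answer"])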
@@ -237,7 +181,7 @@ def get_response(message, history):
 
 
     #vs = get_vectorstore_from_url(user_url, all_domain)
-    vs = get_vectorstore_from_url("https://huggingface.co/Chris4K")
+    vs = get_vectorstore()
 
     history =[]
     retriever_chain = get_context_retriever_chain(vs)
@@ -254,11 +198,7 @@ def get_response(message, history):
     last_part = parts[-1]
     return last_part#[-1]['generation']['content']
 
-
-#####
-vs = get_vectorstore_from_url("https://www.xing.com/profile/Christof_Kaller/web_profiles")
-#vs = get_vectorstore_from_url("https://www.linkedin.com/in/christof-kaller-6b043733/?originalSubdomain=de")
-vs = get_vectorstore_from_url("https://twitter.com/zX14_7")
+
 
 
 
@@ -304,15 +244,7 @@ def get_all_links_from_domain(domain_url):
     get_links_from_page(domain_url, visited_urls, domain_links)
     return domain_links
 
-# Example usage:
-domain_url = 'https://globl.contact/'
-links = get_all_links_from_domain(domain_url)
-print("Links from the domain:", links)
 
-#########
-# Assuming visited_urls is a list of URLs
-for url in links:
-    vs = get_vectorstore_from_url(url)
 
 
 
@@ -332,4 +264,15 @@ app = gr.ChatInterface(
     clear_btn=None
 )
 
-app.launch(debug=True, share=True)
+app.launch(debug=True, share=True)
+
+def index_domain():
+    # Example usage:
+    domain_url = 'https://globl.contact/'
+    links = get_all_links_from_domain(domain_url)
+    print("Links from the domain:", links)
+
+    #########
+    # index every discovered URL into the persistent vector store
+    for url in links:
+        vs = get_vectorstore_from_url(url)
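Two caveats on this last hunk: the commit actually introduced the crawl block under a stray module-level signature, def __init__(self):, which nothing ever calls, so it is shown above under the hypothetical name index_domain; and app.launch() blocks, so code placed after it only runs once the server shuts down. A sketch of a startup order that would make the crawl take effect, assuming indexing is meant to happen before serving:

index_domain()                      # crawl and embed first (can be slow)
app.launch(debug=True, share=True)  # then serve; this call blocks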