Jatinydv committed on
Commit cb40971
1 Parent(s): 3bc0a9f

Update app.py

Files changed (1): app.py +50 -36
app.py CHANGED
@@ -1,47 +1,62 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
- 
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
- 
- 
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
- 
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
- 
-     messages.append({"role": "user", "content": message})
- 
-     response = ""
- 
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
- 
-         response += token
-         yield response
- 
- 
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
+ from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
+ from langchain.prompts import PromptTemplate
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from langchain_community.vectorstores import FAISS
+ from langchain_community.llms import CTransformers
+ from langchain.chains import RetrievalQA
+ 
+ DB_FAISS_PATH = 'vectorstore/db_faiss'
+ 
+ custom_prompt_template = """Use the following pieces of information to answer the user's question.
+ If you don't know the answer, just say that you don't know, don't try to make up an answer.
+ 
+ Context: {context}
+ Question: {question}
+ 
+ Only return the helpful answer below and nothing else.
+ Helpful answer:
+ """
+ 
+ def set_custom_prompt():
+     prompt = PromptTemplate(template=custom_prompt_template,
+                             input_variables=['context', 'question'])
+     return prompt
+ 
+ def retrieval_qa_chain(llm, prompt, db):
+     qa_chain = RetrievalQA.from_chain_type(llm=llm,
+                                            chain_type='stuff',
+                                            retriever=db.as_retriever(search_kwargs={'k': 2}),
+                                            return_source_documents=True,
+                                            chain_type_kwargs={'prompt': prompt}
+                                            )
+     return qa_chain
+ 
+ def load_llm():
+     llm = CTransformers(
+         model="TheBloke/Llama-2-7B-Chat-GGML",
+         model_type="llama",
+         max_new_tokens=512,
+         temperature=0.5
+     )
+     return llm
+ 
+ def qa_bot():
+     embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
+                                        model_kwargs={'device': 'cpu'})
+     db = FAISS.load_local(DB_FAISS_PATH, embeddings)
+     llm = load_llm()
+     qa_prompt = set_custom_prompt()
+     qa = retrieval_qa_chain(llm, qa_prompt, db)
+     return qa
+ 
+ # Define a function to respond to messages using your QA model
+ def respond(message, history, system_message, max_tokens, temperature, top_p):
+     qa_result = qa_bot()
+     response = qa_result({'query': message})
+     return response
+ 
+ # Create a Gradio interface using the respond function
  demo = gr.ChatInterface(
      respond,
      additional_inputs=[
@@ -58,6 +73,5 @@ demo = gr.ChatInterface(
      ],
  )
  
- 
  if __name__ == "__main__":
-     demo.launch()
+     demo.launch()
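
One caveat with the new `respond` handler: `gr.ChatInterface` expects the handler to return a string, but a `RetrievalQA` chain built with `return_source_documents=True` returns a dict of the form `{'query': ..., 'result': ..., 'source_documents': [...]}`, and as committed the whole chain (embeddings, FAISS index, LLM) is rebuilt on every message. A minimal sketch of a patched handler on top of the app.py above; the module-level `QA_CHAIN` cache is my own addition, not part of this commit:

```python
# Hypothetical patch, not part of the commit: build the chain once and return text only.
QA_CHAIN = None

def respond(message, history, system_message, max_tokens, temperature, top_p):
    global QA_CHAIN
    if QA_CHAIN is None:
        QA_CHAIN = qa_bot()  # avoids reloading the FAISS index on every message
    result = QA_CHAIN({'query': message})
    # RetrievalQA returns a dict; gr.ChatInterface needs the answer string itself.
    return result['result']
```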
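Separately, on recent `langchain_community` releases `FAISS.load_local` refuses to unpickle an index unless deserialization is explicitly allowed, so `qa_bot()` may raise at startup even though the two-argument call in the commit works on older versions. A one-line adjustment, reasonable here only if the index at `vectorstore/db_faiss` was built locally rather than downloaded:

```python
# Version-dependent: older langchain_community accepts the call as committed.
db = FAISS.load_local(DB_FAISS_PATH, embeddings,
                      allow_dangerous_deserialization=True)
```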