John Landry committed a120bed
Parent(s): 5b6ca23
Commit message: do it
kb.py
CHANGED
@@ -1,16 +1,59 @@

 from langchain_pinecone import PineconeVectorStore
-from langchain_core.vectorstores import VectorStore
-from langchain_core.vectorstores import VectorStoreRetriever
 from langchain_aws import BedrockEmbeddings
+from langchain.chains.llm import LLMChain
+from langchain.prompts import PromptTemplate
+from langchain_aws import ChatBedrock

 INDEX_NAME = "zeteo-health"
 NAMESPACE = "knowedgebase"
 EMBEDDING_DIMENSION = 1536
 EMBEDDING_MODEL_NAME = "amazon.titan-embed-text-v1"

+PROMPT = """
+Your name is Zeteo Assistant.
+You are answering questions about health.
+Only use simple words and descriptions.
+Respond with an empathetic tone.
+Be informative but concise.
+You will only respond in English.
+Address your response in second person.
+Use the first name if it is given to address the response.
+Be friendly but formal, avoid phrases like "Hey there".
+If the question is chit chat such as "Hello", "How are you?", "Good morning", "thank you" or "Howdy" respond with a simple greeting and do not include any other information, especially related to health.
+Limit your response only to information contained in the <context>. When referring to the context, use the term "information".
+If the question is not about healthcare and can't be found in the <context> respond with, "I am sorry, I don't have information to answer that question."
+
+<context>
+{context}
+</context>
+
+<profile>
+{profile}
+</profile>
+
+<question>
+{question}
+</question>
+"""
+
+prompt = PromptTemplate.from_template(PROMPT)
+
 def getkb():
     vectorstore = PineconeVectorStore(index_name=INDEX_NAME,
                                       embedding=BedrockEmbeddings(model_id=EMBEDDING_MODEL_NAME),
                                       namespace=NAMESPACE)
     return vectorstore
+
+
+def get_response(question, context, name):
+    llm = ChatBedrock(model_id='anthropic.claude-3-sonnet-20240229-v1:0')
+    chat_chain = LLMChain(llm=llm, prompt=prompt)
+    result = chat_chain.invoke(
+        {
+            "context": context,
+            "question": question,
+            "profile": f"name: {name}"
+        }
+    )['text']
+    return result
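Taken together, the new kb.py exposes a two-step pattern: getkb() builds the Pinecone-backed vector store, and get_response() fills the prompt with retrieved context, the user's question, and a profile line, then calls Claude 3 Sonnet on Bedrock through LLMChain. A minimal usage sketch, assuming PINECONE_API_KEY and AWS Bedrock credentials are already configured; the question and name below are placeholders, not values from the commit:

from kb import getkb, get_response

vectorstore = getkb()
question = "What does a routine blood pressure check involve?"  # placeholder question
# similarity_search_with_score returns (Document, score) pairs; keep only the page text
results = vectorstore.similarity_search_with_score(question, k=6)
context = "\n".join(doc.page_content for doc, _score in results)
print(get_response(question, context, name="Alex"))  # "Alex" stands in for the WhatsApp profile name

Worth noting: LLMChain is deprecated in recent LangChain releases in favor of composing the prompt and model directly (prompt | llm), but the commit uses it, so the sketch simply reuses what kb.py defines.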
main.py
CHANGED
@@ -2,7 +2,7 @@ from fastapi import FastAPI, Request, Query
 import logging
 from whatsapp_client import WhatsAppWrapper
 import json
-from kb import getkb
+from kb import getkb, get_response

 logger = logging.getLogger()

@@ -41,11 +41,13 @@ async def callback(request: Request):
         message = value['messages'][0]
         phone_num = message['from']
         mesg_body = message['text']['body']
+        name = value['contacts'][0]['profile']['name']
         print("SEARCH: " + str(mesg_body))
-
-
+        context = '\n'.join([doc[0].page_content for doc in vectorstore.similarity_search_with_score(mesg_body, k=6)])
+        response = get_response(mesg_body, context, name)
+
         client = WhatsAppWrapper()
-        client.send_text_message(
+        client.send_text_message(response, phone_num)

     return {"status": "success"}

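The new callback lines reference a vectorstore object that is not created inside either hunk, so main.py presumably builds it once at module scope and reuses it per request (the top hunk already imports getkb). A sketch of that wiring, with the FastAPI app object assumed rather than taken from the diff:

# Hypothetical module-level setup in main.py (not part of the shown hunks):
# create the Pinecone-backed vector store once at startup and reuse it in the callback.
from fastapi import FastAPI
from kb import getkb, get_response

app = FastAPI()
vectorstore = getkb()

Building the store once avoids re-creating the Bedrock embeddings client and Pinecone connection on every incoming message.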
test.py
CHANGED
@@ -1,6 +1,7 @@
 import json
-from kb import getkb
+from kb import getkb, get_response
 from dotenv import load_dotenv
+from whatsapp_client import WhatsAppWrapper

 load_dotenv()

@@ -31,7 +32,7 @@ data = """{
             "id": "wamid.HBgLMTUxMjc0MDQ2MjAVAgASGBQzQTEyQTM0OURGQUI4QzZDNTg2MAA=",
             "timestamp": "1716933996",
             "text": {
-                "body": "
+                "body": "how do i get tested for prostate cancer?"
             },
             "type": "text"
         }
@@ -49,18 +50,17 @@ value = data['entry'][0]['changes'][0]['value']

 def main():
     vectorstore = getkb()
-    print('here i am!')
     if 'messages' in value and 'from' in value['messages'][0] and 'text' in value['messages'][0]:
-        print('i got here!')
         message = value['messages'][0]
+        name = value['contacts'][0]['profile']['name']
         phone_num = message['from']
         mesg_body = message['text']['body']
-        print(f"SEARCH: {mesg_body}, PHONE: {phone_num}")

-
-
-
-
+        context = '\n'.join([doc[0].page_content for doc in vectorstore.similarity_search_with_score(mesg_body, k=6)])
+        response = get_response(mesg_body, context, name)
+        print(f"RESPONSE: {response}")
+        client = WhatsAppWrapper()
+        client.send_text_message("hello, right back at ya ..." + response, phone_num)

 if __name__ == "__main__":
     main()
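Both handlers now pull the same three fields out of the nested WhatsApp webhook structure: the sender's profile name, phone number, and message body. A small helper that captures just that extraction could keep main.py and test.py in sync; the helper below is illustrative only, not part of the commit:

def parse_incoming(value):
    # 'value' is the webhook payload's entry[0].changes[0].value object,
    # as unpacked in both main.py and test.py.
    message = value['messages'][0]
    return {
        "name": value['contacts'][0]['profile']['name'],
        "phone_num": message['from'],
        "mesg_body": message['text']['body'],
    }

The existing guard (checking for 'messages', 'from', and 'text') would still apply before calling it.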