ImranzamanML committed
Commit
f08d86f
1 Parent(s): a53de33

Create ai_doctor

Files changed (1)
  1. ai_doctor +53 -0
ai_doctor ADDED
@@ -0,0 +1,53 @@
+ from langchain.document_loaders.csv_loader import CSVLoader
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.embeddings import CacheBackedEmbeddings
+ from langchain_community.vectorstores import FAISS
+ from langchain.storage import LocalFileStore
+ from langchain.callbacks import StdOutCallbackHandler
+ from langchain.chains import RetrievalQA
+ from langchain_openai import ChatOpenAI
+ import os
+
+ def create_index():
+     # Load the FAQ data from the CSV file
+     dir = os.path.dirname(__file__)
+     df_path = dir + '/data/Mental_Health_FAQ.csv'
+     loader = CSVLoader(file_path=df_path)
+     data = loader.load()
+
+     # Create the embeddings model
+     embeddings_model = OpenAIEmbeddings()
+
+     # Cache the embeddings in a local file store and build the FAISS index with them
+     store = LocalFileStore("./cache")
+     cached_embedder = CacheBackedEmbeddings.from_bytes_store(
+         embeddings_model, store, namespace=embeddings_model.model
+     )
+     vector_store = FAISS.from_documents(data, cached_embedder)
+
+     return vector_store.as_retriever()
+
+ def setup(openai_key):
+     # Set the API key for OpenAI
+     os.environ["OPENAI_API_KEY"] = openai_key
+     retriever = create_index()
+     llm = ChatOpenAI(model="gpt-4")
+     return retriever, llm
+
+ def mh_assistant(openai_key, query):
+     # Build the retriever and the language model
+     retriever, llm = setup(openai_key)
+
+     # Create the QA chain, logging its steps to stdout
+     handler = StdOutCallbackHandler()
+     qa_with_sources_chain = RetrievalQA.from_chain_type(
+         llm=llm,
+         retriever=retriever,
+         callbacks=[handler],
+         return_source_documents=True
+     )
+
+     # Ask the question and return only the generated answer
+     res = qa_with_sources_chain.invoke({"query": query})
+     return res['result']
+
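
For reference, a minimal usage sketch of the new module. It assumes the committed file is saved with a `.py` extension as `ai_doctor.py` (the commit adds it without one) and that a valid OpenAI API key is available in the `OPENAI_API_KEY` environment variable; both the import path and the sample question are illustrative, not part of this commit.

```python
# Minimal usage sketch (assumption: the committed file is importable as ai_doctor.py
# and OPENAI_API_KEY holds a valid key).
import os

from ai_doctor import mh_assistant

answer = mh_assistant(
    openai_key=os.environ["OPENAI_API_KEY"],  # key supplied by the caller
    query="What does it mean to have a mental illness?",  # illustrative question
)
print(answer)
```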