amalsp committed on
Commit
d7593ac
1 Parent(s): ffed861
Files changed (1) hide show
  1. app.py +47 -0
app.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

import streamlit as st
from dotenv import load_dotenv
from flask import Flask, render_template, request
from langchain.chains import RetrievalQA
from langchain.llms import CTransformers
from langchain.schema import Document
from langchain.vectorstores import FAISS

from src.helper import download_hugging_face_embeddings
9
+
10
+ app = Flask(__name__)
11
+ load_dotenv()
12
+
13
+ # Download embeddings model
14
+ embeddings = download_hugging_face_embeddings()
15
+
16
+ # Create Document objects with dummy texts and embeddings
17
+ documents = [Document(page_content="dummy", embedding=embedding) for embedding in embeddings]
18
+
19
+ # Initialize FAISS vector store with documents
20
+ vector_store = FAISS.from_documents(documents, embeddings)
21
+
22
+ # Initialize CTransformers model (LLAMA)
23
+ llm = CTransformers(model="E:\\project\\Medical-Chatbot\\llama-2-7b-chat.ggmlv3.q4_0.bin", model_type="llama", config={'max_new_tokens': 512, 'temperature': 0.8})
24
+
25
+ # Initialize RetrievalQA chain
26
+ qa = RetrievalQA.from_chain_type(
27
+ llm=llm,
28
+ chain_type="stuff",
29
+ retriever=vector_store.as_retriever(search_kwargs={'k': 2}),
30
+ return_source_documents=True
31
+ )
32
+
33
+ @app.route("/")
34
+ def index():
35
+ return render_template('chat.html')
36
+
37
@app.route("/get", methods=["GET", "POST"])
def chat():
    """Answer a chat message with the RetrievalQA chain.

    Reads the user message under the key "msg" from POST form data, or
    from the query string on GET, and returns the chain's answer as a
    plain-text response body.
    """
    # Accept both POST form data and GET query-string parameters; the
    # original request.form["msg"] would abort with a 400 on GET even
    # though the route advertises GET support. (Also avoids shadowing
    # the builtin `input`, which the original assigned to.)
    msg = request.form.get("msg") or request.args.get("msg", "")
    print(msg)  # lightweight request tracing
    result = qa({"query": msg})
    print("Response : ", result["result"])
    return str(result["result"])
45
+
46
if __name__ == '__main__':
    # Listen on all interfaces on port 8080.
    # NOTE(review): debug=True together with host 0.0.0.0 exposes the
    # Werkzeug interactive debugger (arbitrary code execution) to the
    # whole network — disable debug outside local development.
    app.run(host="0.0.0.0", port=8080, debug=True)