durgeshshisode1988 committed
Commit a6efa43
1 Parent(s): 954e4e0

Create app.py

Files changed (1)
  1. app.py +73 -0
app.py ADDED
@@ -0,0 +1,73 @@
+import streamlit as st
+import os
+from langchain_groq import ChatGroq
+from langchain_openai import OpenAIEmbeddings
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.chains.combine_documents import create_stuff_documents_chain
+from langchain_core.prompts import ChatPromptTemplate
+from langchain.chains import create_retrieval_chain
+from langchain_community.vectorstores import FAISS
+
+from langchain_community.document_loaders import PyPDFDirectoryLoader
+
+from dotenv import load_dotenv
+
+load_dotenv()
+
+## Load the OpenAI (embeddings) and Groq API keys
+os.environ['OPENAI_API_KEY'] = os.getenv("OPENAI_API_KEY")
+groq_api_key = os.getenv('GROQ_API_KEY')
+
+st.title("ChatBot Demo for Error Codes")
+
+llm = ChatGroq(groq_api_key=groq_api_key,
+               model="Llama3-8b-8192")
+
+
+prompt = ChatPromptTemplate.from_template(
+    """
+    Answer the question based on the provided context only.
+    Please provide the most accurate response based on the question.
+    <context>
+    {context}
+    </context>
+    Question: {input}
+    """
+)
+
+
+def vector_embedding():
+
+    if "vectors" not in st.session_state:
+
+        st.session_state.embeddings = OpenAIEmbeddings()
+        st.session_state.loader = PyPDFDirectoryLoader("./data")
+        st.session_state.docs = st.session_state.loader.load()
+        st.session_state.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+        st.session_state.final_documents = st.session_state.text_splitter.split_documents(st.session_state.docs[:20])
+        st.session_state.vectors = FAISS.from_documents(st.session_state.final_documents, st.session_state.embeddings)
+
+
+prompt1 = st.text_input("Enter your question from Documents")
+
+if st.button("Documents Embedding"):
+    vector_embedding()
+    st.write("VectorStore DB is ready")
+
+import time
+
+if prompt1:
+    start = time.process_time()
+    document_chain = create_stuff_documents_chain(llm, prompt)
+    retriever = st.session_state.vectors.as_retriever()
+    retrieval_chain = create_retrieval_chain(retriever, document_chain)
+    response = retrieval_chain.invoke({'input': prompt1})
+    print("Response time : ", time.process_time() - start)
+    st.write(response['answer'])
+
+    # With a Streamlit expander
+    with st.expander("Document Similarity Search"):
+        # Find the relevant chunks
+        for i, doc in enumerate(response["context"]):
+            st.write(doc.page_content)
+            st.write("------------------------------------")
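For context, the committed app.py reads OPENAI_API_KEY and GROQ_API_KEY from the environment (via python-dotenv) and indexes PDFs from a ./data directory with PyPDFDirectoryLoader. The sketch below is a hypothetical pre-flight check for those prerequisites; it is not part of the commit and simply assumes the same variable names and file layout as the code above.

# Hypothetical pre-flight check (illustration only): verifies the environment
# app.py expects before it is launched with `streamlit run app.py`.
import os
from dotenv import load_dotenv

load_dotenv()

# app.py reads both API keys from the environment / .env file.
missing = [key for key in ("OPENAI_API_KEY", "GROQ_API_KEY") if not os.getenv(key)]
if missing:
    raise SystemExit(f"Missing environment variables: {', '.join(missing)}")

# PyPDFDirectoryLoader("./data") needs at least one PDF to index.
if not os.path.isdir("./data") or not any(
    name.lower().endswith(".pdf") for name in os.listdir("./data")
):
    raise SystemExit("Put the error-code PDFs in ./data before building the vector store.")

print("Environment looks ready; start the app with: streamlit run app.py")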