Huzaifa367 committed · verified
Commit c14a846 · 1 Parent(s): bdca046

Update pages/summarizer.py

Files changed (1)
  1. pages/summarizer.py +112 -112
pages/summarizer.py CHANGED
@@ -1,112 +1,112 @@
-import streamlit as st
-from PyPDF2 import PdfReader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain_groq import ChatGroq
-from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings
-from langchain.vectorstores import FAISS
-from langchain.chains.question_answering import load_qa_chain
-from langchain.prompts import PromptTemplate
-import tempfile
-from gtts import gTTS
-import os
-
-def text_to_speech(text):
-    tts = gTTS(text=text, lang='en')
-    audio_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
-    temp_filename = audio_file.name
-    tts.save(temp_filename)
-    st.audio(temp_filename, format='audio/mp3')
-    os.remove(temp_filename)
-
-def get_pdf_text(pdf_docs):
-    text=""
-    for pdf in pdf_docs:
-        pdf_reader= PdfReader(pdf)
-        for page in pdf_reader.pages:
-            text+= page.extract_text()
-    return text
-
-def get_text_chunks(text):
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
-    chunks = text_splitter.split_text(text)
-    return chunks
-
-def get_vector_store(text_chunks, api_key):
-    embeddings = HuggingFaceInferenceAPIEmbeddings(api_key=api_key, model_name="sentence-transformers/all-MiniLM-l6-v2")
-    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
-    vector_store.save_local("faiss_index")
-
-def get_conversational_chain():
-
-    prompt_template = """
-    Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
-    provided context just say, "answer is not available in the context", don't provide the wrong answer\n\n
-    Context:\n {context}?\n
-    Question: \n{question}\n
-    Answer:
-    """
-
-    model = ChatGroq(temperature=0, groq_api_key=os.environ["groq_api_key"], model_name="llama3-8b-8192")
-
-    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
-    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
-
-    return chain
-
-def user_input(user_question, api_key):
-    embeddings = HuggingFaceInferenceAPIEmbeddings(api_key=api_key, model_name="sentence-transformers/all-MiniLM-l6-v2")
-
-    new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
-    docs = new_db.similarity_search(user_question)
-
-    chain = get_conversational_chain()
-
-    response = chain(
-        {"input_documents":docs, "question": user_question}
-        , return_only_outputs=True)
-
-    print(response) # Debugging line
-
-    st.write("Replies:")
-    if isinstance(response["output_text"], str):
-        response_list = [response["output_text"]]
-    else:
-        response_list = response["output_text"]
-
-    for text in response_list:
-        st.write(text)
-        # Convert text to speech for each response
-        text_to_speech(text)
-
-def main():
-
-    st.set_page_config(layout="centered")
-    st.header("Chat with DOCS")
-    st.markdown("<h1 style='font-size:20px;'>ChatBot by Muhammad Huzaifa</h1>", unsafe_allow_html=True)
-    api_key = st.secrets["inference_api_key"]
-
-
-    with st.sidebar:
-        st.header("Chat with PDF")
-        # st.title("Menu:")
-        pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit Button", accept_multiple_files=True, type=["pdf"])
-        if st.button("Submit"):
-            with st.spinner("Processing..."):
-                raw_text = get_pdf_text(pdf_docs)
-                text_chunks = get_text_chunks(raw_text)
-                get_vector_store(text_chunks, api_key)
-                st.success("Done")
-
-    if st.button("Summerize Chat"):
-        st.switch_page('pages/summarizer.py')
-
-    # Check if any document is uploaded
-    if pdf_docs:
-        user_question = st.text_input("Ask a question from the Docs")
-        if user_question:
-            user_input(user_question, api_key)
-    else:
-        st.write("Please upload a document first to ask questions.")
-
-if __name__ == "__main__":
-    main()
+import streamlit as st
+from PyPDF2 import PdfReader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_groq import ChatGroq
+from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings
+from langchain.vectorstores import FAISS
+from langchain.chains.question_answering import load_qa_chain
+from langchain.prompts import PromptTemplate
+import tempfile
+from gtts import gTTS
+import os
+
+def text_to_speech(text):
+    tts = gTTS(text=text, lang='en')
+    audio_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
+    temp_filename = audio_file.name
+    tts.save(temp_filename)
+    st.audio(temp_filename, format='audio/mp3')
+    os.remove(temp_filename)
+
+def get_pdf_text(pdf_docs):
+    text=""
+    for pdf in pdf_docs:
+        pdf_reader= PdfReader(pdf)
+        for page in pdf_reader.pages:
+            text+= page.extract_text()
+    return text
+
+def get_text_chunks(text):
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+    chunks = text_splitter.split_text(text)
+    return chunks
+
+def get_vector_store(text_chunks, api_key):
+    embeddings = HuggingFaceInferenceAPIEmbeddings(api_key=api_key, model_name="sentence-transformers/all-MiniLM-l6-v2")
+    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
+    vector_store.save_local("faiss_index")
+
+def get_conversational_chain():
+
+    prompt_template = """
+    Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
+    provided context just say, "answer is not available in the context", don't provide the wrong answer\n\n
+    Context:\n {context}?\n
+    Question: \n{question}\n
+    Answer:
+    """
+
+    model = ChatGroq(temperature=0, groq_api_key=os.environ["groq_api_key"], model_name="llama3-8b-8192")
+
+    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
+
+    return chain
+
+def user_input(user_question, api_key):
+    embeddings = HuggingFaceInferenceAPIEmbeddings(api_key=api_key, model_name="sentence-transformers/all-MiniLM-l6-v2")
+
+    new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
+    docs = new_db.similarity_search(user_question)
+
+    chain = get_conversational_chain()
+
+    response = chain(
+        {"input_documents":docs, "question": user_question}
+        , return_only_outputs=True)
+
+    print(response) # Debugging line
+
+    st.write("Replies:")
+    if isinstance(response["output_text"], str):
+        response_list = [response["output_text"]]
+    else:
+        response_list = response["output_text"]
+
+    for text in response_list:
+        st.write(text)
+        # Convert text to speech for each response
+        text_to_speech(text)
+
+def main():
+
+    st.set_page_config(layout="centered")
+    st.header("Chat with DOCS")
+    st.markdown("<h1 style='font-size:20px;'>ChatBot by Muhammad Huzaifa</h1>", unsafe_allow_html=True)
+    api_key = st.secrets["inference_api_key"]
+
+
+    with st.sidebar:
+        st.header("Chat with PDF")
+        # st.title("Menu:")
+        pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit Button", accept_multiple_files=True, type=["pdf"])
+        if st.button("Submit"):
+            with st.spinner("Processing..."):
+                raw_text = get_pdf_text(pdf_docs)
+                text_chunks = get_text_chunks(raw_text)
+                get_vector_store(text_chunks, api_key)
+                st.success("Done")
+
+    if st.button("Summerize Chat"):
+        st.switch_page('app.py')
+
+    # Check if any document is uploaded
+    if pdf_docs:
+        user_question = st.text_input("Ask a question from the Docs")
+        if user_question:
+            user_input(user_question, api_key)
+    else:
+        st.write("Please upload a document first to ask questions.")
+
+if __name__ == "__main__":
+    main()
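The substantive change in the diff above is the st.switch_page target: the "Summerize Chat" button previously re-opened pages/summarizer.py, the page it already lives on, and now navigates to app.py. A minimal sketch of the multipage layout this implies, assuming app.py is the Streamlit entry point; the button label below is only illustrative:

    # Layout assumed by the change (paths taken from the diff):
    #   app.py               <- entry point passed to `streamlit run`
    #   pages/summarizer.py  <- this page
    import streamlit as st

    # st.switch_page() takes a path relative to the entry script, so a page
    # under pages/ can send the user back to the main app like this:
    if st.button("Back to main app"):
        st.switch_page("app.py")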