harshxmishra commited on
Commit
55e747d
·
verified ·
1 Parent(s): d4215dd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +169 -169
app.py CHANGED
@@ -1,169 +1,169 @@
1
- import os
2
- from PyPDF2 import PdfReader
3
- from langchain.text_splitter import RecursiveCharacterTextSplitter
4
- from langchain.vectorstores import FAISS
5
- from langchain.prompts import PromptTemplate
6
- from langchain.chains.question_answering import load_qa_chain
7
- from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
8
- from dotenv import load_dotenv
9
- import streamlit as st
10
- import re
11
- from google.generativeai import configure
12
-
13
- # Load environment variables
14
- load_dotenv()
15
- os.getenv("GOOGLE_API_KEY")
16
-
17
- # Configure Google Generative AI
18
- configure(api_key=os.getenv("GOOGLE_API_KEY"))
19
-
20
- def get_pdf_text(pdf_docs):
21
- """Extract text and course details (title, description, and link) from the PDF."""
22
- text = ""
23
- course_details = []
24
- for pdf in pdf_docs:
25
- pdf_reader = PdfReader(pdf)
26
- for page in pdf_reader.pages:
27
- page_text = page.extract_text()
28
- text += page_text
29
-
30
- # Extract the course titles and links (bold titles and underlined links)
31
- courses = extract_course_details(page_text)
32
- course_details.extend(courses)
33
-
34
- return text, course_details
35
-
36
- def extract_course_details(page_text):
37
- """Extract course title and link from the page text."""
38
- course_details = []
39
- # Regex to find bold titles and underlined links
40
- title_pattern = r"(\*\*([A-Z\s]+)\*\*)(.*?)(http[s]?://[^\s]+)"
41
- matches = re.findall(title_pattern, page_text)
42
-
43
- for match in matches:
44
- title = match[1].strip()
45
- description = match[2].strip()
46
- link = match[3].strip()
47
-
48
- # Return tuple of course title, description, and link
49
- course_details.append({
50
- "title": title,
51
- "description": description,
52
- "link": link
53
- })
54
-
55
- return course_details
56
-
57
- def get_text_chunks(text, course_details):
58
- """Split the extracted text into chunks and append course links to the description."""
59
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
60
-
61
- chunks = []
62
- for course in course_details:
63
- course_text = f"**Course Title**: {course['title']}\n"
64
- course_text += f"[Course Link]({course['link']})\n"
65
- course_text += f"**Description**: {course['description']}\n"
66
-
67
- text_chunks = text_splitter.split_text(course_text)
68
- chunks.extend(text_chunks)
69
-
70
- return chunks
71
-
72
- def get_vector_store(text_chunks):
73
- """Generate and store embeddings in a vector store."""
74
- embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
75
- vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
76
- vector_store.save_local("embedding")
77
- return vector_store
78
-
79
- def generate_embeddings_from_pdf(pdf_docs):
80
- """Generate and save embeddings from the PDF course file."""
81
- # Extract text and course details from the PDF
82
- raw_text, course_details = get_pdf_text(pdf_docs)
83
-
84
- # Get text chunks with course title, link, and description
85
- text_chunks = get_text_chunks(raw_text, course_details)
86
-
87
- # Generate and save the vector store
88
- vector_store = get_vector_store(text_chunks)
89
-
90
- print(f"Embeddings generated and saved successfully.")
91
- return vector_store
92
-
93
- def load_vector_store():
94
- """Load pre-generated embeddings from FAISS."""
95
- embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
96
- vector_store = FAISS.load_local("embedding", embeddings, allow_dangerous_deserialization=True)
97
- return vector_store
98
-
99
- def get_conversational_chain():
100
- """Create a structured chain for processing search queries."""
101
- system_prompt = """
102
- You are an intelligent assistant helping users find the best free courses on data science, machine learning, and related fields.
103
- When given a query, recommend courses by analyzing their relevance based on:
104
- - Keywords
105
- - Topics of interest
106
- - User's goals (if provided)
107
-
108
- Format your responses as:
109
- - **Course Title**: <Title>
110
- - [Course Link](<Link>)
111
- - **Description**: <Brief Description>
112
- - **Relevance**: <Why it is recommended>
113
- Provide concise and actionable recommendations.
114
- """
115
-
116
- prompt_template = PromptTemplate(
117
- template=system_prompt + "\nContext: {context}\nQuery: {query}",
118
- input_variables=["context", "query"]
119
- )
120
- model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
121
- chain = load_qa_chain(model, chain_type="stuff", prompt=prompt_template)
122
-
123
- return chain
124
-
125
- def user_input(user_query, keywords):
126
- """Process user input and search for relevant courses."""
127
- vector_store = load_vector_store()
128
- chain = get_conversational_chain()
129
-
130
- # Construct search query
131
- query_context = f"Keywords: {keywords}. Query: {user_query}."
132
- docs = vector_store.similarity_search(query_context)
133
-
134
- # Get recommendations
135
- response = chain({
136
- "input_documents": docs,
137
- "context": "Analytics Vidhya free courses database.",
138
- "query": query_context
139
- }, return_only_outputs=True)
140
-
141
- return response["output_text"]
142
-
143
- def main():
144
- # Streamlit app UI
145
- st.set_page_config("Smart Course Search", layout="wide")
146
- st.title("Smart Course Search Tool")
147
- st.write("Search for the most relevant free courses using natural language or keywords.")
148
-
149
- # User inputs
150
- user_query = st.text_input("Enter your search query or context (e.g., 'I want to learn deep learning')")
151
- keywords = st.text_input("Enter specific keywords (comma-separated, e.g., 'NLP, data visualization')")
152
-
153
- if st.button("Search Courses"):
154
- if user_query or keywords:
155
- with st.spinner("Searching for the best courses..."):
156
- results = user_input(user_query, keywords)
157
- st.success("Search Complete!")
158
-
159
- # Beautify and display the results
160
- if results:
161
- formatted_results = results.replace("**", "<b>").replace("**", "</b>").replace("[", "<u>").replace("]", "</u>")
162
- st.markdown(formatted_results, unsafe_allow_html=True)
163
- else:
164
- st.warning("No relevant courses found.")
165
- else:
166
- st.error("Please provide a query or keywords for searching.")
167
-
168
- if __name__ == "__main__":
169
- main()
 
1
+ import os
2
+ from PyPDF2 import PdfReader
3
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
4
+ from langchain.vectorstores import FAISS
5
+ from langchain.prompts import PromptTemplate
6
+ from langchain.chains.question_answering import load_qa_chain
7
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
8
+ from dotenv import load_dotenv
9
+ import streamlit as st
10
+ import re
11
+ from google.generativeai import configure
12
+
13
+ # Load environment variables
14
+ load_dotenv()
15
+ os.getenv("GOOGLE_API_KEY")
16
+
17
+ # Configure Google Generative AI
18
+ configure(api_key=os.getenv("GOOGLE_API_KEY"))
19
+
20
def get_pdf_text(pdf_docs):
    """Extract raw text and course details (title, description, link) from PDFs.

    Args:
        pdf_docs: Iterable of PDF file paths or file-like objects accepted
            by PyPDF2's ``PdfReader``.

    Returns:
        tuple: ``(text, course_details)`` where ``text`` is the concatenated
        text of all pages and ``course_details`` is a list of dicts with
        ``"title"``, ``"description"`` and ``"link"`` keys.
    """
    text = ""
    course_details = []
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() can return None for pages with no extractable
            # text (e.g. scanned images); guard against TypeError on +=.
            page_text = page.extract_text() or ""
            text += page_text

            # Extract the course titles and links (bold titles and URLs)
            # found on this page.
            courses = extract_course_details(page_text)
            course_details.extend(courses)

    return text, course_details
35
+
36
def extract_course_details(page_text):
    """Parse course entries (title, description, link) out of page text.

    Args:
        page_text: Raw text of a single PDF page.

    Returns:
        list[dict]: One dict per match with ``"title"``, ``"description"``
        and ``"link"`` keys. Only ALL-CAPS titles wrapped in ``**`` markers
        followed (on the same line) by an http(s) URL are recognised.
    """
    # Bold (**TITLE**) title, then the shortest stretch of text up to the
    # next http(s) URL; the URL ends at the first whitespace character.
    pattern = re.compile(r"(\*\*([A-Z\s]+)\*\*)(.*?)(http[s]?://[^\s]+)")
    return [
        {
            "title": title.strip(),
            "description": description.strip(),
            "link": link.strip(),
        }
        for _full, title, description, link in pattern.findall(page_text)
    ]
56
+
57
def get_text_chunks(text, course_details):
    """Build embeddable text chunks from the parsed course details.

    Note: the raw ``text`` argument is accepted for interface compatibility
    but is not chunked directly — only the structured course entries are.

    Args:
        text: Full extracted PDF text (currently unused).
        course_details: List of dicts with ``"title"``, ``"link"`` and
            ``"description"`` keys.

    Returns:
        list[str]: Chunks ready for embedding, each carrying the course
        title, a markdown link and the description together.
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)

    chunks = []
    for course in course_details:
        course_blob = (
            f"**Course Title**: {course['title']}\n"
            f"[Course Link]({course['link']})\n"
            f"**Description**: {course['description']}\n"
        )
        chunks.extend(splitter.split_text(course_blob))

    return chunks
71
+
72
def get_vector_store(text_chunks):
    """Embed the text chunks and persist them as a local FAISS index.

    Args:
        text_chunks: List of strings to embed.

    Returns:
        FAISS: The populated vector store (also saved under "embedding").
    """
    embedder = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    store = FAISS.from_texts(text_chunks, embedding=embedder)
    store.save_local("embedding")
    return store
78
+
79
def generate_embeddings_from_pdf(pdf_docs):
    """End-to-end pipeline: PDFs -> text/courses -> chunks -> saved FAISS index.

    Args:
        pdf_docs: Iterable of PDF files accepted by ``get_pdf_text``.

    Returns:
        FAISS: The generated vector store (persisted by ``get_vector_store``).
    """
    # Extract raw text and structured course entries from the PDFs.
    raw_text, course_details = get_pdf_text(pdf_docs)

    # Build chunks that keep course title, link and description together.
    text_chunks = get_text_chunks(raw_text, course_details)

    # Embed the chunks and save the index to disk.
    vector_store = get_vector_store(text_chunks)

    # Plain string (the original used an f-string with no placeholders).
    print("Embeddings generated and saved successfully.")
    return vector_store
92
+
93
def load_vector_store(index_path="./"):
    """Load a pre-generated FAISS index from disk.

    NOTE(review): ``get_vector_store`` saves to "embedding" but this loads
    from "./" by default — presumably the index files are placed in the app
    root at deploy time; confirm, or call with ``index_path="embedding"``.

    Args:
        index_path: Directory containing the saved FAISS index files.
            Defaults to "./" to preserve the existing behavior.

    Returns:
        FAISS: The loaded vector store.
    """
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    # allow_dangerous_deserialization is required for pickle-backed FAISS
    # indexes; acceptable here only because the index is produced by this
    # same app, not untrusted input.
    return FAISS.load_local(index_path, embeddings, allow_dangerous_deserialization=True)
98
+
99
def get_conversational_chain():
    """Build the QA chain that turns retrieved docs into recommendations.

    Returns:
        A "stuff"-type QA chain backed by Gemini Pro, using a
        course-recommendation system prompt with ``{context}`` and
        ``{query}`` input variables.
    """
    system_prompt = """
    You are an intelligent assistant helping users find the best free courses on data science, machine learning, and related fields.
    When given a query, recommend courses by analyzing their relevance based on:
    - Keywords
    - Topics of interest
    - User's goals (if provided)

    Format your responses as:
    - **Course Title**: <Title>
    - [Course Link](<Link>)
    - **Description**: <Brief Description>
    - **Relevance**: <Why it is recommended>
    Provide concise and actionable recommendations.
    """

    llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    template = PromptTemplate(
        template=system_prompt + "\nContext: {context}\nQuery: {query}",
        input_variables=["context", "query"],
    )
    return load_qa_chain(llm, chain_type="stuff", prompt=template)
124
+
125
def user_input(user_query, keywords):
    """Retrieve relevant course chunks and ask the LLM for recommendations.

    Args:
        user_query: Free-text query from the user.
        keywords: Comma-separated keyword string from the user.

    Returns:
        str: The model's formatted recommendation text.
    """
    store = load_vector_store()
    qa_chain = get_conversational_chain()

    # Fold keywords and the free-text query into one retrieval string.
    query_context = f"Keywords: {keywords}. Query: {user_query}."
    matching_docs = store.similarity_search(query_context)

    # Run the chain over the retrieved documents.
    result = qa_chain(
        {
            "input_documents": matching_docs,
            "context": "Analytics Vidhya free courses database.",
            "query": query_context,
        },
        return_only_outputs=True,
    )
    return result["output_text"]
142
+
143
+ def main():
144
+ # Streamlit app UI
145
+ st.set_page_config("Smart Course Search", layout="wide")
146
+ st.title("Smart Course Search Tool")
147
+ st.write("Search for the most relevant free courses using natural language or keywords.")
148
+
149
+ # User inputs
150
+ user_query = st.text_input("Enter your search query or context (e.g., 'I want to learn deep learning')")
151
+ keywords = st.text_input("Enter specific keywords (comma-separated, e.g., 'NLP, data visualization')")
152
+
153
+ if st.button("Search Courses"):
154
+ if user_query or keywords:
155
+ with st.spinner("Searching for the best courses..."):
156
+ results = user_input(user_query, keywords)
157
+ st.success("Search Complete!")
158
+
159
+ # Beautify and display the results
160
+ if results:
161
+ formatted_results = results.replace("**", "<b>").replace("**", "</b>").replace("[", "<u>").replace("]", "</u>")
162
+ st.markdown(formatted_results, unsafe_allow_html=True)
163
+ else:
164
+ st.warning("No relevant courses found.")
165
+ else:
166
+ st.error("Please provide a query or keywords for searching.")
167
+
168
+ if __name__ == "__main__":
169
+ main()