harshxmishra committed
Commit e6f32c5 · verified · 1 parent: 7af6925

Added all files

Files changed (3)
  1. app.py +169 -0
  2. index.faiss +0 -0
  3. requirements.txt +169 -0
app.py ADDED
@@ -0,0 +1,169 @@
+ import os
+ import re
+
+ import streamlit as st
+ from dotenv import load_dotenv
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_community.vectorstores import FAISS
+ from langchain.prompts import PromptTemplate
+ from langchain.chains.question_answering import load_qa_chain
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
+ from google.generativeai import configure
+
+ # Load environment variables and configure Google Generative AI
+ load_dotenv()
+ configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+ def get_pdf_text(pdf_docs):
+     """Extract raw text and course details (title, description, and link) from the PDFs."""
+     text = ""
+     course_details = []
+     for pdf in pdf_docs:
+         pdf_reader = PdfReader(pdf)
+         for page in pdf_reader.pages:
+             page_text = page.extract_text() or ""  # extract_text() can return None
+             text += page_text
+
+             # Collect the course titles, descriptions, and links found on this page
+             course_details.extend(extract_course_details(page_text))
+
+     return text, course_details
+
+ def extract_course_details(page_text):
+     """Extract course title, description, and link from the page text."""
+     course_details = []
+     # Assumes titles appear literally as **TITLE** in the extracted text,
+     # followed by a description and a URL
+     title_pattern = r"(\*\*([A-Z\s]+)\*\*)(.*?)(http[s]?://[^\s]+)"
+     for match in re.findall(title_pattern, page_text):
+         course_details.append({
+             "title": match[1].strip(),
+             "description": match[2].strip(),
+             "link": match[3].strip(),
+         })
+
+     return course_details
+
+ def get_text_chunks(text, course_details):
+     """Build one text block per course (title, link, description) and split it into chunks."""
+     text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
+
+     chunks = []
+     for course in course_details:
+         course_text = f"**Course Title**: {course['title']}\n"
+         course_text += f"[Course Link]({course['link']})\n"
+         course_text += f"**Description**: {course['description']}\n"
+
+         chunks.extend(text_splitter.split_text(course_text))
+
+     return chunks
+
+ def get_vector_store(text_chunks):
+     """Generate embeddings and store them in a local FAISS index."""
+     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+     vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
+     vector_store.save_local("embedding")
+     return vector_store
+
+ def generate_embeddings_from_pdf(pdf_docs):
+     """Generate and save embeddings from the PDF course file."""
+     # Extract text and course details, chunk them, then build the vector store
+     raw_text, course_details = get_pdf_text(pdf_docs)
+     text_chunks = get_text_chunks(raw_text, course_details)
+     vector_store = get_vector_store(text_chunks)
+
+     print("Embeddings generated and saved successfully.")
+     return vector_store
+
+ def load_vector_store():
+     """Load pre-generated embeddings from the local FAISS index."""
+     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+     return FAISS.load_local("embedding", embeddings, allow_dangerous_deserialization=True)
+
+ def get_conversational_chain():
+     """Create a structured QA chain for processing search queries."""
+     system_prompt = """
+     You are an intelligent assistant helping users find the best free courses on data science, machine learning, and related fields.
+     When given a query, recommend courses by analyzing their relevance based on:
+     - Keywords
+     - Topics of interest
+     - The user's goals (if provided)
+
+     Format your responses as:
+     - **Course Title**: <Title>
+     - [Course Link](<Link>)
+     - **Description**: <Brief Description>
+     - **Relevance**: <Why it is recommended>
+     Provide concise and actionable recommendations.
+     """
+
+     prompt_template = PromptTemplate(
+         template=system_prompt + "\nContext: {context}\nQuery: {query}",
+         input_variables=["context", "query"],
+     )
+     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
+     return load_qa_chain(model, chain_type="stuff", prompt=prompt_template)
+
+ def user_input(user_query, keywords):
+     """Process user input and search for relevant courses."""
+     vector_store = load_vector_store()
+     chain = get_conversational_chain()
+
+     # Construct the search query and retrieve the most similar chunks
+     query_context = f"Keywords: {keywords}. Query: {user_query}."
+     docs = vector_store.similarity_search(query_context)
+
+     # The "stuff" chain fills the prompt's {context} variable from input_documents
+     response = chain(
+         {"input_documents": docs, "query": query_context},
+         return_only_outputs=True,
+     )
+     return response["output_text"]
+
+ def main():
+     # Streamlit app UI
+     st.set_page_config("Smart Course Search", layout="wide")
+     st.title("Smart Course Search Tool")
+     st.write("Search for the most relevant free courses using natural language or keywords.")
+
+     # User inputs
+     user_query = st.text_input("Enter your search query or context (e.g., 'I want to learn deep learning')")
+     keywords = st.text_input("Enter specific keywords (comma-separated, e.g., 'NLP, data visualization')")
+
+     if st.button("Search Courses"):
+         if user_query or keywords:
+             with st.spinner("Searching for the best courses..."):
+                 results = user_input(user_query, keywords)
+             st.success("Search Complete!")
+
+             # The model already answers in Markdown (**bold**, [link](url)),
+             # so render it directly instead of rewriting the markers as HTML
+             if results:
+                 st.markdown(results)
+             else:
+                 st.warning("No relevant courses found.")
+         else:
+             st.error("Please provide a query or keywords for searching.")
+
+ if __name__ == "__main__":
+     main()
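
load_vector_store() expects the FAISS index to already exist in a local "embedding" directory, so the embeddings must be generated once before the app can serve searches. A minimal sketch of that one-time step, assuming a hypothetical courses.pdf next to app.py and a GOOGLE_API_KEY in .env:

import app  # importing is safe: main() only runs under __main__

# "courses.pdf" is a hypothetical filename -- substitute the real course catalog.
# get_pdf_text() accepts anything PyPDF2's PdfReader can read (paths or file objects).
with open("courses.pdf", "rb") as pdf_file:
    app.generate_embeddings_from_pdf([pdf_file])

After this runs, streamlit run app.py starts the search UI against the saved index.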
index.faiss ADDED
Binary file (826 kB).
 
requirements.txt ADDED
@@ -0,0 +1,169 @@
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ altair==5.3.0
+ annotated-types==0.7.0
+ anyio==4.4.0
+ asgiref==3.8.1
+ asttokens==2.4.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ backoff==2.2.1
+ bcrypt==4.2.0
+ blinker==1.8.2
+ build==1.2.1
+ cachetools==5.4.0
+ certifi==2024.7.4
+ charset-normalizer==3.3.2
+ chroma-hnswlib==0.7.6
+ chromadb==0.5.5
+ click==8.1.7
+ colorama==0.4.6
+ coloredlogs==15.0.1
+ comm==0.2.2
+ dataclasses-json==0.6.7
+ debugpy==1.8.2
+ decorator==5.1.1
+ Deprecated==1.2.14
+ dnspython==2.6.1
+ email_validator==2.2.0
+ exceptiongroup==1.2.2
+ executing==2.0.1
+ faiss-cpu==1.8.0.post1
+ fastapi==0.111.1
+ fastapi-cli==0.0.4
+ filelock==3.15.4
+ flatbuffers==24.3.25
+ frozenlist==1.4.1
+ fsspec==2024.6.1
+ gitdb==4.0.11
+ GitPython==3.1.43
+ google-ai-generativelanguage==0.6.6
+ google-api-core==2.19.1
+ google-api-python-client==2.138.0
+ google-auth==2.32.0
+ google-auth-httplib2==0.2.0
+ google-generativeai==0.7.2
+ googleapis-common-protos==1.63.2
+ greenlet==3.0.3
+ grpcio==1.65.1
+ h11==0.14.0
+ httpcore==1.0.5
+ httplib2==0.22.0
+ httptools==0.6.1
+ httpx==0.27.0
+ huggingface-hub==0.24.2
+ humanfriendly==10.0
+ idna==3.7
+ importlib_metadata==8.0.0
+ importlib_resources==6.4.0
+ ipykernel==6.29.5
+ ipython==8.18.1
+ jedi==0.19.1
+ Jinja2==3.1.4
+ jsonpatch==1.33
+ jsonpointer==3.0.0
+ jsonschema==4.23.0
+ jsonschema-specifications==2023.12.1
+ jupyter_client==8.6.2
+ jupyter_core==5.7.2
+ kubernetes==30.1.0
+ langchain==0.2.11
+ langchain-community==0.2.10
+ langchain-core==0.2.24
+ langchain-google-genai==1.0.8
+ langchain-text-splitters==0.2.2
+ langsmith==0.1.93
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.5
+ marshmallow==3.21.3
+ matplotlib-inline==0.1.7
+ mdurl==0.1.2
+ mmh3==4.1.0
+ monotonic==1.6
+ mpmath==1.3.0
+ multidict==6.0.5
+ mypy-extensions==1.0.0
+ nest-asyncio==1.6.0
+ numpy==1.26.4
+ oauthlib==3.2.2
+ onnxruntime==1.18.1
+ opentelemetry-api==1.26.0
+ opentelemetry-exporter-otlp-proto-common==1.26.0
+ opentelemetry-exporter-otlp-proto-grpc==1.26.0
+ opentelemetry-instrumentation==0.47b0
+ opentelemetry-instrumentation-asgi==0.47b0
+ opentelemetry-instrumentation-fastapi==0.47b0
+ opentelemetry-proto==1.26.0
+ opentelemetry-sdk==1.26.0
+ opentelemetry-semantic-conventions==0.47b0
+ opentelemetry-util-http==0.47b0
+ orjson==3.10.6
+ overrides==7.7.0
+ packaging==24.1
+ pandas==2.2.2
+ parso==0.8.4
+ pillow==10.4.0
+ platformdirs==4.2.2
+ posthog==3.5.0
+ prompt_toolkit==3.0.47
+ proto-plus==1.24.0
+ protobuf==3.20.2
+ psutil==6.0.0
+ pure_eval==0.2.3
+ pyarrow==17.0.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pydantic==2.8.2
+ pydantic_core==2.20.1
+ pydeck==0.9.1
+ Pygments==2.18.0
+ pyparsing==3.1.2
+ PyPDF2==3.0.1
+ PyPika==0.48.9
+ pyproject_hooks==1.1.0
+ pyreadline3==3.4.1
+ python-dateutil==2.9.0.post0
+ python-dotenv==1.0.1
+ python-multipart==0.0.9
+ pytz==2024.1
+ pywin32==306
+ PyYAML==6.0.1
+ pyzmq==26.0.3
+ referencing==0.35.1
+ requests==2.32.3
+ requests-oauthlib==2.0.0
+ rich==13.7.1
+ rpds-py==0.19.1
+ rsa==4.9
+ shellingham==1.5.4
+ six==1.16.0
+ smmap==5.0.1
+ sniffio==1.3.1
+ SQLAlchemy==2.0.31
+ stack-data==0.6.3
+ starlette==0.37.2
+ streamlit==1.37.0
+ sympy==1.13.1
+ tenacity==8.5.0
+ tokenizers==0.19.1
+ toml==0.10.2
+ tomli==2.0.1
+ toolz==0.12.1
+ tornado==6.4.1
+ tqdm==4.66.4
+ traitlets==5.14.3
+ typer==0.12.3
+ typing-inspect==0.9.0
+ typing_extensions==4.12.2
+ tzdata==2024.1
+ uritemplate==4.1.1
+ urllib3==2.2.2
+ uvicorn==0.30.3
+ watchdog==4.0.1
+ watchfiles==0.22.0
+ wcwidth==0.2.13
+ websocket-client==1.8.0
+ websockets==12.0
+ wrapt==1.16.0
+ yarl==1.9.4
+ zipp==3.19.2
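
These pins are a snapshot of a full development environment rather than the app's minimal dependencies: Jupyter, ChromaDB, and FastAPI packages appear even though app.py never imports them, and pywin32==306 is Windows-only, so installing the file unmodified with pip install -r requirements.txt requires Windows.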