lewispons committed on
Commit 1f7d84d
1 Parent(s): 2d4243e

create app.py

Files changed (1)
  1. app.py +226 -0
app.py ADDED
@@ -0,0 +1,226 @@
+ import streamlit as st
+ from streamlit_extras.no_default_selectbox import selectbox
+ import pandas as pd
+ from PIL import Image
+ from random import choices
+ import zipfile
+ import os
+
+ from gensim.corpora import Dictionary
+ from gensim.models import TfidfModel
+ from gensim.similarities import SparseMatrixSimilarity
+
+ from src.models.utils.constants import user_requests_tests, TEST_INPUTS
+ from src.models.utils.mlutilities import gensim_tokenizer, get_recomendations_metadata
+
+
+ st.set_page_config(page_title="Papers Recommendation App")
+
+ model_name = "GrammarGuru"
+
+ def folder_exists(folder_path):
+     # True only if the path exists and is a directory.
+     return os.path.exists(folder_path) and os.path.isdir(folder_path)
+
+
+ def unzip_file(zip_file_path: str, modelname: str = model_name):
+     # Extract the model binaries once; skip if the model folder is already present.
+     if not folder_exists(f"models/{modelname}"):
+         try:
+             with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
+                 zip_ref.extractall("models/")
+             st.write("Model zip file extraction completed!")
+         except FileNotFoundError:
+             raise FileNotFoundError("Error: The specified zip file was not found.")
+         except zipfile.BadZipFile:
+             raise zipfile.BadZipFile("Error: The specified file is not a valid zip file.")
+
+
+ # Hide Streamlit's default main menu and footer.
+ hide_default_format = """
+ <style>
+ #MainMenu {visibility: hidden;}
+ footer {visibility: hidden;}
+ </style>
+ """
+ st.markdown(hide_default_format, unsafe_allow_html=True)
+
+ image = Image.open('reports/figures/arxiv-logo.jpg')
+
+ st.sidebar.image(image, caption="Arxiv Papers Recommendation System", width=256)
+ app_mode = st.sidebar.selectbox("Choose app mode", ["Generate Recommendations", "About this Project", "About Me"])
+
+ st.title("ResearchRadar")
+
+
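+ # Cached loaders: @st.cache_data caches the serializable papers DataFrame,
+ # while @st.cache_resource keeps the gensim objects (dictionary, TF-IDF model,
+ # similarity index) alive across Streamlit reruns without re-serializing them.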
+ @st.cache_data
+ def load_papers_corpus(path: str):
+     return pd.read_parquet(path)
+
+ @st.cache_resource
+ def load_dict(path: str):
+     dict_corpus = Dictionary.load(path)
+     return dict_corpus
+
+ @st.cache_resource
+ def load_model(path: str):
+     tfidf_model = TfidfModel.load(path)
+     return tfidf_model
+
+ @st.cache_resource
+ def load_sparse_matrix(path: str):
+     similarities = SparseMatrixSimilarity.load(path)
+     return similarities
+
+
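+ # Hedged sketch of what get_recomendations_metadata presumably does with the
+ # objects loaded above (the actual implementation lives in
+ # src/models/utils/mlutilities.py; the names below are illustrative only):
+ #
+ #     bow     = dictionary.doc2bow(gensim_tokenizer(query))  # query -> bag-of-words
+ #     weights = tfidf_model[bow]                             # bag-of-words -> TF-IDF vector
+ #     scores  = index[weights]                               # cosine similarity against every paper
+ #     top_n   = scores.argsort()[::-1][:n]                   # positions of the best matches
+ #     recs    = df.iloc[top_n]                               # metadata of the recommended papers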
+ if app_mode == "Generate Recommendations":
+     welcome_text = """
+ <div style="text-align: justify">Welcome to my paper recommendation project! This app is here to simplify your search for relevant scientific and academic papers. Our intelligent recommendation system, powered by <strong>Machine Learning and natural language processing</strong>, analyzes keywords, abstracts, titles, authors, and more to provide personalized suggestions based on your interests.</div>
+ """
+     subjects = """
+ Our model is trained to recommend papers in various domains, including:
+ - Mathematics
+ - Statistics
+ - Electrical Engineering
+ - Quantitative Biology
+ - Economics
+
+ Say goodbye to information overload and let us guide you towards **new horizons** in your quest for knowledge. Join us and discover a streamlined way to **explore, learn, and stay ahead** in your field. Welcome aboard!
+ """
+     st.markdown(welcome_text, unsafe_allow_html=True)
+     st.markdown(subjects)
+     st.divider()
+
+
+     with st.container():
+         examples = """
+ ### Examples of prompts
+ - "Can you recommend papers that explore the application of deep learning in computer vision for object detection, image segmentation, and video analysis?"
+ - "Can you recommend papers that explore the use of deep reinforcement learning for autonomous driving, including perception, planning, and control?"
+ - "Could you provide papers on image and video compression algorithms based on the latest video coding standards, such as HEVC and AV1?"
+ - "Can you suggest recent papers on behavioral economics that investigate the role of emotions and biases in decision-making under uncertainty, particularly in the context of financial markets?"
+ """
+         st.markdown(examples)
+         st.divider()
+
+
+     with st.spinner('The model binaries are unzipping ...'):
+         zip_file_path = "models/GrammarGuru.zip"
+         unzip_file(zip_file_path)
+
+     with st.spinner('The model binaries are loading, please wait...'):
+         df = load_papers_corpus("models/GrammarGuru/data/GrammarGuru.parquet.gzip")
+         dictionary = load_dict("models/GrammarGuru/dictionaries/GrammarGuru.dict")
+         model = load_model("models/GrammarGuru/tdidf/GrammarGuru.model")
+         matrix = load_sparse_matrix("models/GrammarGuru/similarities_matrix/GrammarGuru")
+         st.success('Models loaded, yay!', icon="πŸš€")
+
+     st.markdown("#### Generate Recommendations")
+     # recs_number = st.slider("Enter the number of papers you need", min_value=1, max_value=10, value=3)
+     query = st.text_input("Enter the description of the paper you need (the more descriptive, the better)", value="")
+
+     if query != "":
+         cleaned_prompt = gensim_tokenizer(query)
+
+         with st.spinner('Generating recommendations ...'):
+             results_df = get_recomendations_metadata(query=query, df=df, n=3, dictionary=dictionary, index=matrix, tfidf_model=model)
+
+             ids = results_df['id'].to_list()
+             titles = results_df['title'].to_list()
+             authors = results_df['authors'].to_list()
+             categories = results_df['categories'].to_list()
+             abstracts = results_df['abstract'].to_list()
+             release_date = results_df['update_date'].to_list()
+
+             results = list(zip(ids, titles, authors, categories, abstracts, release_date))
+
+         st.write("Your top 3 papers:")
+         for result in results:
+             with st.container():
+                 col1, col2 = st.columns([1, 3])
+
+                 with col1:
+                     st.markdown("**Title:**")
+                     st.markdown("**Author:**")
+                     st.markdown("**Categories:**")
+                     st.markdown("**Release date:**")
+                     st.markdown("**Abstract:**")
+
+                 with col2:
+                     st.write(f"Title: {result[1]}")
+                     st.write(f"Author: {result[2]}")
+                     st.write(f"Categories: {result[3]}")
+                     st.write(f"Release date: {result[5]}")
+                     st.write(f"Abstract: {result[4]}")
+                     st.markdown(f"[Paper Link](https://arxiv.org/abs/{result[0]})")
+                 st.divider()
+         st.balloons()
+
+     else:
+         st.write("Please enter your prompt :)")
+
+
+ elif app_mode == "About this Project":
+     intro_text = """
+ Welcome to my paper recommendation project! This application aims to simplify and speed up the process of finding relevant scientific and academic papers. It utilizes Machine Learning techniques and natural language processing to provide an effective solution for students, researchers, and general users.
+
+ ### Key Features
+
+ - **Intelligent Recommendation System:** The application uses advanced algorithms to analyze keywords, abstracts, titles, authors, and other metadata associated with each paper.
+ - **Efficient Discovery Process:** By leveraging machine learning, the system identifies and suggests the most relevant papers based on the user's interests and areas of study.
+ - **Comprehensive Analysis:** The recommendation system performs an exhaustive analysis of various aspects of each paper to ensure accurate and targeted recommendations.
+ - **Time-saving Solution:** Instead of manually searching through vast amounts of information, users can rely on this application to streamline the paper discovery process.
+
+ ### Available Models
+
+ - SemanticSherlock: trained on 100% of the data
+ - LanguageLiberator: trained on 75% of the data
+ - TextualTango: trained on 50% of the data
+ - GrammarGuru: trained on 25% of the data **(Deployed Version)**
+
+ **Note:** Due to resource limitations on the free tier of Streamlit, only the GrammarGuru version of the model is available for deployment.
+
+ ### Benefits
+
+ - **Saves Time and Effort:** With the application's intelligent algorithms, users can avoid the challenges and time-consuming nature of searching for papers on their own.
+ - **Increased Relevance:** By considering keywords, abstracts, titles, authors, and other metadata, the recommendation system provides users with highly relevant paper suggestions.
+ - **Tailored to User Interests:** The system takes into account each user's interests and areas of study, ensuring that the recommended papers align with their specific needs.
+ - **Accessible to All Users:** Whether you are a student, researcher, or general user, this application is designed to cater to a wide range of users' needs.
+
+ ### Get Started
+
+ Explore, discover, and reach new horizons in your search for knowledge with our paper recommendation application. Simplify your journey to finding relevant papers and stay ahead in your field.
+
+ Take a look at this project in my [GitHub Repo](https://github.com/LewisPons/arxiv-paper-recommender-system)
+ """
+
+     st.markdown(intro_text)
+
+
+ elif app_mode == "About Me":
+     st.title('About Me')
+     mkdn = """
+ <p style="text-align: justify;">Hey there! I'm <strong>Luis Morales</strong>, a passionate data professional with a background in Actuarial Sciences and expertise in Data Engineering and Machine Learning. I love diving into complex data projects and helping organizations unlock the power of their data. From designing robust data pipelines to building powerful ML models, I enjoy the thrill of turning raw data into actionable insights. With my coding skills in Python and R, I'm always up for tackling challenging projects and learning new technologies along the way.
+ Thank you for taking the time to learn a little bit about me!</p>
+ """
+     st.markdown(mkdn, unsafe_allow_html=True)
+     st.success("Feel free to contact me here πŸ‘‡")
+
+     col1, col2, col3, col4 = st.columns((2, 1, 2, 1))
+     col1.markdown('* [LinkedIn](https://www.linkedin.com/in/luis-morales-ponce/)')
+     col1.markdown('* [GitHub](https://github.com/LewisPons)')
+     image2 = Image.open('reports/figures/profile.jpeg')
+     st.image(image2, width=400)
+