NealCaren committed
Commit 90399d1
1 Parent(s): 3bb3f74

Upload app.py


it's the app

Files changed (1): app.py +184 -0
app.py ADDED
@@ -0,0 +1,184 @@
import streamlit as st

import numpy as np
import pandas as pd
import re
import pickle
from collections import OrderedDict
import io

from sentence_transformers import SentenceTransformer, CrossEncoder, util
import torch

from nltk.tokenize import sent_tokenize
import nltk

import gdown
import requests
from PIL import Image


# CSS tweak: shrink the expander header font
st.markdown(
    """
    <style>
    .streamlit-expanderHeader {
        font-size: medium;
    }
    </style>
    """,
    unsafe_allow_html=True,
)


nltk.download('punkt')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

@st.cache_data
def load_embeddings():
    url = "https://drive.google.com/uc?export=download&id=14y-RQ18IQ3tP7p9iMTeDKSsvFAKz1bLv"
    output = "embeddings.npy"
    gdown.download(url, output, quiet=False)

    corpus_embeddings = np.load(output)
    return corpus_embeddings


@st.cache_data
def load_data():
    url = "https://drive.google.com/uc?export=download&id=1--6zc38C-FfIb-C4BMG87Bvx947Z1UNO"
    output = "passages.jsonl"
    gdown.download(url, output, quiet=False)

    df = pd.read_json(output, lines=True)
    df.reset_index(inplace=True, drop=True)
    return df
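
# Both loaders are wrapped in @st.cache_data, so the Google Drive downloads
# and parsing run only once; subsequent Streamlit reruns reuse the cached
# objects.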


st.title('Related Social Movement Articles')

st.write('This project is a work in progress that searches the abstracts of recently published articles related to social movements and retrieves the most relevant ones.')


with st.spinner(text="Loading data..."):
    df = load_data()
    passages = df['Abstract'].values

no_of_graphs = len(df)
no_of_articles = len(df['cite'].value_counts())


notes = f'''Notes:
* I have found three types of searches work best:
    * Phrases or specific topics, such as "inequality in latin america", "race color skin tone measurement", "audit study experiment gender", or "logistic regression or linear probability model".
    * Citations to well-known works, either author and year ("bourdieu 1984") or author and idea ("Crenshaw intersectionality").
    * Questions, like "What is a topic model?" or "How did Weber define bureaucracy?"
* The search expands beyond exact matching, so "asia social movements" may return paragraphs on Asian-American politics and South Korean labor unions.
* The first search can take up to 10 seconds as the files load. After that, it responds more quickly.
* The paragraph most relevant to your search is returned first, along with up to four other related paragraphs from the same article.
* The most relevant sentence within each paragraph, as determined by math, is displayed. Click on it to see the full paragraph.
* The results are not exhaustive, and seem to drift off even when you suspect there are more relevant articles :man-shrugging:.
* The dataset currently includes {no_of_graphs:,} paragraphs from {no_of_articles:,} articles published in the last five years in *Mobilization*, *Social Forces*, *Social Problems*, *Sociology of Race and Ethnicity*, *Gender and Society*, *Socius*, *JHSB*, *Annual Review of Sociology*, and the *American Sociological Review*.
* Behind the scenes, the semantic search uses [text embeddings](https://www.sbert.net) with a [retrieve & re-rank](https://colab.research.google.com/github/UKPLab/sentence-transformers/blob/master/examples/applications/retrieve_rerank/retrieve_rerank_simple_wikipedia.ipynb) process to find the best matches.
* Let [me](mailto:neal.caren@unc.edu) know what you think or if it looks broken.
'''

# st.markdown(notes)
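# NB: the notes text above is assembled but never rendered, since the
# st.markdown(notes) call is commented out.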


@st.cache_resource
def sent_trans_load():
    # The bi-encoder maps the query into the same vector space as the
    # precomputed passage embeddings, for semantic search; cached so the
    # model loads once per session
    bi_encoder = SentenceTransformer('multi-qa-MiniLM-L6-cos-v1')
    bi_encoder.max_seq_length = 256  # Truncate long passages to 256 tokens (model max is 512)
    return bi_encoder


@st.cache_resource
def sent_cross_load():
    # The cross-encoder scores (query, passage) pairs for re-ranking
    cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
    return cross_encoder
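
# Retrieve & re-rank, as described in the notes: the bi-encoder cheaply
# narrows the corpus to top_k candidates via vector similarity, and the
# slower but more accurate cross-encoder then reads each (query, passage)
# pair to produce the final ordering.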


with st.spinner(text="Loading embeddings..."):
    corpus_embeddings = load_embeddings()
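
# Nothing in this file generates embeddings.npy; presumably it was
# precomputed offline with the same bi-encoder, roughly like this
# (a sketch, not part of this commit):
#
#   bi_encoder = SentenceTransformer('multi-qa-MiniLM-L6-cos-v1')
#   corpus_embeddings = bi_encoder.encode(passages, convert_to_numpy=True,
#                                         show_progress_bar=True)
#   np.save('embeddings.npy', corpus_embeddings)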


def search(query, top_k=50):

    ##### Semantic Search #####
    # Encode the query with the bi-encoder and retrieve candidate passages
    question_embedding = bi_encoder.encode(query, convert_to_tensor=True).to(device)

    hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
    hits = hits[0]  # Get the hits for the first (and only) query

    ##### Re-Ranking #####
    # Score every retrieved passage against the query with the cross-encoder,
    # then sort by those scores
    cross_inp = [[query, passages[hit['corpus_id']]] for hit in hits]
    cross_scores = cross_encoder.predict(cross_inp)

    for idx in range(len(cross_scores)):
        hits[idx]['cross-score'] = cross_scores[idx]

    hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)

    # Group the top hits by article citation, preserving rank order
    hd = OrderedDict()
    for hit in hits[:30]:
        row_id = hit['corpus_id']
        cite = df.loc[row_id]['cite']
        graph = df.loc[row_id]['Abstract']

        # Find the best sentence: score each sentence of the paragraph
        # against the query and bold the top scorer
        ab_sentences = sent_tokenize(graph)
        cross_inp = [[query, s] for s in ab_sentences]
        cross_scores = cross_encoder.predict(cross_inp)
        thesis = pd.Series(cross_scores, index=ab_sentences).sort_values().index[-1]
        graph = graph.replace(thesis, f'**{thesis}**')

        if cite in hd:
            hd[cite].append(graph)
        else:
            hd[cite] = [graph]

    # Show each citation with up to five related paragraphs, using the
    # bolded thesis sentence as the expander label
    for cite, graphs in hd.items():
        st.markdown(cite)

        for graph in graphs[:5]:
            thesis = re.findall(r'\*\*(.*?)\*\*', graph)[0]
            with st.expander(thesis):
                st.write(f'> {graph}')
        st.write('')


search_query = st.text_area('Enter abstract or search phrase:')
if search_query != '':
    with st.spinner(text="Searching and sorting results."):
        # Show a placeholder image while the models load and the search
        # runs, then clear it once results are written
        placeholder = st.empty()
        with placeholder.container():
            st.image('https://www.dropbox.com/s/yndn6lkesjga9a6/emerac.png?raw=1')
        bi_encoder = sent_trans_load()
        cross_encoder = sent_cross_load()
        search(search_query)
        placeholder.empty()
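
To run the app locally (an inferred setup, based only on the imports above): pip install streamlit pandas numpy torch sentence-transformers nltk gdown requests Pillow, then streamlit run app.py.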