stephenleo committed on
Commit 34b12ff
1 Parent(s): 20813be

Initial Commit

Files changed (5)
  1. README.md +3 -3
  2. app.py +170 -0
  3. data.csv +77 -0
  4. helpers.py +194 -0
  5. requirements.txt +9 -0
README.md CHANGED
@@ -1,11 +1,11 @@
  ---
- title: S Tri P
+ title: 'S-Tri-P: Semantic Similarity of Scientific Papers'
- emoji: 💻
+ emoji: 🔥
  colorFrom: pink
  colorTo: indigo
  sdk: streamlit
  app_file: app.py
- pinned: false
+ pinned: true
  ---
 
  # Configuration
app.py ADDED
@@ -0,0 +1,170 @@
+ import networkx as nx
+ from streamlit.components.v1 import html
+ import streamlit as st
+ import helpers
+ st.set_page_config(layout='wide',
+                    page_title='S-Tri-P: Semantic Similarity of Scientific Papers!',
+                    page_icon='💡'
+                    )
+
+
+ def main():
+     st.title('S-Tri-P (S3P): Semantic Similarity of Scientific Papers!')
+
+     st.header('📂 Load Data')
+     uploaded_file = st.file_uploader("Choose a CSV file",
+                                      help='Upload a CSV file with the following columns: Title, Abstract')
+
+     ##########
+     # Load data
+     ##########
+     if uploaded_file is not None:
+         df = helpers.load_data(uploaded_file)
+     else:
+         df = helpers.load_data('data.csv')
+
+     data = df.copy()
+     st.write(f'Number of papers: {len(data)}')
+     st.write('First 5 rows of loaded data:')
+     st.write(data[['Title', 'Abstract']].head())
+
+     if data is not None:
+         ##########
+         # Topic modeling
+         ##########
+         st.header('🔥 Topic Modeling')
+
+         cols = st.columns(3)
+         with cols[0]:
+             min_topic_size = st.slider('Minimum topic size', key='min_topic_size', min_value=2,
+                                        max_value=int(len(data)/3), step=1, value=3,
+                                        help='The minimum size of the topic. Increasing this value will lead to a lower number of clusters/topics.')
+         with cols[1]:
+             n_gram_range = st.slider('N-gram range', key='n_gram_range', min_value=1,
+                                      max_value=4, step=1, value=(1, 3),
+                                      help='N-gram range for the topic model')
+         with cols[2]:
+             st.text('')
+             st.text('')
+             st.button('Reset Defaults', on_click=helpers.reset_default_topic_sliders, key='reset_topic_sliders',
+                       kwargs={'min_topic_size': 3, 'n_gram_range': (1, 3)})
+
+         with st.spinner('Topic Modeling'):
+             data, topic_model, topics = helpers.topic_modeling(
+                 data, min_topic_size=min_topic_size, n_gram_range=n_gram_range)
+
+         mapping = {
+             'Topic Keywords': topic_model.visualize_barchart,
+             'Topic Similarities': topic_model.visualize_heatmap,
+             'Topic Hierarchies': topic_model.visualize_hierarchy,
+             'Intertopic Distance': topic_model.visualize_topics
+         }
+
+         cols = st.columns(3)
+         with cols[0]:
+             topic_model_vis_option = st.selectbox(
+                 'Select Topic Modeling Visualization', mapping.keys())
+         try:
+             fig = mapping[topic_model_vis_option]()
+             fig.update_layout(title='')
+             st.plotly_chart(fig, use_container_width=True)
+         except:
+             st.warning(
+                 'No visualization available. Try a lower Minimum topic size!')
+
+         ##########
+         # S-TRI-P Network
+         ##########
+         st.header('🚀 S-TRI-P Network')
+
+         with st.spinner('Embedding generation'):
+             data = helpers.embeddings(data)
+
+         with st.spinner('Cosine Similarity Calculation'):
+             cosine_sim_matrix = helpers.cosine_sim(data)
+
+         min_value, value = helpers.calc_optimal_threshold(
+             cosine_sim_matrix,
+             # 25% is a good value for the number of papers
+             max_connections=helpers.calc_max_connections(len(data), 0.25)
+         )
+
+         cols = st.columns(3)
+         with cols[0]:
+             threshold = st.slider('Cosine Similarity Threshold', key='threshold', min_value=min_value,
+                                   max_value=1.0, step=0.01, value=value,
+                                   help='The minimum cosine similarity between papers to draw a connection. Increasing this value will lead to fewer connections.')
+
+             neighbors, num_connections = helpers.calc_neighbors(
+                 cosine_sim_matrix, threshold)
+             st.write(f'Number of connections: {num_connections}')
+
+         with cols[1]:
+             st.text('')
+             st.text('')
+             st.button('Reset Defaults', on_click=helpers.reset_default_threshold_slider, key='reset_threshold',
+                       kwargs={'threshold': value})
+
+         with st.spinner('Network Generation'):
+             nx_net, pyvis_net = helpers.network_plot(
+                 data, topics, neighbors)
+
+         # Save and read graph as HTML file (on Streamlit Sharing)
+         try:
+             path = '/tmp'
+             pyvis_net.save_graph(f'{path}/pyvis_graph.html')
+             HtmlFile = open(f'{path}/pyvis_graph.html',
+                             'r', encoding='utf-8')
+
+         # Save and read graph as HTML file (locally)
+         except:
+             path = '/html_files'
+             pyvis_net.save_graph(f'{path}/pyvis_graph.html')
+             HtmlFile = open(f'{path}/pyvis_graph.html',
+                             'r', encoding='utf-8')
+
+         # Load HTML file in HTML component for display on Streamlit page
+         html(HtmlFile.read(), height=800)
+
+         ##########
+         # Centrality
+         ##########
+         st.header('🏅 Most Important Papers')
+
+         centrality_mapping = {
+             'Closeness Centrality': nx.closeness_centrality,
+             'Degree Centrality': nx.degree_centrality,
+             'Eigenvector Centrality': nx.eigenvector_centrality,
+             'Betweenness Centrality': nx.betweenness_centrality,
+         }
+
+         cols = st.columns(3)
+         with cols[0]:
+             centrality_option = st.selectbox(
+                 'Select Centrality Measure', centrality_mapping.keys())
+
+         # Calculate centrality
+         centrality = centrality_mapping[centrality_option](nx_net)
+
+         with st.spinner('Network Centrality Calculation'):
+             fig = helpers.network_centrality(
+                 data, centrality, centrality_option)
+             st.plotly_chart(fig, use_container_width=True)
+
+     st.markdown(
+         """
+         💡🔥🚀 S-TRI-P v1.0 🚀🔥💡
+
+         👨‍🔬 Author: Marie Stephen Leo
+
+         👔 Linkedin: [Marie Stephen Leo](https://www.linkedin.com/in/marie-stephen-leo/)
+
+         📝 Medium: [@stephen-leo](https://stephen-leo.medium.com/)
+
+         💻 Github: [stephenleo](https://github.com/stephenleo)
+         """
+     )
+
+
+ if __name__ == '__main__':
+     main()
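Note: app.py above calls helpers.load_data, helpers.topic_modeling, helpers.embeddings, helpers.cosine_sim, helpers.calc_max_connections, helpers.calc_optimal_threshold, helpers.calc_neighbors, helpers.network_plot, helpers.network_centrality, and the two reset callbacks. helpers.py (+194 lines) is part of this commit but its diff is not rendered in this view. For orientation only, here is a minimal sketch of what the similarity and threshold helpers could look like, assuming sentence-transformers embeddings and scikit-learn cosine similarity; the function bodies, the embedding model name, and the threshold heuristic are illustrative assumptions, not the committed code.

# Illustrative sketch only -- NOT the committed helpers.py (its diff is not shown here).
# Assumes sentence-transformers, scikit-learn and numpy; names mirror the calls in app.py.
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity


def embeddings(data, model_name='allenai-specter'):
    # Embed "Title [SEP] Abstract" for every paper (the model name is an assumption).
    model = SentenceTransformer(model_name)
    text = (data['Title'].fillna('') + ' [SEP] ' + data['Abstract'].fillna('')).tolist()
    data['embedding'] = list(model.encode(text, show_progress_bar=False))
    return data


def cosine_sim(data):
    # Pairwise cosine similarity matrix between all paper embeddings.
    return cosine_similarity(np.vstack(data['embedding'].values))


def calc_max_connections(num_papers, ratio):
    # Cap the network at `ratio` of all possible paper pairs.
    return ratio * num_papers * (num_papers - 1) / 2


def calc_neighbors(cosine_sim_matrix, threshold):
    # Upper-triangle index pairs (i, j) whose similarity is at least `threshold`.
    neighbors = np.argwhere(np.triu(cosine_sim_matrix, k=1) >= threshold)
    return neighbors, len(neighbors)


def calc_optimal_threshold(cosine_sim_matrix, max_connections):
    # Scan thresholds from high to low; the default slider value is the lowest
    # threshold that still keeps the connection count within max_connections.
    value = 1.0
    for threshold in np.arange(1.0, 0.0, -0.01):
        _, num_connections = calc_neighbors(cosine_sim_matrix, threshold)
        if num_connections > max_connections:
            break
        value = round(float(threshold), 2)
    # Slider floor: the smallest observed off-diagonal similarity, rounded down.
    min_sim = cosine_sim_matrix[np.triu_indices_from(cosine_sim_matrix, k=1)].min()
    min_value = max(0.0, float(np.floor(min_sim * 100) / 100))
    return min_value, value

The (apparently BERTopic-based) topic_modeling helper and the pyvis-based network_plot / network_centrality helpers are beyond this sketch; see helpers.py in the repository for the actual implementations.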
data.csv ADDED
@@ -0,0 +1,77 @@
+ ,Title,Abstract
+ 0,Rethinking Search: Making Experts out of Dilettantes; Rethinking Search: Making Experts out of Dilettantes,"When experiencing an information need, users want to engage with an expert, but often turn to an information retrieval system, such as a search engine, instead. Classical information retrieval systems do not answer information needs directly, but instead provide references to (hopefully authoritative) answers. Successful question answering systems offer a limited corpus created on-demand by human experts, which is neither timely nor scalable. Large pre-trained language models, by contrast, are capable of directly generating prose that may be responsive to an information need, but at present they are dilettantes rather than experts-they do not have a true understanding of the world, they are prone to hallucinating, and crucially they are incapable of justifying their utterances by referring to supporting documents in the corpus they were trained over. This paper examines how ideas from classical information retrieval and large pre-trained language models can be synthesized and evolved into systems that truly deliver on the promise of expert advice. CCS CONCEPTS • Information systems → Retrieval models and ranking."
+ 1,Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision,"Pre-trained representations are becoming crucial for many NLP and perception tasks. While representation learning in NLP has transitioned to training on raw text without human annotations, visual and vision-language representations still rely heavily on curated training datasets that are expensive or require expert knowledge. For vision applications , representations are mostly learned using datasets with explicit class labels such as Ima-geNet or OpenImages. For vision-language, popular datasets like Conceptual Captions, MSCOCO, or CLIP all involve a non-trivial data collection (and cleaning) process. This costly curation process limits the size of datasets and hence hinders the scaling of trained models. In this paper, we leverage a noisy dataset of over one billion image alt-text pairs, obtained without expensive filtering or post-processing steps in the Conceptual Captions dataset. A simple dual-encoder architecture learns to align visual and language representations of the image and text pairs using a contrastive loss. We show that the scale of our corpus can make up for its noise and leads to state-of-the-art representations even with such a simple learning scheme. Our visual representation achieves strong performance when transferred to classification tasks such as ImageNet and VTAB. The aligned visual and language representations also set new state-of-the-art results on Flickr30K and MSCOCO benchmarks, even when compared with more sophisticated cross-attention models. The representations also enable cross-modality search with complex text and text + image queries."
+ 2,Machine learning as a model for cultural learning: Teaching an algorithm what it means to be fat,"As we navigate our cultural environment, we learn cultural biases, like those around gender, social class, health, and body weight. It is unclear, however, exactly how public culture becomes private culture. In this paper, we provide a theoretical account of such cultural learning. We propose that neural word embeddings provide a parsimonious and cognitively plausible model of the representations learned from natural language. Using neural word embeddings, we extract cultural schemata about body weight from New York Times articles. We identify several cultural schemata that link obesity to gender, immorality, poor health, and low socioeconomic class. Such schemata may be subtly but pervasively activated in public culture; thus, language can chronically reproduce biases. Our findings reinforce ongoing concerns that machine learning can also encode, and reproduce, harmful human biases. As we navigate our cultural environment, we learn to ascribe pejorative meanings to seemingly arbitrary differences. In the case of body weight, for example, media portrayals of fat and thin bodies may drive widespread idealization of thinness and fear of fat in the U.S (Grabe et al., 2008; López-Guimerà et al., 2010; Saguy and Almeling, 2008; Ravary et al., 2019; Ata and Thompson, 2010). While there is little doubt that public culture, like media, is a powerful source for cognitive socialization, it remains unclear exactly how an individual decodes and internalizes meanings in the cultural environment (Bail, 2014). Nor is it obvious how cultural meanings are encoded and reconstructed as they travel from public life to private thoughts (Lizardo, 2017; Foster, 2018). What are the cognitive and material processes that underlie the social construction of shared meanings? And how do cultural objects, like the text in news media, help to produce shared meanings? Our paper begins to tackle this gap between public and private culture, focusing on how we internalize public constructions of obesity. First, we suggest that schema learning is the core mechanism by which public culture becomes private culture. Second, we suggest that neural word embeddings, a language model inspired by human cognition, provide a parsimonious model for this cultural learning. Ultimately, we provide a cognitively plausible, computational model for learning private culture from public culture. Schematic processing is a key mechanism for cognitive socialization. We use the term schema (plural schemata) to denote the basic computational ""building blocks"" of human knowledge and information processing ("
+ 3,Context-Guided BERT for Targeted Aspect-Based Sentiment Analysis,"Aspect-based sentiment analysis (ABSA) and Targeted ABSA (TABSA) allow finer-grained inferences about sentiment to be drawn from the same text, depending on context. For example, a given text can have different targets (e.g., neighborhoods) and different aspects (e.g., price or safety), with different sentiment associated with each target-aspect pair. In this paper, we investigate whether adding context to self-attention models improves performance on (T)ABSA. We propose two variants of Context-Guided BERT (CG-BERT) that learn to distribute attention under different contexts. We first adapt a context-aware Transformer to produce a CG-BERT that uses context-guided softmax-attention. Next, we propose an improved Quasi-Attention CG-BERT model that learns a compositional attention that supports subtrac-tive attention. We train both models with pretrained BERT on two (T)ABSA datasets: SentiHood and SemEval-2014 (Task 4). Both models achieve new state-of-the-art results with our QACG-BERT model having the best performance. Furthermore , we provide analyses of the impact of context in the our proposed models. Our work provides more evidence for the utility of adding context-dependencies to pretrained self-attention-based language models for context-based natural language tasks."
+ 4,Knowledge Enhanced Contextual Word Representations,"Contextual word representations, typically trained on unstructured, unlabeled text, do not contain any explicit grounding to real world entities and are often unable to remember facts about those entities. We propose a general method to embed multiple knowledge bases (KBs) into large scale models, and thereby enhance their representations with structured, human-curated knowledge. For each KB, we first use an integrated entity linker to retrieve relevant entity embeddings, then update con-textual word representations via a form of word-to-entity attention. In contrast to previous approaches, the entity linkers and self-supervised language modeling objective are jointly trained end-to-end in a multitask setting that combines a small amount of entity linking supervision with a large amount of raw text. After integrating WordNet and a subset of Wikipedia into BERT, the knowledge enhanced BERT (KnowBert) demonstrates improved perplexity, ability to recall facts as measured in a probing task and downstream performance on relationship extraction, entity typing, and word sense disambiguation. KnowBert's runtime is comparable to BERT's and it scales to large KBs."
+ 5,Deep contextualized word representations,"We introduce a new type of deep contextual-ized word representation that models both (1) complex characteristics of word use (e.g., syntax and semantics), and (2) how these uses vary across linguistic contexts (i.e., to model polysemy). Our word vectors are learned functions of the internal states of a deep bidirec-tional language model (biLM), which is pre-trained on a large text corpus. We show that these representations can be easily added to existing models and significantly improve the state of the art across six challenging NLP problems, including question answering, tex-tual entailment and sentiment analysis. We also present an analysis showing that exposing the deep internals of the pre-trained network is crucial, allowing downstream models to mix different types of semi-supervision signals."
+ 6,Lexical Normalisation of Twitter Data,"Twitter with over 500 million users globally, generates over 100,000 tweets per minute 1. The 140 character limit per tweet, perhaps unintentionally, encourages users to use shorthand notations and to strip spellings to their bare minimum ""syllables"" or elisions e.g. ""srsly"". The analysis of Twitter messages which typically contain misspellings, elisions, and grammatical errors, poses a challenge to established Natural Language Processing (NLP) tools which are generally designed with the assumption that the data conforms to the basic grammatical structure commonly used in English language. In order to make sense of Twitter messages it is necessary to first transform them into a canonical form, consistent with the dictionary or grammar. This process, performed at the level of individual tokens (""words""), is called lexical normalisation. This paper investigates various techniques for lexical normalisation of Twitter data and presents the findings as the techniques are applied to process raw data from Twitter."
+ 7,Neural Machine Translation of Rare Words with Subword Units,"Neural machine translation (NMT) models typically operate with a fixed vocabulary , but translation is an open-vocabulary problem. Previous work addresses the translation of out-of-vocabulary words by backing off to a dictionary. In this paper , we introduce a simpler and more effective approach, making the NMT model capable of open-vocabulary translation by encoding rare and unknown words as sequences of subword units. This is based on the intuition that various word classes are translatable via smaller units than words, for instance names (via character copying or transliteration), compounds (via com-positional translation), and cognates and loanwords (via phonological and morphological transformations). We discuss the suitability of different word segmentation techniques, including simple character n-gram models and a segmentation based on the byte pair encoding compression algorithm , and empirically show that subword models improve over a back-off dictionary baseline for the WMT 15 translation tasks English→German and English→Russian by up to 1.1 and 1.3 BLEU, respectively."
+ 8,FNet: Mixing Tokens with Fourier Transforms,"We show that Transformer encoder architec-tures can be massively sped up, with limited accuracy costs, by replacing the self-attention sublayers with simple linear transformations that ""mix"" input tokens. These linear transformations , along with simple nonlinearities in feed-forward layers, are sufficient to model semantic relationships in several text classification tasks. Perhaps most surprisingly, we find that replacing the self-attention sublayer in a Transformer encoder with a standard, unpa-rameterized Fourier Transform achieves 92% of the accuracy of BERT on the GLUE benchmark , but pre-trains and runs up to seven times faster on GPUs and twice as fast on TPUs. The resulting model, which we name FNet, scales very efficiently to long inputs, matching the accuracy of the most accurate ""efficient"" Transformers on the Long Range Arena benchmark, but training and running faster across all sequence lengths on GPUs and relatively shorter sequence lengths on TPUs. Finally, FNet has a light memory footprint and is particularly efficient at smaller model sizes: for a fixed speed and accuracy budget, small FNet models out-perform Transformer counterparts."
+ 9,KG-BERT: BERT for Knowledge Graph Completion,"Knowledge graphs are important resources for many artificial intelligence tasks but often suffer from incompleteness. In this work, we propose to use pre-trained language models for knowledge graph completion. We treat triples in knowledge graphs as textual sequences and propose a novel framework named Knowledge Graph Bidirectional Encoder Representations from Transformer (KG-BERT) to model these triples. Our method takes entity and relation descriptions of a triple as input and computes scoring function of the triple with the KG-BERT language model. Experimental results on multiple benchmark knowledge graphs show that our method can achieve state-of-the-art performance in triple classification , link prediction and relation prediction tasks."
+ 10,RoBERTa: A Robustly Optimized BERT Pretraining Approach,"Language model pretraining has led to significant performance gains but careful comparison between different approaches is challenging. Training is computationally expensive , often done on private datasets of different sizes, and, as we will show, hyperparameter choices have significant impact on the final results. We present a replication study of BERT pretraining (Devlin et al., 2019) that carefully measures the impact of many key hyperparam-eters and training data size. We find that BERT was significantly undertrained, and can match or exceed the performance of every model published after it. Our best model achieves state-of-the-art results on GLUE, RACE and SQuAD. These results highlight the importance of previously overlooked design choices, and raise questions about the source of recently reported improvements. We release our models and code. 1"
+ 11,A Flexible Large-Scale Similar Product Identification Sys-tem in E-commerce,"Identifying similar products is a common pain-point in the world of E-commerce search and discovery. The key challenges lie in two aspects: 1) The definition of similarity varies across different applications , such as near identical products sold by different vendors , products that are substitutable to each other for customers with common interests, personalized products visually similar in terms of design pattern, style or color. It is difficult to build a general solution for all scenarios. 2) Computing pairwise similarities among billions of products are resource demanding which makes it challenging in large-scale data processing. To provide a flexible and consolidated solution at scale, this paper presents an all-in-one system, Product Similarity Service (PSS), which leverages the state-of-the-art Deep Neural Networks and distributed computing technology to serve diverse Amazon-scale product similarity computations. The experimental results show that PSS can return highly relevant similar products in both verification and ranking tasks, and has good computing efficiency and system scalability on large data volume."
+ 12,[CVPR20 Tutorial] Billion-scale Approximate Nearest Neighbor Search - Speaker Deck,
+ 13,Semi-supervised Category-specific Review Tagging on Indonesian E-Commerce Product Reviews,"Product reviews are a huge source of natural language data in e-commerce applications. Several millions of customers write reviews regarding a variety of topics. We categorize these topics into two groups as either “category-specific” topics or as “generic” topics that span multiple product categories. While we can use a supervised learning approach to tag review text for generic topics, it is impossible to use supervised approaches to tag category-specific topics due to the sheer number of possible topics for each category. In this paper, we present an approach to tag each review with several product category-specific tags on Indonesian language product reviews using a semi-supervised approach. We show that our proposed method can work at scale on real product reviews at Tokopedia, a major e-commerce platform in Indonesia. Manual evaluation shows that the proposed method can efficiently generate category-specific product tags."
+ 14,Design evaluation of graphene nanoribbon nanoelectromechanical devices,
+ 15,Graph Neural Networks for Natural Language Processing: A Survey,"Deep learning has become the dominant approach in coping with various tasks in Natural LanguageProcessing (NLP). Although text inputs are typically represented as a sequence of tokens, there isa rich variety of NLP problems that can be best expressed with a graph structure. As a result, thereis a surge of interests in developing new deep learning techniques on graphs for a large numberof NLP tasks. In this survey, we present a comprehensive overview onGraph Neural Networks(GNNs) for Natural Language Processing. We propose a new taxonomy of GNNs for NLP, whichsystematically organizes existing research of GNNs for NLP along three axes: graph construction,graph representation learning, and graph based encoder-decoder models. We further introducea large number of NLP applications that are exploiting the power of GNNs and summarize thecorresponding benchmark datasets, evaluation metrics, and open-source codes. Finally, we discussvarious outstanding challenges for making the full use of GNNs for NLP as well as future researchdirections. To the best of our knowledge, this is the first comprehensive overview of Graph NeuralNetworks for Natural Language Processing."
+ 16,DocBERT: BERT for Document Classification,"We present, to our knowledge, the first application of BERT to document classification. A few characteristics of the task might lead one to think that BERT is not the most appropriate model: syntactic structures matter less for content categories, documents can often be longer than typical BERT input, and documents often have multiple labels. Nevertheless, we show that a straightforward classification model using BERT is able to achieve the state of the art across four popular datasets. To address the computational expense associated with BERT inference, we distill knowledge from BERT-large to small bidirectional LSTMs, reaching BERT-base parity on multiple datasets using 30x fewer parameters. The primary contribution of our paper is improved baselines that can provide the foundation for future work."
+ 17,Self-Attention Between Datapoints: Going Beyond Individual Input-Output Pairs in Deep Learning,"We challenge a common assumption underlying most supervised deep learning: that a model makes a prediction depending only on its parameters and the features of a single input. To this end, we introduce a general-purpose deep learning architecture that takes as input the entire dataset instead of processing one datapoint at a time. Our approach uses self-attention to reason about relationships between datapoints explicitly, which can be seen as realizing non-parametric models using parametric attention mechanisms. However, unlike conventional non-parametric models, we let the model learn end-to-end from the data how to make use of other datapoints for prediction. Empirically, our models solve cross-datapoint lookup and complex reasoning tasks unsolvable by traditional deep learning models. We show highly competitive results on tabular data, early results on CIFAR-10, and give insight into how the model makes use of the interactions between points."
+ 18,Pentagon at MEDIQA 2019: Multi-task Learning for Filtering and Re-ranking Answers using Language Inference and Question Entailment,"Parallel deep learning architectures like fine-tuned BERT and MT-DNN, have quickly become the state of the art, bypassing previous deep and shallow learning methods by a large margin. More recently, pre-trained models from large related datasets have been able to perform well on many downstream tasks by just fine-tuning on domain-specific datasets . However, using powerful models on non-trivial tasks, such as ranking and large document classification, still remains a challenge due to input size limitations of parallel architecture and extremely small datasets (insufficient for fine-tuning). In this work, we introduce an end-to-end system, trained in a multi-task setting, to filter and re-rank answers in the medical domain. We use task-specific pre-trained models as deep feature extractors. Our model achieves the highest Spearman's Rho and Mean Reciprocal Rank of 0.338 and 0.9622 respectively, on the ACL-BioNLP workshop MediQA Question Answering shared-task."
+ 19,Named Entity Recognition for Social Media Texts with Semantic Augmentation,"Existing approaches for named entity recognition suffer from data sparsity problems when conducted on short and informal texts, especially user-generated social media content. Semantic augmentation is a potential way to alleviate this problem. Given that rich semantic information is implicitly preserved in pre-trained word embeddings, they are potential ideal resources for semantic augmentation. In this paper, we propose a neural-based approach to NER for social media texts where both local (from running text) and augmented semantics are taken into account. In particular, we obtain the augmented semantic information from a large-scale corpus, and propose an attentive semantic augmentation module and a gate module to encode and aggregate such information, respectively. Extensive experiments are performed on three benchmark datasets collected from English and Chinese social media platforms, where the results demonstrate the superiority of our approach to previous studies across all three datasets."
+ 20,Unsupervised Data Augmentation for Consistency Training,"Semi-supervised learning lately has shown much promise in improving deep learning models when labeled data is scarce. Common among recent approaches is the use of consistency training on a large amount of unlabeled data to constrain model predictions to be invariant to input noise. In this work, we present a new perspective on how to effectively noise unlabeled examples and argue that the quality of noising, specifically those produced by advanced data augmentation methods, plays a crucial role in semi-supervised learning. By substituting simple noising operations with advanced data augmentation methods such as RandAugment and back-translation, our method brings substantial improvements across six language and three vision tasks under the same consistency training framework. On the IMDb text classification dataset, with only 20 labeled examples, our method achieves an error rate of 4.20, outperforming the state-of-the-art model trained on 25,000 labeled examples. On a standard semi-supervised learning benchmark, CIFAR-10, our method outperforms all previous approaches and achieves an error rate of 5.43 with only 250 examples. Our method also combines well with transfer learning, e.g., when finetuning from BERT, and yields improvements in high-data regime, such as ImageNet, whether when there is only 10% labeled data or when a full labeled set with 1.3M extra unlabeled examples is used. Code is available at https://github.com/google-research/uda."
+ 21,A Visual Survey of Data Augmentation in NLP,An extensive overview of text data augmentation techniques for Natural Language Processing
+ 22,Dice Loss for Data-imbalanced NLP Tasks,"Many NLP tasks such as tagging and machine reading comprehension are faced with the severe data imbalance issue: negative examples significantly outnumber positive examples, and the huge number of background examples (or easy-negative examples) overwhelms the training. The most commonly used cross entropy (CE) criteria is actually an accuracy-oriented objective, and thus creates a discrepancy between training and test: at training time, each training instance contributes equally to the objective function, while at test time F1 score concerns more about positive examples. In this paper, we propose to use dice loss in replacement of the standard cross-entropy objective for data-imbalanced NLP tasks. Dice loss is based on the Sorensen-Dice coefficient or Tversky index, which attaches similar importance to false positives and false negatives, and is more immune to the data-imbalance issue. To further alleviate the dominating influence from easy-negative examples in training, we propose to associate training examples with dynamically adjusted weights to deemphasize easy-negative examples.Theoretical analysis shows that this strategy narrows down the gap between the F1 score in evaluation and the dice loss in training. With the proposed training objective, we observe significant performance boost on a wide range of data imbalanced NLP tasks. Notably, we are able to achieve SOTA results on CTB5, CTB6 and UD1.4 for the part of speech tagging task; SOTA results on CoNLL03, OntoNotes5.0, MSRA and OntoNotes4.0 for the named entity recognition task; along with competitive results on the tasks of machine reading comprehension and paraphrase identification."
+ 23,Small-text: Active Learning for Text Classification in Python,"We present small-text, a simple modular active learning library, which offers pool-based active learning for text classification in Python. It comes with various pre-implemented state-of-the-art query strategies, including some which can leverage the GPU. Clearly defined interfaces allow to combine a multitude of such query strategies with different classifiers, thereby facilitating a quick mix and match, and enabling a rapid development of both active learning experiments and applications. To make various classifiers accessible in a consistent way, it integrates several well-known machine learning libraries, namely, scikit-learn, PyTorch, and huggingface transformers -- for which the latter integrations are available as optionally installable extensions. The library is available under the MIT License at https://github.com/webis-de/small-text."
+ 24,Adversarial Multiclass Learning under Weak Supervision with Performance Guarantees,"We develop a rigorous approach for using a set of arbitrarily correlated weak supervision sources in order to solve a multiclass classification task when only a very small set of labeled data is available. Our learning algorithm provably converges to a model that has minimum empirical risk with respect to an adversarial choice over feasible labelings for a set of unlabeled data, where the feasibility of a labeling is computed through constraints defined by rigorously estimated statistics of the weak supervision sources. We show theoretical guarantees for this approach that depend on the information provided by the weak supervision sources. Notably, this method does not require the weak supervision sources to have the same labeling space as the multiclass classification task. We demonstrate the effectiveness of our approach with experiments on various image classification tasks."
+ 25,Approaching (almost) Any NLP Problem,
+ 26,Active Multi-Label Crowd Consensus,"Crowdsourcing is an economic and efficient strategy aimed at collecting annotations of data through an online platform. Crowd workers with different expertise are paid for their service, and the task requester usually has a limited budget. How to collect reliable annotations for multi-label data and how to compute the consensus within budget is an interesting and challenging, but rarely studied, problem. In this paper, we propose a novel approach to accomplish Active Multi-label Crowd Consensus (AMCC). AMCC accounts for the commonality and individuality of workers, and assumes that workers can be organized into different groups. Each group includes a set of workers who share a similar annotation behavior and label correlations. To achieve an effective multi-label consensus, AMCC models workers' annotations via a linear combination of commonality and individuality, and reduces the impact of unreliable workers by assigning smaller weights to the group. To collect reliable annotations with reduced cost, AMCC introduces an active crowdsourcing learning strategy that selects sample-label-worker triplets. In a triplet, the selected sample and label are the most informative for the consensus model, and the selected worker can reliably annotate the sample with low cost. Our experimental results on multi-label datasets demonstrate the advantages of AMCC over state-of-the-art solutions on computing crowd consensus and on reducing the budget by choosing cost-effective triplets."
+ 27,skweak: Weak Supervision Made Easy for NLP,"We present skweak, a versatile, Python-based software toolkit enabling NLP developers to apply weak supervision to a wide range of NLP tasks. Weak supervision is an emerging machine learning paradigm based on a simple idea: instead of labelling data points by hand, we use labelling functions derived from domain knowledge to automatically obtain annotations for a given dataset. The resulting labels are then aggregated with a generative model that estimates the accuracy (and possible confusions) of each labelling function. The skweak toolkit makes it easy to implement a large spectrum of labelling functions (such as heuristics, gazetteers, neural models or linguistic constraints) on text data, apply them on a corpus, and aggregate their results in a fully unsupervised fashion. skweak is especially designed to facilitate the use of weak supervision for NLP tasks such as text classification and sequence labelling. We illustrate the use of skweak for NER and sentiment analysis. skweak is released under an open-source license and is available at: https://github.com/NorskRegnesentral/skweak"
+ 28,Training Complex Models with Multi-Task Weak Supervision,"As machine learning models continue to increase in complexity, collecting large hand-labeled training sets has become one of the biggest roadblocks in practice. Instead, weaker forms of supervision that provide noisier but cheaper labels are often used. However, these weak supervision sources have diverse and unknown accuracies, may output correlated labels, and may label different tasks or apply at different levels of granularity. We propose a framework for integrating and modeling such weak supervision sources by viewing them as labeling different related sub-tasks of a problem, which we refer to as the multi-task weak supervision setting. We show that by solving a matrix completion-style problem, we can recover the accuracies of these multi-task sources given their dependency structure, but without any labeled data, leading to higher-quality supervision for training an end model. Theoretically, we show that the generalization error of models trained with this approach improves with the number of unlabeled data points, and characterize the scaling with respect to the task and dependency structures. On three fine-grained classification problems, we show that our approach leads to average gains of 20.2 points in accuracy over a traditional supervised approach, 6.8 points over a majority vote baseline, and 4.1 points over a previously proposed weak supervision method that models tasks separately."
+ 29,"Data Programming: Creating Large Training Sets, Quickly","Large labeled training sets are the critical building blocks of supervised learning methods and are key enablers of deep learning techniques. For some applications, creating labeled training sets is the most time-consuming and expensive part of applying machine learning. We therefore propose a paradigm for the programmatic creation of training sets called data programming in which users express weak supervision strategies or domain heuristics as labeling functions, which are programs that label subsets of the data, but that are noisy and may conflict. We show that by explicitly representing this training set labeling process as a generative model, we can ""denoise"" the generated training set, and establish theoretically that we can recover the parameters of these generative models in a handful of settings. We then show how to modify a discriminative loss function to make it noise-aware, and demonstrate our method over a range of discriminative models including logistic regression and LSTMs. Experimentally, on the 2014 TAC-KBP Slot Filling challenge, we show that data programming would have led to a new winning score, and also show that applying data programming to an LSTM model leads to a TAC-KBP score almost 6 F1 points over a state-of-the-art LSTM baseline (and into second place in the competition). Additionally, in initial user studies we observed that data programming may be an easier way for non-experts to create machine learning models when training data is limited or unavailable."
+ 30,A Survey of Data Augmentation Approaches for NLP,"Data augmentation has recently seen increased interest in NLP due to more work in low-resource domains, new tasks, and the popularity of large-scale neural networks that require large amounts of training data. Despite this recent upsurge, this area is still relatively underexplored, perhaps due to the challenges posed by the discrete nature of language data. In this paper, we present a comprehensive and unifying survey of data augmentation for NLP by summarizing the literature in a structured manner. We first introduce and motivate data augmentation for NLP, and then discuss major methodologically representative approaches. Next, we highlight techniques that are used for popular NLP applications and tasks. We conclude by outlining current challenges and directions for future research. Overall, our paper aims to clarify the landscape of existing literature in data augmentation for NLP and motivate additional work in this area. We also present a GitHub repository with a paper list that will be continuously updated at https://github.com/styfeng/DataAug4NLP"
+ 31,A Comprehensive Survey and Experimental Comparison of Graph-Based Approximate Nearest Neighbor Search,"Approximate nearest neighbor search (ANNS) constitutes an important operation in a multitude of applications, including recommendation systems, information retrieval, and pattern recognition. In the past decade, graph-based ANNS algorithms have been the leading paradigm in this domain, with dozens of graph-based ANNS algorithms proposed. Such algorithms aim to provide effective, efficient solutions for retrieving the nearest neighbors for a given query. Nevertheless, these efforts focus on developing and optimizing algorithms with different approaches, so there is a real need for a comprehensive survey about the approaches' relative performance, strengths, and pitfalls. Thus here we provide a thorough comparative analysis and experimental evaluation of 13 representative graph-based ANNS algorithms via a new taxonomy and fine-grained pipeline. We compared each algorithm in a uniform test environment on eight real-world datasets and 12 synthetic datasets with varying sizes and characteristics. Our study yields novel discoveries, offerings several useful principles to improve algorithms, thus designing an optimized method that outperforms the state-of-the-art algorithms. This effort also helped us pinpoint algorithms' working portions, along with rule-of-thumb recommendations about promising research directions and suitable algorithms for practitioners in different fields."
+ 32,A Survey on Locality Sensitive Hashing Algorithms and their Applications,"Finding nearest neighbors in high-dimensional spaces is a fundamental operation in many diverse application domains. Locality Sensitive Hashing (LSH) is one of the most popular techniques for finding approximate nearest neighbor searches in high-dimensional spaces. The main benefits of LSH are its sub-linear query performance and theoretical guarantees on the query accuracy. In this survey paper, we provide a review of state-of-the-art LSH and Distributed LSH techniques. Most importantly, unlike any other prior survey, we present how Locality Sensitive Hashing is utilized in different application domains."
+ 33,Leveraging Reinforcement Learning for evaluating Robustness of KNN Search Algorithms,"The problem of finding K-nearest neighbors in the given dataset for a given query point has been worked upon since several years. In very high dimensional spaces the K-nearest neighbor search (KNNS) suffers in terms of complexity in computation of high dimensional distances. With the issue of curse of dimensionality, it gets quite tedious to reliably bank on the results of variety approximate nearest neighbor search approaches. In this paper, we survey some novel K-Nearest Neighbor Search approaches that tackles the problem of Search from the perspectives of computations, the accuracy of approximated results and leveraging parallelism to speed-up computations. We attempt to derive a relationship between the true positive and false points for a given KNNS approach. Finally, in order to evaluate the robustness of a KNNS approach against adversarial points, we propose a generic Reinforcement Learning based framework for the same."
+ 34,Approximate Nearest Neighbor Search in High Dimensions,"The nearest neighbor problem is defined as follows: Given a set $P$ of $n$ points in some metric space $(X,D)$, build a data structure that, given any point $q$, returns a point in $P$ that is closest to $q$ (its ""nearest neighbor"" in $P$). The data structure stores additional information about the set $P$, which is then used to find the nearest neighbor without computing all distances between $q$ and $P$. The problem has a wide range of applications in machine learning, computer vision, databases and other fields. To reduce the time needed to find nearest neighbors and the amount of memory used by the data structure, one can formulate the {\em approximate} nearest neighbor problem, where the the goal is to return any point $p' \in P$ such that the distance from $q$ to $p'$ is at most $c \cdot \min_{p \in P} D(q,p)$, for some $c \geq 1$. Over the last two decades, many efficient solutions to this problem were developed. In this article we survey these developments, as well as their connections to questions in geometric functional analysis and combinatorial geometry."
+ 35,Hashing for Similarity Search: A Survey,"Similarity search (nearest neighbor search) is a problem of pursuing the data items whose distances to a query item are the smallest from a large database. Various methods have been developed to address this problem, and recently a lot of efforts have been devoted to approximate search. In this paper, we present a survey on one of the main solutions, hashing, which has been widely studied since the pioneering work locality sensitive hashing. We divide the hashing algorithms two main categories: locality sensitive hashing, which designs hash functions without exploring the data distribution and learning to hash, which learns hash functions according the data distribution, and review them from various aspects, including hash function design and distance measure and search scheme in the hash coding space."
+ 36,Experimental Analysis of Locality Sensitive Hashing Techniques for High-Dimensional Approximate Nearest Neighbor Searches,"Finding nearest neighbors in high-dimensional spaces is a fundamental operation in many multimedia retrieval applications. Exact tree-based indexing approaches are known to suffer from the notorious curse of dimensionality for high-dimensional data. Approximate searching techniques sacrifice some accuracy while returning good enough results for faster performance. Locality Sensitive Hashing (LSH) is a very popular technique for finding approximate nearest neighbors in high-dimensional spaces. Apart from providing theoretical guarantees on the query results, one of the main benefits of LSH techniques is their good scalability to large datasets because they are external memory based. The most dominant costs for existing LSH techniques are the algorithm time and the index I/Os required to find candidate points. Existing works do not compare both of these dominant costs in their evaluation. In this experimental survey paper, we show the impact of both these costs on the overall performance of the LSH technique. We compare three state-of-the-art techniques on four real-world datasets, and show that, in contrast to recent works, C2LSH is still the state-of-the-art algorithm in terms of performance while achieving similar accuracy as its recent competitors."
+ 37,Learning Transferable Visual Models From Natural Language Supervision,"State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP."
+ 38,2nd Place Solution to Facebook AI Image Similarity Challenge Matching Track,"This paper presents the 2nd place solution to the Facebook AI Image Similarity Challenge : Matching Track on DrivenData. The solution is based on self-supervised learning, and Vision Transformer(ViT). The main breaktrough comes from concatenating query and reference image to form as one image and asking ViT to directly predict from the image if query image used reference image. The solution scored 0.8291 Micro-average Precision on the private leaderboard."
+ 39,D$^2$LV: A Data-Driven and Local-Verification Approach for Image Copy Detection,"Image copy detection is of great importance in real-life social media. In this paper, a data-driven and local-verification (D$^2$LV) approach is proposed to compete for Image Similarity Challenge: Matching Track at NeurIPS'21. In D$^2$LV, unsupervised pre-training substitutes the commonly-used supervised one. When training, we design a set of basic and six advanced transformations, and a simple but effective baseline learns robust representation. During testing, a global-local and local-global matching strategy is proposed. The strategy performs local-verification between reference and query images. Experiments demonstrate that the proposed method is effective. The proposed approach ranks first out of 1,103 participants on the Facebook AI Image Similarity Challenge: Matching Track. The code and trained models are available at https://github.com/WangWenhao0716/ISC-Track1-Submission."
+ 40,D$^2$LV: A Data-Driven and Local-Verification Approach for Image Copy Detection,"Image copy detection is of great importance in real-life social media. In this paper, a data-driven and local-verification (D$^2$LV) approach is proposed to compete for Image Similarity Challenge: Matching Track at NeurIPS'21. In D$^2$LV, unsupervised pre-training substitutes the commonly-used supervised one. When training, we design a set of basic and six advanced transformations, and a simple but effective baseline learns robust representation. During testing, a global-local and local-global matching strategy is proposed. The strategy performs local-verification between reference and query images. Experiments demonstrate that the proposed method is effective. The proposed approach ranks first out of 1,103 participants on the Facebook AI Image Similarity Challenge: Matching Track. The code and trained models are available at https://github.com/WangWenhao0716/ISC-Track1-Submission."
+ 41,Deep Clustering for Unsupervised Learning of Visual Features,"Clustering is a class of unsupervised learning methods that has been extensively applied and studied in computer vision. Little work has been done to adapt it to the end-to-end training of visual features on large scale datasets. In this work, we present DeepCluster, a clustering method that jointly learns the parameters of a neural network and the cluster assignments of the resulting features. DeepCluster iteratively groups the features with a standard clustering algorithm, k-means, and uses the subsequent assignments as supervision to update the weights of the network. We apply DeepCluster to the unsupervised training of convolutional neural networks on large datasets like ImageNet and YFCC100M. The resulting model outperforms the current state of the art by a significant margin on all the standard benchmarks."
+ 42,Momentum Contrast for Unsupervised Visual Representation Learning,"We present Momentum Contrast (MoCo) for unsupervised visual representation learning. From a perspective on contrastive learning as dictionary look-up, we build a dynamic dictionary with a queue and a moving-averaged encoder. This enables building a large and consistent dictionary on-the-fly that facilitates contrastive unsupervised learning. MoCo provides competitive results under the common linear protocol on ImageNet classification. More importantly, the representations learned by MoCo transfer well to downstream tasks. MoCo can outperform its supervised pre-training counterpart in 7 detection/segmentation tasks on PASCAL VOC, COCO, and other datasets, sometimes surpassing it by large margins. This suggests that the gap between unsupervised and supervised representation learning has been largely closed in many vision tasks."
+ 43,A Simple Framework for Contrastive Learning of Visual Representations,"This paper presents SimCLR: a simple framework for contrastive learning of visual representations. We simplify recently proposed contrastive self-supervised learning algorithms without requiring specialized architectures or a memory bank. In order to understand what enables the contrastive prediction tasks to learn useful representations, we systematically study the major components of our framework. We show that (1) composition of data augmentations plays a critical role in defining effective predictive tasks, (2) introducing a learnable nonlinear transformation between the representation and the contrastive loss substantially improves the quality of the learned representations, and (3) contrastive learning benefits from larger batch sizes and more training steps compared to supervised learning. By combining these findings, we are able to considerably outperform previous methods for self-supervised and semi-supervised learning on ImageNet. A linear classifier trained on self-supervised representations learned by SimCLR achieves 76.5% top-1 accuracy, which is a 7% relative improvement over previous state-of-the-art, matching the performance of a supervised ResNet-50. When fine-tuned on only 1% of the labels, we achieve 85.8% top-5 accuracy, outperforming AlexNet with 100X fewer labels."
+ 44,Contrastive Learning with Large Memory Bank and Negative Embedding Subtraction for Accurate Copy Detection,"Copy detection, which is a task to determine whether an image is a modified copy of any image in a database, is an unsolved problem. Thus, we addressed copy detection by training convolutional neural networks (CNNs) with contrastive learning. Training with a large memory-bank and hard data augmentation enables the CNNs to obtain more discriminative representation. Our proposed negative embedding subtraction further boosts the copy detection accuracy. Using our methods, we achieved 1st place in the Facebook AI Image Similarity Challenge: Descriptor Track. Our code is publicly available here: \url{https://github.com/lyakaap/ISC21-Descriptor-Track-1st}"
+ 45,The 2021 Image Similarity Dataset and Challenge,"This paper introduces a new benchmark for large-scale image similarity detection. This benchmark is used for the Image Similarity Challenge at NeurIPS'21 (ISC2021). The goal is to determine whether a query image is a modified copy of any image in a reference corpus of size 1~million. The benchmark features a variety of image transformations such as automated transformations, hand-crafted image edits and machine-learning based manipulations. This mimics real-life cases appearing in social media, for example for integrity-related problems dealing with misinformation and objectionable content. The strength of the image manipulations, and therefore the difficulty of the benchmark, is calibrated according to the performance of a set of baseline approaches. Both the query and reference set contain a majority of ""distractor"" images that do not match, which corresponds to a real-life needle-in-haystack setting, and the evaluation metric reflects that. We expect the DISC21 benchmark to promote image copy detection as an important and challenging computer vision task and refresh the state of the art."
+ 46,Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks,"BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods."
+ 47,A Multi-task Approach for Named Entity Recognition in Social Media Data,"Named Entity Recognition for social media data is challenging because of its inherent noisiness. In addition to improper grammatical structures, it contains spelling inconsistencies and numerous informal abbreviations. We propose a novel multi-task approach by employing a more general secondary task of Named Entity (NE) segmentation together with the primary task of fine-grained NE categorization. The multi-task neural network architecture learns higher order feature representations from word and character sequences along with basic Part-of-Speech tags and gazetteer information. This neural network acts as a feature extractor to feed a Conditional Random Fields classifier. We were able to obtain the first position in the 3rd Workshop on Noisy User-generated Text (WNUT-2017) with a 41.86% entity F1-score and a 40.24% surface F1-score."
+ 48,Isotropy in the Contextual Embedding Space: Clusters and Manifolds,"The geometric properties of contextual embedding spaces for deep language models such as BERT and ERNIE, have attracted considerable attention in recent years. Investigations on the contextual..."
+ 49,GPL: Generative Pseudo Labeling for Unsupervised Domain Adaptation of Dense Retrieval,"Dense retrieval approaches can overcome the lexical gap and lead to significantly improved search results. However, they require large amounts of training data which is not available for most domains. As shown in previous work (Thakur et al., 2021b), the performance of dense retrievers severely degrades under a domain shift. This limits the usage of dense retrieval approaches to only a few domains with large training datasets. In this paper, we propose the novel unsupervised domain adaptation method Generative Pseudo Labeling (GPL), which combines a query generator with pseudo labeling from a cross-encoder. On six representative domain-specialized datasets, we find the proposed GPL can outperform an out-of-the-box state-of-the-art dense retrieval approach by up to 8.9 points nDCG@10. GPL requires less (unlabeled) data from the target domain and is more robust in its training than previous methods. We further investigate the role of six recent pre-training methods in the scenario of domain adaptation for retrieval tasks, where only three could yield improved results. The best approach, TSDAE (Wang et al., 2021) can be combined with GPL, yielding another average improvement of 1.0 points nDCG@10 across the six tasks."
+ 50,SimCSE: Simple Contrastive Learning of Sentence Embeddings,"This paper presents SimCSE, a simple contrastive learning framework that greatly advances the state-of-the-art sentence embeddings. We first describe an unsupervised approach, which takes an input sentence and predicts itself in a contrastive objective, with only standard dropout used as noise. This simple method works surprisingly well, performing on par with previous supervised counterparts. We find that dropout acts as minimal data augmentation and removing it leads to a representation collapse. Then, we propose a supervised approach, which incorporates annotated pairs from natural language inference datasets into our contrastive learning framework, by using “entailment” pairs as positives and “contradiction” pairs as hard negatives. We evaluate SimCSE on standard semantic textual similarity (STS) tasks, and our unsupervised and supervised models using BERT base achieve an average of 76.3% and 81.6% Spearman's correlation respectively, a 4.2% and 2.2% improvement compared to previous best results. We also show—both theoretically and empirically—that contrastive learning objective regularizes pre-trained embeddings' anisotropic space to be more uniform, and it better aligns positive pairs when supervised signals are available."
+ 51,An Unsupervised Sentence Embedding Method by Mutual Information Maximization,"BERT is inefficient for sentence-pair tasks such as clustering or semantic search as it needs to evaluate combinatorially many sentence pairs which is very time-consuming. Sentence BERT (SBERT) attempted to solve this challenge by learning semantically meaningful representations of single sentences, such that similarity comparison can be easily accessed. However, SBERT is trained on corpus with high-quality labeled sentence pairs, which limits its application to tasks where labeled data is extremely scarce. In this paper, we propose a lightweight extension on top of BERT and a novel self-supervised learning objective based on mutual information maximization strategies to derive meaningful sentence embeddings in an unsupervised manner. Unlike SBERT, our method is not restricted by the availability of labeled data, such that it can be applied on different domain-specific corpus. Experimental results show that the proposed method significantly outperforms other unsupervised sentence embedding baselines on common semantic textual similarity (STS) tasks and downstream supervised tasks. It also outperforms SBERT in a setting where in-domain labeled data is not available, and achieves performance competitive with supervised methods on various tasks."
+ 52,TSDAE: Using Transformer-based Sequential Denoising Auto-Encoder for Unsupervised Sentence Embedding Learning,"Learning sentence embeddings often requires a large amount of labeled data. However, for most tasks and domains, labeled data is seldom available and creating it is expensive. In this work, we present a new state-of-the-art unsupervised method based on pre-trained Transformers and Sequential Denoising Auto-Encoder (TSDAE) which outperforms previous approaches by up to 6.4 points. It can achieve up to 93.1% of the performance of in-domain supervised approaches. Further, we show that TSDAE is a strong domain adaptation and pre-training method for sentence embeddings, significantly outperforming other approaches like Masked Language Model. A crucial shortcoming of previous studies is the narrow evaluation: Most work mainly evaluates on the single task of Semantic Textual Similarity (STS), which does not require any domain knowledge. It is unclear if these proposed methods generalize to other domains and tasks. We fill this gap and evaluate TSDAE and other recent approaches on four different datasets from heterogeneous domains."
+ 53,Unsupervised Topic Segmentation of Meetings with BERT Embeddings,Topic segmentation of meetings is the task of dividing multi-person meeting transcripts into topic blocks. Supervised approaches to the problem have proven intractable due to the difficulties in collecting and accurately annotating large datasets. In this paper we show how previous unsupervised topic segmentation methods can be improved using pre-trained neural architectures. We introduce an unsupervised approach based on BERT embeddings that achieves a 15.5% reduction in error rate over existing unsupervised approaches applied to two popular datasets for meeting transcripts.
+ 54,Contrastive Representation Learning,"The main idea of contrastive learning is to learn representations such that similar samples stay close to each other, while dissimilar ones are far apart. Contrastive learning can be applied to both supervised and unsupervised data and has been shown to achieve good performance on a variety of vision and..."
+ 55,DeCLUTR: Deep Contrastive Learning for Unsupervised Textual Representations,
+ 56,Intriguing Properties of Contrastive Losses,"We study three intriguing properties of contrastive learning. First, we generalize the standard contrastive loss to a broader family of losses, and we find that various instantiations of the generalized loss perform similarly under the presence of a multi-layer non-linear projection head. Second, we study if instance-based contrastive learning (with a global image representation) can learn well on images with multiple objects present. We find that meaningful hierarchical local features can be learned despite the fact that these objectives operate on global instance-level features. Finally, we study the phenomenon of feature suppression among competing features shared across augmented views, such as ""color distribution"" vs ""object class"". We construct datasets with explicit and controllable competing features, and show that, for contrastive learning, a few bits of easy-to-learn shared features can suppress, and even fully prevent, the learning of other sets of competing features. In scenarios where there are multiple objects in an image, the dominant object would suppress the learning of smaller objects. Existing contrastive learning methods critically rely on data augmentation to favor certain sets of features over others, and could suffer from learning saturation for scenarios where existing augmentations cannot fully address the feature suppression. This poses open challenges to existing contrastive learning techniques."
+ 57,End-to-End Weak Supervision,"Aggregating multiple sources of weak supervision (WS) can ease the data-labeling bottleneck prevalent in many machine learning applications, by replacing the tedious manual collection of ground truth labels. Current state of the art approaches that do not use any labeled training data, however, require two separate modeling steps: Learning a probabilistic latent variable model based on the WS sources -- making assumptions that rarely hold in practice -- followed by downstream model training. Importantly, the first step of modeling does not consider the performance of the downstream model. To address these caveats we propose an end-to-end approach for directly learning the downstream model by maximizing its agreement with probabilistic labels generated by reparameterizing previous probabilistic posteriors with a neural network. Our results show improved performance over prior work in terms of end model performance on downstream test sets, as well as in terms of improved robustness to dependencies among weak supervision sources."
+ 58,Multi-Vector Models with Textual Guidance for Fine-Grained Scientific Document Similarity,"We present Aspire, a new scientific document similarity model based on matching fine-grained aspects. Our model is trained using co-citation contexts that describe related paper aspects as a novel form of textual supervision. We use multi-vector document representations, recently explored in settings with short query texts but under-explored in the challenging document-document setting. We present a fast method that involves matching only single sentence pairs, and a method that makes sparse multiple matches with optimal transport. Our model improves performance on document similarity tasks across four datasets. Moreover, our fast single-match method achieves competitive results, opening up the possibility of applying fine-grained document similarity models to large-scale scientific corpora."
+ 59,Hidden Technical Debt in Machine Learning Systems,
+ 60,Multi-Modal Generative Adversarial Network for Short Product Title Generation in Mobile E-Commerce,"Nowadays, more and more customers browse and purchase products in favor of using mobile E-Commerce Apps such as Taobao and Amazon. Since merchants are usually inclined to describe redundant and over-informative product titles to attract attentions from customers, it is important to concisely display short product titles on limited screen of mobile phones. To address this discrepancy, previous studies mainly consider textual information of long product titles and lacks of human-like view during training and evaluation process. In this paper, we propose a Multi-Modal Generative Adversarial Network (MM-GAN) for short product title generation in E-Commerce, which innovatively incorporates image information and attribute tags from product, as well as textual information from original long titles. MM-GAN poses short title generation as a reinforcement learning process, where the generated titles are evaluated by the discriminator in a human-like view. Extensive experiments on a large-scale E-Commerce dataset demonstrate that our algorithm outperforms other state-of-the-art methods. Moreover, we deploy our model into a real-world online E-Commerce environment and effectively boost the performance of click through rate and click conversion rate by 1.66% and 1.87%, respectively."
+ 61,Fast Approximate Nearest Neighbor Search With The Navigating Spreading-out Graph,"Approximate nearest neighbor search (ANNS) is a fundamental problem in databases and data mining. A scalable ANNS algorithm should be both memory-efficient and fast. Some early graph-based approaches have shown attractive theoretical guarantees on search time complexity, but they all suffer from the problem of high indexing time complexity. Recently, some graph-based methods have been proposed to reduce indexing complexity by approximating the traditional graphs; these methods have achieved revolutionary performance on million-scale datasets. Yet, they still can not scale to billion-node databases. In this paper, to further improve the search-efficiency and scalability of graph-based methods, we start by introducing four aspects: (1) ensuring the connectivity of the graph; (2) lowering the average out-degree of the graph for fast traversal; (3) shortening the search path; and (4) reducing the index size. Then, we propose a novel graph structure called Monotonic Relative Neighborhood Graph (MRNG) which guarantees very low search complexity (close to logarithmic time). To further lower the indexing complexity and make it practical for billion-node ANNS problems, we propose a novel graph structure named Navigating Spreading-out Graph (NSG) by approximating the MRNG. The NSG takes the four aspects into account simultaneously. Extensive experiments show that NSG outperforms all the existing algorithms significantly. In addition, NSG shows superior performance in the E-commercial search scenario of Taobao (Alibaba Group) and has been integrated into their search engine at billion-node scale."
+ 62,An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale,"While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train."
+ 63,Milvus: A Purpose-Built Vector Data Management System,"Recently, there has been a pressing need to manage high-dimensional vector data in data science and AI applications. This trend is fueled by the proliferation of unstructured data and machine learning (ML), where ML models usually transform unstructured data into feature vectors for data analytics, e.g., product recommendation. Existing systems and algorithms for managing vector data have two limitations: (1) They incur serious performance issue when handling large-scale and dynamic vector data; and (2) They provide limited functionalities that cannot meet the requirements of versatile applications. This paper presents Milvus, a purpose-built data management system to efficiently manage large-scale vector data. Milvus supports easy-to-use application interfaces (including SDKs and RESTful APIs); optimizes for the heterogeneous computing platform with modern CPUs and GPUs; enables advanced query processing beyond simple vector similarity search; handles dynamic data for fast updates while ensuring efficient query processing; and distributes data across multiple nodes to achieve scalability and availability. We first describe the design and implementation of Milvus. Then we demonstrate the real-world use cases supported by Milvus. In particular, we build a series of 10 applications (e.g., image/video search, chemical structure analysis, COVID-19 dataset search, personalized recommendation, biological multi-factor authentication, intelligent question answering) on top of Milvus. Finally, we experimentally evaluate Milvus with a wide range of systems including two open source systems (Vearch and Microsoft SPTAG) and three commercial systems. Experiments show that Milvus is up to two orders of magnitude faster than the competitors while providing more functionalities. Now Milvus is deployed by hundreds of organizations worldwide and it is also recognized as an incubation-stage project of the LF AI & Data Foundation. Milvus is open-sourced at https://github.com/milvus-io/milvus."
+ 64,Want To Reduce Labeling Cost? GPT-3 Can Help,"Data annotation is a time-consuming and labor-intensive process for many NLP tasks. Although there exist various methods to produce pseudo data labels, they are often task-specific and require a decent amount of labeled data to start with. Recently, the immense language model GPT-3 with 175 billion parameters has achieved tremendous improvement across many few-shot learning tasks. In this paper, we explore ways to leverage GPT-3 as a low-cost data labeler to train other models. We find that, to make the downstream model achieve the same performance on a variety of NLU and NLG tasks, it costs 50% to 96% less to use labels from GPT-3 than using labels from humans. Furthermore, we propose a novel framework of combining pseudo labels from GPT-3 with human labels, which leads to even better performance with limited labeling budget. These results present a cost-effective data labeling methodology that is generalizable to many practical applications."
+ 65,AMMUS : A Survey of Transformer-based Pretrained Models in Natural Language Processing,"Transformer-based pretrained language models (T-PTLMs) have achieved great success in almost every NLP task. The evolution of these models started with GPT and BERT. These models are built on the top of transformers, self-supervised learning and transfer learning. Transformed-based PTLMs learn universal language representations from large volumes of text data using self-supervised learning and transfer this knowledge to downstream tasks. These models provide good background knowledge to downstream tasks which avoids training of downstream models from scratch. In this comprehensive survey paper, we initially give a brief overview of self-supervised learning. Next, we explain various core concepts like pretraining, pretraining methods, pretraining tasks, embeddings and downstream adaptation methods. Next, we present a new taxonomy of T-PTLMs and then give brief overview of various benchmarks including both intrinsic and extrinsic. We present a summary of various useful libraries to work with T-PTLMs. Finally, we highlight some of the future research directions which will further improve these models. We strongly believe that this comprehensive survey paper will serve as a good reference to learn the core concepts as well as to stay updated with the recent happenings in T-PTLMs."
+ 66,LANNS: A Web-Scale Approximate Nearest Neighbor Lookup System,"Nearest neighbor search (NNS) has a wide range of applications in information retrieval, computer vision, machine learning, databases, and other areas. Existing state-of-the-art algorithm for nearest neighbor search, Hierarchical Navigable Small World Networks(HNSW), is unable to scale to large datasets of 100M records in high dimensions. In this paper, we propose LANNS, an end-to-end platform for Approximate Nearest Neighbor Search, which scales for web-scale datasets. Library for Large Scale Approximate Nearest Neighbor Search (LANNS) is deployed in multiple production systems for identifying topK ($100 \leq topK \leq 200$) approximate nearest neighbors with a latency of a few milliseconds per query, high throughput of 2.5k Queries Per Second (QPS) on a single node, on large ($\sim$180M data points) high dimensional (50-2048 dimensional) datasets."
+ 67,Announcing ScaNN: Efficient Vector Similarity Search,"Posted by Philip Sun, Software Engineer, Google Research Suppose one wants to search through a large dataset of literary works using que..."
+ 68,CARLA: A Python Library to Benchmark Algorithmic Recourse and Counterfactual Explanation Algorithms,"Counterfactual explanations provide means for prescriptive model explanations by suggesting actionable feature changes (e.g., increase income) that allow individuals to achieve favorable outcomes in the future (e.g., insurance approval). Choosing an appropriate method is a crucial aspect for meaningful counterfactual explanations. As documented in recent reviews, there exists a quickly growing literature with available methods. Yet, in the absence of widely available opensource implementations, the decision in favor of certain models is primarily based on what is readily available. Going forward - to guarantee meaningful comparisons across explanation methods - we present CARLA (Counterfactual And Recourse LibrAry), a python library for benchmarking counterfactual explanation methods across both different data sets and different machine learning models. In summary, our work provides the following contributions: (i) an extensive benchmark of 11 popular counterfactual explanation methods, (ii) a benchmarking framework for research on future counterfactual explanation methods, and (iii) a standardized set of integrated evaluation measures and data sets for transparent and extensive comparisons of these methods. We have open-sourced CARLA and our experimental results on Github, making them available as competitive baselines. We welcome contributions from other research groups and practitioners."
+ 69,Abstract syntax tree,"In computer science, an abstract syntax tree (AST), or just syntax tree, is a tree representation of the abstract syntactic structure of source code written in a programming language. Each node of the tree denotes a construct occurring in the source code. The syntax is ""abstract"" in the sense that it does not represent every detail appearing in the real syntax, but rather just the structural or content-related details. For instance, grouping parentheses are implicit in the tree structure, so these do not have to be represented as separate nodes. Likewise, a syntactic construct like an if-condition-then expression may be denoted by means of a single node with three branches. This distinguishes abstract syntax trees from concrete syntax trees, traditionally designated parse trees. Parse trees are typically built by a parser during the source code translation and compiling process. Once built, additional information is added to the AST by means of subsequent processing, e.g., contextual analysis. Abstract syntax trees are also used in program analysis and program transformation systems."
+ 70,Snorkel: Fast Training Set Generation for Information Extraction,"State-of-the-art machine learning methods such as deep learning rely on large sets of hand-labeled training data. Collecting training data is prohibitively slow and expensive, especially when technical domain expertise is required; even the largest technology companies struggle with this challenge. We address this critical bottleneck with Snorkel, a new system for quickly creating, managing, and modeling training sets. Snorkel enables users to generate large volumes of training data by writing labeling functions, which are simple functions that express heuristics and other weak supervision strategies. These user-authored labeling functions may have low accuracies and may overlap and conflict, but Snorkel automatically learns their accuracies and synthesizes their output labels. Experiments and theory [3, 4] show that surprisingly, by modeling the labeling process in this way, we can train high-accuracy machine learning models even using potentially lower-accuracy inputs. Snorkel is currently used in production at top technology and consulting companies, and used by researchers to extract information from electronic health records, after-action combat reports, and the scientific literature. In this demonstration, we focus on the challenging task of information extraction, a common application of Snorkel in practice. Using the task of extracting corporate employment relationships from news articles, we will demonstrate and build intuition for a radically different way of developing machine learning systems which allows us to effectively bypass the bottleneck of hand-labeling training data."
+ 71,Snuba: Automating Weak Supervision to Label Training Data,"As deep learning models are applied to increasingly diverse problems, a key bottleneck is gathering enough high-quality training labels tailored to each task. Users therefore turn to weak supervision, relying on imperfect sources of labels like pattern matching and user-defined heuristics. Unfortunately, users have to design these sources for each task. This process can be time consuming and expensive: domain experts often perform repetitive steps like guessing optimal numerical thresholds and developing informative text patterns. To address these challenges, we present Snuba, a system to automatically generate heuristics using a small labeled dataset to assign training labels to a large, unlabeled dataset in the weak supervision setting. Snuba generates heuristics that each labels the subset of the data it is accurate for, and iteratively repeats this process until the heuristics together label a large portion of the unlabeled data. We develop a statistical measure that guarantees the iterative process will automatically terminate before it degrades training label quality. Snuba automatically generates heuristics in under five minutes and performs up to 9.74 F1 points better than the best known user-defined heuristics developed over many days. In collaborations with users at research labs, Stanford Hospital, and on open source datasets, Snuba outperforms other automated approaches like semisupervised learning by up to 14.35 F1 points."
+ 72,Interactive Programmatic Labeling for Weak Supervision,"The standard supervised machine learning pipeline involves labeling individual training data points, which is often prohibitively slow and expensive. New programmatic or weak supervision approaches expedite this process by having users instead write labeling functions, simple rules or other heuristic functions that label subsets of a dataset. While these types of programmatic labeling approaches can provide significant advantages over labeling training sets by hand, there is usually little formal structure or guidance for how these labeling functions are created by users. We perform an initial exploration of processes through which users can be guided by asking them to write labeling functions over specifically-chosen subsets of the data. This can be viewed as a new form of active learning—a traditional technique wherein data points are intelligently chosen for labeling—applied at the labeling function level. We show in synthetic and real-world experiments how two simple labeling function acquisition strategies outperform a random baseline. In our real-world experiment we observe a 1-2% increase in accuracy after the first 100 labeling functions when using our acquisition strategies, which corresponds to a 2× reduction in the amount of data required to achieve a fixed accuracy."
+ 73,Snorkel: Rapid Training Data Creation with Weak Supervision,"Labeling training data is increasingly the largest bottleneck in deploying machine learning systems. We present Snorkel, a first-of-its-kind system that enables users to train state-of-the-art models without hand labeling any training data. Instead, users write labeling functions that express arbitrary heuristics, which can have unknown accuracies and correlations. Snorkel denoises their outputs without access to ground truth by incorporating the first end-to-end implementation of our recently proposed machine learning paradigm, data programming. We present a flexible interface layer for writing labeling functions based on our experience over the past year collaborating with companies, agencies, and research labs. In a user study, subject matter experts build models 2.8x faster and increase predictive performance an average 45.5% versus seven hours of hand labeling. We study the modeling tradeoffs in this new setting and propose an optimizer for automating tradeoff decisions that gives up to 1.8x speedup per pipeline execution. In two collaborations, with the U.S. Department of Veterans Affairs and the U.S. Food and Drug Administration, and on four open-source text and image data sets representative of other deployments, Snorkel provides 132% average improvements to predictive performance over prior heuristic approaches and comes within an average 3.60% of the predictive performance of large hand-curated training sets."
+ 74,WikiLingua: A New Benchmark Dataset for Cross-Lingual Abstractive Summarization,"We introduce WikiLingua, a large-scale, multilingual dataset for the evaluation of crosslingual abstractive summarization systems. We extract article and summary pairs in 18 languages from WikiHow, a high quality, collaborative resource of how-to guides on a diverse set of topics written by human authors. We create gold-standard article-summary alignments across languages by aligning the images that are used to describe each how-to step in an article. As a set of baselines for further studies, we evaluate the performance of existing cross-lingual abstractive summarization methods on our dataset. We further propose a method for direct crosslingual summarization (i.e., without requiring translation at inference time) by leveraging synthetic data and Neural Machine Translation as a pre-training step. Our method significantly outperforms the baseline approaches, while being more cost efficient during inference."
+ 75,Swin Transformer: Hierarchical Vision Transformer using Shifted Windows,"This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with \textbf{S}hifted \textbf{win}dows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. The code and models are publicly available at~\url{https://github.com/microsoft/Swin-Transformer}."
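Each row above carries an index plus the two columns the app actually keeps, Title and Abstract. As a rough sketch of preparing a compatible CSV of your own papers (the file name my_papers.csv and the example row are placeholders, not part of this repo; only the two column names matter to helpers.load_data):

import pandas as pd

# Only 'Title' and 'Abstract' are used downstream; other columns are dropped by helpers.load_data
papers = [
    {'Title': 'An Example Paper', 'Abstract': 'One or two sentences describing the paper.'},
]
pd.DataFrame(papers).to_csv('my_papers.csv', index=False)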
helpers.py ADDED
@@ -0,0 +1,194 @@
+ import streamlit as st
+ from pyvis.network import Network
+ import plotly.express as px
+ from sklearn.metrics.pairwise import cosine_similarity
+ from sentence_transformers import SentenceTransformer
+ from bertopic import BERTopic
+ from sklearn.feature_extraction.text import CountVectorizer
+ import pandas as pd
+ import numpy as np
+ import networkx as nx
+
+
+ def reset_default_topic_sliders(min_topic_size, n_gram_range):
+     st.session_state['min_topic_size'] = min_topic_size
+     st.session_state['n_gram_range'] = n_gram_range
+
+
+ def reset_default_threshold_slider(threshold):
+     st.session_state['threshold'] = threshold
+
+
+ @st.cache(allow_output_mutation=True)
+ def load_sbert_model():
+     return SentenceTransformer('allenai-specter')
+
+
+ @st.cache()
+ def load_data(uploaded_file):
+     data = pd.read_csv(uploaded_file)
+
+     data = data[['Title', 'Abstract']]
+     data = data.dropna()
+     data = data.reset_index(drop=True)
+
+     return data
+
+
+ @st.cache(allow_output_mutation=True)
+ def topic_modeling(data, min_topic_size, n_gram_range):
+     """Topic modeling using BERTopic
+     """
+     topic_model = BERTopic(
+         embedding_model=load_sbert_model(),
+         vectorizer_model=CountVectorizer(
+             stop_words='english', ngram_range=n_gram_range),
+         min_topic_size=min_topic_size
+     )
+
+     # For 'allenai-specter'
+     data['Title + Abstract'] = data['Title'] + '[SEP]' + data['Abstract']
+
+     # Train the topic model
+     data["Topic"], data["Probs"] = topic_model.fit_transform(
+         data['Title + Abstract'])
+
+     # Merge topic results
+     topic_df = topic_model.get_topic_info()[['Topic', 'Name']]
+     data = data.merge(topic_df, on='Topic', how='left')
+
+     # Topics
+     topics = topic_df.set_index('Topic').to_dict(orient='index')
+
+     return data, topic_model, topics
+
+
+ @st.cache(allow_output_mutation=True)
+ def embeddings(data):
+     data['embedding'] = load_sbert_model().encode(
+         data['Title + Abstract']).tolist()
+
+     return data
+
+
+ @st.cache()
+ def cosine_sim(data):
+     cosine_sim_matrix = cosine_similarity(data['embedding'].values.tolist())
+
+     # Take only the upper triangular matrix (k=1 also drops the diagonal, so
+     # self-similarities and duplicate pairs are excluded)
+     cosine_sim_matrix = np.triu(cosine_sim_matrix, k=1)
+
+     return cosine_sim_matrix
+
+
+ @st.cache()
+ def calc_max_connections(num_papers, ratio):
+     n = ratio*num_papers
+
+     # Maximum number of edges in a complete graph on n nodes
+     return n*(n-1)/2
+
+
+ @st.cache()
+ def calc_optimal_threshold(cosine_sim_matrix, max_connections):
+     """Calculates the optimal threshold for the cosine similarity matrix.
+     Allows a max of max_connections
+     """
+     thresh_sweep = np.arange(0.05, 1.05, 0.05)
+     # Sweep the threshold upwards until the edge count falls below max_connections
+     for idx, threshold in enumerate(thresh_sweep):
+         neighbors = np.argwhere(cosine_sim_matrix >= threshold).tolist()
+         if len(neighbors) < max_connections:
+             break
+
+     # Bracket: the last threshold that still allowed too many edges and the first one under the cap
+     return round(thresh_sweep[idx-1], 2).item(), round(thresh_sweep[idx], 2).item()
+
+
+ @st.cache()
+ def calc_neighbors(cosine_sim_matrix, threshold):
+     neighbors = np.argwhere(cosine_sim_matrix >= threshold).tolist()
+
+     return neighbors, len(neighbors)
+
+
+ def nx_hash_func(nx_net):
+     """Hash function for NetworkX graphs.
+     """
+     return (list(nx_net.nodes()), list(nx_net.edges()))
+
+
+ def pyvis_hash_func(pyvis_net):
+     """Hash function for pyvis graphs.
+     """
+     return (pyvis_net.nodes, pyvis_net.edges)
+
+
+ @st.cache(hash_funcs={nx.Graph: nx_hash_func, Network: pyvis_hash_func})
+ def network_plot(data, topics, neighbors):
+     """Creates a network plot of connected papers. Colored by Topic Model topics.
+     """
+     nx_net = nx.Graph()
+     pyvis_net = Network(height='750px', width='100%', bgcolor='#222222')
+
+     # Add Nodes
+     nodes = [
+         (
+             row.Index,
+             {
+                 'group': row.Topic,
+                 'label': row.Index,
+                 'title': row.Title,
+                 'size': 20, 'font': {'size': 20, 'color': 'white'}
+             }
+         )
+         for row in data.itertuples()
+     ]
+     nx_net.add_nodes_from(nodes)
+     assert(nx_net.number_of_nodes() == len(data))
+
+     # Add Legend Nodes (one box per topic, stacked vertically to the left of the graph)
+     step = 150
+     x = -2000
+     y = -500
+     legend_nodes = [
+         (
+             len(data)+idx,
+             {
+                 'group': key, 'label': ', '.join(value['Name'].split('_')[1:]),
+                 'size': 30, 'physics': False, 'x': x, 'y': f'{y + idx*step}px',
+                 # , 'fixed': True,
+                 'shape': 'box', 'widthConstraint': 1000, 'font': {'size': 40, 'color': 'black'}
+             }
+         )
+         for idx, (key, value) in enumerate(topics.items())
+     ]
+     nx_net.add_nodes_from(legend_nodes)
+
+     # Add Edges
+     nx_net.add_edges_from(neighbors)
+     assert(nx_net.number_of_edges() == len(neighbors))
+
+     # Plot the Pyvis graph
+     pyvis_net.from_nx(nx_net)
+
+     return nx_net, pyvis_net
+
+
+ @st.cache()
+ def network_centrality(data, centrality, centrality_option):
+     """Calculates the centrality of the network
+     """
+     # Sort Top 10 Central nodes
+     central_nodes = sorted(
+         centrality.items(), key=lambda item: item[1], reverse=True)
+     central_nodes = pd.DataFrame(central_nodes, columns=[
+         'node', centrality_option]).set_index('node')
+
+     joined_data = data.join(central_nodes)
+     top_central_nodes = joined_data.sort_values(
+         centrality_option, ascending=False).head(10)
+
+     # Plot the Top 10 Central nodes
+     fig = px.bar(top_central_nodes, x=centrality_option, y='Title')
+     fig.update_layout(yaxis={'categoryorder': 'total ascending'},
+                       font={'size': 15},
+                       height=800, width=800)
+     return fig
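Taken together, the helpers above form one pipeline from a CSV of papers to a similarity network. A minimal sketch of chaining them outside the Streamlit UI (the papers.csv path, the values 3 and (1, 3), the 0.2 ratio, and the choice of the upper threshold are illustrative assumptions, not values fixed by this code):

import helpers

# Load and clean the papers, cluster them into topics, then embed them with the same SBERT model
data = helpers.load_data('papers.csv')
data, topic_model, topics = helpers.topic_modeling(data, min_topic_size=3, n_gram_range=(1, 3))
data = helpers.embeddings(data)

# Pairwise cosine similarities (upper triangle only) and a threshold that caps the edge count
cosine_sim_matrix = helpers.cosine_sim(data)
max_connections = helpers.calc_max_connections(len(data), ratio=0.2)
lower, upper = helpers.calc_optimal_threshold(cosine_sim_matrix, max_connections)

# Keep pairs above the threshold as edges and write the topic-colored pyvis network to HTML
neighbors, num_edges = helpers.calc_neighbors(cosine_sim_matrix, threshold=upper)
nx_net, pyvis_net = helpers.network_plot(data, topics, neighbors)
pyvis_net.show('network.html')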
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ bertopic==0.9.4
+ networkx==2.6.3
+ numpy==1.20.0
+ pandas==1.3.4
+ plotly==5.1.0
+ pyvis==0.1.9
+ scikit_learn==1.0.2
+ sentence_transformers==2.1.0
+ streamlit==1.3.1