|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T06:07:22.692737Z" |
|
}, |
|
"title": "Analyzing Curriculum Learning for Sentiment Analysis along Task Difficulty, Pacing and Visualization Axes", |
|
"authors": [ |
|
{ |
|
"first": "Anvesh", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kaveri", |
|
"middle": [], |
|
"last": "Anuranjana", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "kaveri.anuranjana@research.iiit.ac.in" |
|
}, |
|
{ |
|
"first": "Radhika", |
|
"middle": [], |
|
"last": "Mamidi", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "radhika.mamidi@iiit.ac.in" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "While Curriculum Learning (CL) has recently gained traction in Natural language Processing Tasks, it is still not adequately analyzed. Previous works only show their effectiveness but fail short to explain and interpret the internal workings fully. In this paper, we analyze curriculum learning in sentiment analysis along multiple axes. Some of these axes have been proposed by earlier works that need more in-depth study. Such analysis requires understanding where curriculum learning works and where it does not. Our axes of analysis include Task difficulty on CL, comparing CL pacing techniques, and qualitative analysis by visualizing the movement of attention scores in the model as curriculum phases progress. We find that curriculum learning works best for difficult tasks and may even lead to a decrement in performance for tasks with higher performance without curriculum learning. We see that One-Pass curriculum strategies suffer from catastrophic forgetting and attention movement visualization within curriculum pacing. This shows that curriculum learning breaks down the challenging main task into easier sub-tasks solved sequentially.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "While Curriculum Learning (CL) has recently gained traction in Natural language Processing Tasks, it is still not adequately analyzed. Previous works only show their effectiveness but fail short to explain and interpret the internal workings fully. In this paper, we analyze curriculum learning in sentiment analysis along multiple axes. Some of these axes have been proposed by earlier works that need more in-depth study. Such analysis requires understanding where curriculum learning works and where it does not. Our axes of analysis include Task difficulty on CL, comparing CL pacing techniques, and qualitative analysis by visualizing the movement of attention scores in the model as curriculum phases progress. We find that curriculum learning works best for difficult tasks and may even lead to a decrement in performance for tasks with higher performance without curriculum learning. We see that One-Pass curriculum strategies suffer from catastrophic forgetting and attention movement visualization within curriculum pacing. This shows that curriculum learning breaks down the challenging main task into easier sub-tasks solved sequentially.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Learning in humans has always been a systematic approach of handling the fundamentals first and then learning incrementally harder concepts. Cognitive Science has established that this leads to a clearer, robust understanding and the most efficient learning (Krueger and Dayan, 2009; Avrahami et al., 1997) . Indeed, something similar can be applied while training neural networks. (Bengio et al., 2009) show that Curriculum Learning (CL) -sampling data based on increasing order of difficulty leads to quicker generalization. (Weinshall * The authors contributed equally to the work. et al., 2018) also demonstrate that CL increases the rate of convergence at the beginning of training. Their CL strategy involved sorting the training data based on transfer learning from another network trained on a larger dataset. The idea of reordering samples has been explored in various approaches. In this paper, we evaluate the \"easiness\" with a network and train the samples on another network. Hence, even in our case, we shall pick easier points regarding a target hypothesis then train another network that optimizes its current hypothesis. This idea has been suggested by previous works as well. (Hacohen and Weinshall, 2019; Weinshall et al., 2018) . (Cirik et al., 2016) proposed Baby Steps and One Pass curriculum techniques using sentence length as a curriculum strategy for training LSTM (Hochreiter and Schmidhuber, 1997) on Sentiment Analysis. A tree-structured curriculum ordering based on semantic similarity is proposed by (Han and Myaeng, 2017) . propose an auxiliary network that is first trained on the dataset and used to calculate difficulty scores for the curriculum order.", |
|
"cite_spans": [ |
|
{ |
|
"start": 258, |
|
"end": 283, |
|
"text": "(Krueger and Dayan, 2009;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 284, |
|
"end": 306, |
|
"text": "Avrahami et al., 1997)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 382, |
|
"end": 403, |
|
"text": "(Bengio et al., 2009)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 527, |
|
"end": 539, |
|
"text": "(Weinshall *", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 585, |
|
"end": 598, |
|
"text": "et al., 2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1194, |
|
"end": 1223, |
|
"text": "(Hacohen and Weinshall, 2019;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1224, |
|
"end": 1247, |
|
"text": "Weinshall et al., 2018)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 1250, |
|
"end": 1270, |
|
"text": "(Cirik et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 1391, |
|
"end": 1425, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1531, |
|
"end": 1553, |
|
"text": "(Han and Myaeng, 2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "CL is also used in NLP within tasks such as Question Answering Xing, 2016, 2018) and NLG for Answer Generation (Liu et al., 2018) . For Sentiment Analysis, (Cirik et al., 2016) propose a strategy derived from sentence length, where smaller sentences are considered easier and are provided first. (Han and Myaeng, 2017) provide a tree-structured curriculum based on semantic similarity between new samples and samples already trained. (Tsvetkov et al., 2016 ) suggest a curriculum based on handcrafted semantic, linguistic, syntactic features for word representation learning.", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 80, |
|
"text": "Xing, 2016, 2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 111, |
|
"end": 129, |
|
"text": "(Liu et al., 2018)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 156, |
|
"end": 176, |
|
"text": "(Cirik et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 296, |
|
"end": 318, |
|
"text": "(Han and Myaeng, 2017)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 456, |
|
"text": "(Tsvetkov et al., 2016", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Some of these works (Cirik et al., 2016; Han and Myaeng, 2017; have suggested that Baby Steps performs better than One Pass. We perform experiments using both techniques. While the idea of curriculum remains the same across these works, the strategy itself to decide sample ordering is often diverse.", |
|
"cite_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 40, |
|
"text": "(Cirik et al., 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 41, |
|
"end": 62, |
|
"text": "Han and Myaeng, 2017;", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "While Curriculum Learning as defined by (Bengio et al., 2009) is not constrained by a strict description, later related works (Cirik et al., 2016; Han and Myaeng, 2017; Spitkovsky et al., 2010; make distinctions between Baby Steps curriculum and One-Pass curriculum. Most of these previous works have also shown the dominance of Baby Steps over One-Pass. Baby Steps and One Pass curriculum can be defined as follows. For every sentence s i \u2208 D, its sentiment is described as y i \u2208 {0, 1, 2, 3, 4}, where i \u2208 {1, 2...n} for n data points in D. For a model f w , its prediction based on s i will be f w (s i ). Loss L is defined on the model prediction and actual output as L(y i , f w (s i )) and Cost defining the task as", |
|
"cite_spans": [ |
|
{ |
|
"start": 40, |
|
"end": 61, |
|
"text": "(Bengio et al., 2009)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 126, |
|
"end": 146, |
|
"text": "(Cirik et al., 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 147, |
|
"end": 168, |
|
"text": "Han and Myaeng, 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 169, |
|
"end": 193, |
|
"text": "Spitkovsky et al., 2010;", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Axis I: Curriculum Learning: One Pass and Baby Steps", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "C(D, f w ) as \u2200i 1 n L(y i , f w (s i ))", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Axis I: Curriculum Learning: One Pass and Baby Steps", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Here, curriculum strategy S(s i ) defines an \"easiness\"/\"difficulty\" quotient of sample s i . Furthermore, One Pass makes distinct, mutually exclusive sets of the training data and trains on each one of these sets one by one. This makes it faster as compared to Baby Steps, where data cumulatively increases in each pass. This implies that the model is trained on previous data and the additional harder data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Axis I: Curriculum Learning: One Pass and Baby Steps", |
|
"sec_num": "2" |
|
},
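
{

"text": "A minimal sketch of the two pacing schemes described above, assuming the training data has already been sorted by the curriculum strategy S and split into k difficulty buckets; the function names and the train_epochs helper are illustrative assumptions, not the exact implementation used in this work.\n\n# Sketch: One Pass vs. Baby Steps pacing over k difficulty buckets (easiest first).\n# Assumes buckets is a list of k datasets and train_epochs(model, data) runs a\n# fixed number of training epochs (hypothetical helper).\n\ndef one_pass(model, buckets, train_epochs):\n    # Each phase trains only on its own, disjoint bucket.\n    for phase_data in buckets:\n        train_epochs(model, phase_data)\n    return model\n\ndef baby_steps(model, buckets, train_epochs):\n    # Each phase trains on the union of all buckets seen so far, so earlier\n    # (easier) samples are revisited in every later phase.\n    seen = []\n    for phase_data in buckets:\n        seen.extend(phase_data)\n        train_epochs(model, seen)\n    return model",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Axis I: Curriculum Learning: One Pass and Baby Steps",

"sec_num": "2"

},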
|
{ |
|
"text": "To analyze the two methods for executing CL we choose two curriculum strategies (difficulty scoring function). Furthermore, we also experiment with an individual setting explained in following sections.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Axis I: Curriculum Learning: One Pass and Baby Steps", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Following previous works in curriculum-driven sentiment analysis (Cirik et al., 2016; Han and Myaeng, 2017; Tsvetkov et al., 2016; We use the Stanford Sentiment Treebank (SST) dataset (Socher et al., 2013) . Unlike most sentiment analysis datasets with binary labels, SST is for a 5-class classification consisting of 8544/1101/2210 samples in train, development,", |
|
"cite_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 85, |
|
"text": "(Cirik et al., 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 86, |
|
"end": 107, |
|
"text": "Han and Myaeng, 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 108, |
|
"end": 130, |
|
"text": "Tsvetkov et al., 2016;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 184, |
|
"end": 205, |
|
"text": "(Socher et al., 2013)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Our dataset has 5 labels. https://nlp.stanford.edu/sentiment/ and test set. We use this standard split with reported results averaged over 5 turns.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We use the popular transformer model BERT (Devlin et al., 2019) for our experiments due to how ubiquitous it is across natural language processing tasks. Bidirectional Encoder Representations from Transformers (BERT) is a masked language model trained on large corpora. A sentence is added with a special token (CLS) at the beginning and is passed into the pre-trained BERT model. It tokenizes the sentence with a maximum length of 512 and outputs a contextual representation for each of the tokenized words. There are variants of pre-trained BERT depending upon the hyper-parameters of the model. BERT-Base Uncased consists of 12 transformer encoders, and output from each token is a 768 dimension embedding. We apply a softmax layer over the first token position (CLS) output by the BERT model for sentiment analysis.", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 63, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Details: BERT", |
|
"sec_num": "2.2" |
|
}, |
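
{

"text": "A minimal sketch of the classifier described above, under the assumption that it is implemented with the Hugging Face transformers library (the paper does not state its exact implementation): the (CLS) position output of BERT-Base Uncased is passed through a linear layer followed by a softmax over the five sentiment classes.\n\nimport torch\nfrom transformers import BertModel, BertTokenizer\n\nclass BertSentimentClassifier(torch.nn.Module):\n    # BERT-Base Uncased encoder with a linear + softmax head on the (CLS) token.\n    def __init__(self, num_classes=5):\n        super().__init__()\n        self.bert = BertModel.from_pretrained('bert-base-uncased')\n        self.classifier = torch.nn.Linear(768, num_classes)\n\n    def forward(self, input_ids, attention_mask):\n        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)\n        cls_repr = outputs.last_hidden_state[:, 0]  # representation at the (CLS) position\n        return torch.softmax(self.classifier(cls_repr), dim=-1)\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nbatch = tokenizer(['a majestic achievement'], padding=True, truncation=True,\n                  max_length=512, return_tensors='pt')\nmodel = BertSentimentClassifier()\nprobs = model(batch['input_ids'], batch['attention_mask'])  # shape (1, 5)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Details: BERT",

"sec_num": "2.2"

},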
|
{ |
|
"text": "Auxiliary Model Strategy is based on previous works (Weinshall et al., 2018; Hacohen and Weinshall, 2019; which propose a difficulty scoring function, transfer learned from an another network. We first train an auxiliary model Aux for sentiment analysis on the same dataset. This Aux model architecture will be the same as the model finally used for CL. This allows us to find out which training samples are actually difficult. We learn what samples are the most difficult to classify and what are the easiest from this model. For all training samples of D, we define the curriculum score as follows:", |
|
"cite_spans": [ |
|
{ |
|
"start": 52, |
|
"end": 76, |
|
"text": "(Weinshall et al., 2018;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 77, |
|
"end": 105, |
|
"text": "Hacohen and Weinshall, 2019;", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Auxiliary Model Strategy", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "S(s i ) = c j (Aux(s i ) j \u2212 y j i ) 2", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Auxiliary Model Strategy", |
|
"sec_num": "2.3.1" |
|
}, |
|
{ |
|
"text": "where Aux(s i ) j is the prediction of auxiliary model Aux on sentence s i , j is the iterator over the number of classes c = 5. In essence, we find the mean squared error between the prediction and the sentence's true labels. If S(s i ) is high, it implies the sentence is hard to classify, and if less, then the sentence is easy. Because the features were trained on an auxiliary model from BERT features, we get an easiness-difficulty score purely from the perspective of sentiment analysis. Section A gives some examples of difficult and easy samples according to this curriculum.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Auxiliary Model Strategy", |
|
"sec_num": "2.3.1" |
|
}, |
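
{

"text": "A minimal sketch of this scoring step, assuming aux_probs holds the auxiliary model's class probabilities and labels holds the gold class indices; the helper names and the bucketing into k = 5 phases are illustrative assumptions.\n\nimport numpy as np\n\ndef curriculum_scores(aux_probs, labels, num_classes=5):\n    # aux_probs: (n, c) array of auxiliary-model probabilities Aux(s_i)_j\n    # labels:    (n,) array of gold class indices y_i\n    one_hot = np.eye(num_classes)[labels]            # y_i as one-hot vectors\n    return ((aux_probs - one_hot) ** 2).sum(axis=1)  # S(s_i) as in Eq. (2)\n\ndef make_buckets(scores, k=5):\n    # Sort training indices from easiest (low S) to hardest (high S),\n    # then split them into k buckets for Baby Steps / One Pass.\n    order = np.argsort(scores)\n    return np.array_split(order, k)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Auxiliary Model Strategy",

"sec_num": "2.3.1"

},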
|
{ |
|
"text": "This simple strategy tells that architectures, especially LSTM find it difficult to classify longer sentences. Hence, longer sentences are difficult and should be ordered later. Conversely, shorter sentence lengths are easier and should be trained first. This strategy is prevalent and has not only been used in sentiment analysis (Cirik et al., 2016) but also in dependency parsing (Spitkovsky et al., 2010) . This is why it becomes a strong comparison metric, especially to evaluate the distinction between One Pass and Baby Steps.", |
|
"cite_spans": [ |
|
{ |
|
"start": 331, |
|
"end": 351, |
|
"text": "(Cirik et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 383, |
|
"end": 408, |
|
"text": "(Spitkovsky et al., 2010)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Sentence Length", |
|
"sec_num": "2.3.2" |
|
}, |
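
{

"text": "A minimal sketch of this ordering, assuming whitespace tokenization for the length count (an assumption; the exact tokenization used for the ordering is not stated here).\n\ndef sentence_length_order(sentences):\n    # Shorter sentences are treated as easier and therefore come first.\n    return sorted(range(len(sentences)), key=lambda i: len(sentences[i].split()))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Sentence Length",

"sec_num": "2.3.2"

},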
|
{ |
|
"text": "In this strategy, we report accuracy on the test set when trained on D 1 ,D 2 , and so on individually. This is by no means a curriculum strategy since no model ever sees the complete training data. The Individual experiment can be thought of as One Pass, but the weights are reset after every training phase. (Cirik et al., 2016; Han and Myaeng, 2017; . One Pass is less timeconsuming since it only observes a sample once, (Cirik et al., 2016) have done on CL SST as well. However, our numbers do not match because they use the much larger phrase dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 310, |
|
"end": 330, |
|
"text": "(Cirik et al., 2016;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 331, |
|
"end": 352, |
|
"text": "Han and Myaeng, 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 424, |
|
"end": 444, |
|
"text": "(Cirik et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Individual", |
|
"sec_num": "2.3.3" |
|
}, |
|
{ |
|
"text": "unlike Baby Steps which repeatedly sees samples from the previous and the current phase. Furthermore, we see that auxiliary outperforms sentence length. Finally, Figure 1 illustrates the reason for One Pass's weakness. We observe that on successive phases, One Pass closely follows the curve of the proposed Individual experiment. However, unlike One Pass, Individual has no memory of samples at previous stages. This shows that in every phase of One Pass, the model forgets the previous stage samples and effectively behaves like Individual, hence catastrophically forgetting previous phases. Furthermore, this is especially a major issue for the myriad of methods involving the Language Model pre-training and downstream tasks fine-tuning Paradigm. Language Model backed transformers (Vaswani et al., 2017) are now ubiquitous for tasks across Natural Language Understanding (Devlin et al., 2019; Liu et al., 2019; Yang et al., 2019; Lan et al., 2020; Raffel et al., 2020; Choudhary et al., 2020) . These architectures are trained in a language model task first, followed by fine-tuning on the downstream task. In this regard, they are similar to the One Pass strategy since they successively train on disjoint datasets. Hence, problems with One Pass, such as catastrophic forgetting, are likely to occur with these architectures as well. Additionally, while Baby Steps addresses the catastrophic forgetting in One Pass, it will be harder to address catastrophic forgetting in Language Models. The downstream task objective in the CL setting is different from the Language Model objective making joint training harder, unlike Baby Steps. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 786, |
|
"end": 808, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 876, |
|
"end": 897, |
|
"text": "(Devlin et al., 2019;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 898, |
|
"end": 915, |
|
"text": "Liu et al., 2019;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 916, |
|
"end": 934, |
|
"text": "Yang et al., 2019;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 935, |
|
"end": 952, |
|
"text": "Lan et al., 2020;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 953, |
|
"end": 973, |
|
"text": "Raffel et al., 2020;", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 974, |
|
"end": 997, |
|
"text": "Choudhary et al., 2020)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 162, |
|
"end": 170, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "To visualize Catastrophic Forgetting, Figure 2 plots the BERT model's performance. The figure illustrates the corresponding match or mismatch between model prediction and ground truth rather than just the model prediction. In this figure, the correctness of model prediction across all test samples is illustrated for every phase of the One Pass and Baby Steps methods. The samples are vertically stacked along the y-axis in an order based on the number of phases sample missclassified in. The consecutive phases of the curriculum training are indicated on the x-axis.. A darker color (value of \"0\") indicates miss-classification, and brighter or lighter color (value of \"1\") indicates that the sample is correctly classified. Note that the classification task itself is not a binary task but a multi-class classification problem. For SST5, this is a five-class classification task. Figure 2 edifies the following points.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 46, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 883, |
|
"end": 891, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Visualizing Catastrophic Forgetting", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Easy and Hard Samples: In both the figures, some sections are always dark or always light in color. The difficulty of certain samples is consistent irrespective of model training. The accuracy is actually determined by the samples with intermediate difficulty whose prediction can fluctuate as the model observes more data encounters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualizing Catastrophic Forgetting", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Memory in Baby Steps: The figure for Baby Steps shows the model fairly remembering well the concepts it is trained on. Here, once the model prediction is corrected, it mostly stays corrected. Hence, implying a \"memory\" in the model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualizing Catastrophic Forgetting", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Catastrophic Forgetting in One Pass Contrary to Baby Steps, One Pass heavily suffers from catastrophic forgetting, which is observed every time the color of a sample", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualizing Catastrophic Forgetting", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "As discussed earlier, we take k = 5; hence we end up with 5 phases on the x-axis.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualizing Catastrophic Forgetting", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "changes from lighter to darker. In One Pass, in successive phases, the model makes more correct predictions in new regions. However, at the same time, samples become darker in the regions earlier it was lighter in. Model performance decreases to the lowest in the final stages because the model is the farthest from all previous learning phases at this point.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualizing Catastrophic Forgetting", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "\u2022 Visualizing Test Performance: This visualization is done on the unseen test set. Our above hypotheses are still natural to understand if visualized on the train set. However, the samples in the visualization are always unseen during the training. This implies that the model forgets or remembers training samples in the corresponding phases and the associated concepts as well.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualizing Catastrophic Forgetting", |
|
"sec_num": "2.4.1" |
|
}, |
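
{

"text": "A minimal sketch of how the correctness matrix referenced above can be built and displayed, assuming phase_predictions[p] holds the phase-p checkpoint's predicted labels for the whole test set; matplotlib and the row ordering by number of misclassified phases follow the description above, while the variable names are illustrative.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef correctness_matrix(phase_predictions, gold_labels):\n    # Rows = test samples, columns = curriculum phases; 1 = correct, 0 = misclassified.\n    mat = np.stack([np.array(preds) == np.array(gold_labels)\n                    for preds in phase_predictions], axis=1).astype(int)\n    # Sort rows by how many phases classify the sample correctly, as in Figure 2.\n    order = np.argsort(mat.sum(axis=1))\n    return mat[order]\n\ndef plot_correctness(mat, title):\n    plt.imshow(mat, aspect='auto', cmap='gray')  # dark = 0 (wrong), light = 1 (correct)\n    plt.xlabel('curriculum phase')\n    plt.ylabel('test samples')\n    plt.title(title)\n    plt.show()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Visualizing Catastrophic Forgetting",

"sec_num": "2.4.1"

},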
|
{ |
|
"text": "3 Axis II: Curriculum Learning only helps Difficult tasks (Hacohen and Weinshall, 2019) and (Xu et al., 2020) propose that CL might help tasks which are harder than easier. They observe that for tasks which have low performance, the difference in performance caused due to introducing a curriculum training is more than the tasks which have a higher performance without curriculum. We call this the Task Difficulty Hypothesis. (Hacohen and Weinshall, 2019) perform an image classification experiment where they experiment with enabling curriculum on the CI-FAR dataset (Krizhevsky et al., 2009) with 100 classes(CIFAR-100) and 10 classes(CIFAR-10) as two separate experiments. They use VGG network (Simonyan and Zisserman, 2014) as the common neural network for these experiments. Naturally, they report performance of CIFAR 10 in the range of 90 to 100% (hence, an easy task) and CIFAR-10 in the range pf 60 to 70 % (hence, a hard task). On addition of a curriculum training to these datasets, they observe that the increment Figure 2 : Catastrophic Forgetting in One Pass Visualized. In the above images, the model performance on each individual test set sample is illustrated across all curriculum phases. These phases correspond with how much data the model has observed so far.", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 87, |
|
"text": "(Hacohen and Weinshall, 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 92, |
|
"end": 109, |
|
"text": "(Xu et al., 2020)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 427, |
|
"end": 456, |
|
"text": "(Hacohen and Weinshall, 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 569, |
|
"end": 594, |
|
"text": "(Krizhevsky et al., 2009)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 698, |
|
"end": 728, |
|
"text": "(Simonyan and Zisserman, 2014)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1027, |
|
"end": 1035, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Visualizing Catastrophic Forgetting", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "in performance on CIFAR-100 is almost twice the increment in performance on CIFAR-10 while using the same network VGG-net for training. They argue that this might be because in easier tasks such as CIFAR-10, there are already enough easy samples observed during training without CL, and hence improvement caused by CL is subdued. (Xu et al., 2020) enable their CL across the range of tasks in GLUE (Wang et al., 2018) . GLUE encompasses a wide range of tasks in natural language understanding, with varying performances. For example a task such as RTE or CoLA is considered harder than SST-2 or QNLI. The BERT (Devlin et al., 2019) model's performance for the same are 70.1, 60.5, 94.9, 91.1 respectively .", |
|
"cite_spans": [ |
|
{ |
|
"start": 330, |
|
"end": 347, |
|
"text": "(Xu et al., 2020)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 398, |
|
"end": 417, |
|
"text": "(Wang et al., 2018)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 610, |
|
"end": 631, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualizing Catastrophic Forgetting", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "We built our experiments upon these works. Proposed work is different from (Hacohen and Weinshall, 2019) in the way that our work focuses on sentiment analysis in NLP, whereas (Hacohen and Weinshall, 2019) experimented strictly with image processing tasks. Furthermore, since the apparent relation between task difficulty and improvement due to CL wasn't the main focus of the work, the experimentation wasn't enough to fully establish the correlation. While (Xu et al., 2020) perform experiments within the purview of NLU and Text Classification, the experiments themselves are not consistent across dataset and the nature of task. Specifically, we believe it's hard to conclude from As reported in the original paper (Devlin et al., 2019) experiments on tasks as disparate as Linguistic Acceptability (Warstadt et al., 2019) and Sentiment Analysis (Socher et al., 2013) to establish the relationship between CL and task difficulty. Such an experiment inadvertently raises more questions whether the difference of improvement due to CL is due to the nature of tasks (Sentiment Identification as opposed to Linguistical soundness detection) or the nature of the dataset being different (sentence lengths or vocabulary). We eschew this concern by performing experiment within the same task (Sentiment Analysis), and the same dataset (SST-5), instead we introduce change in task difficulty by using the fine grained labels provided by this dataset. This experimental setup will be explained in more detailed in the following sections.", |
|
"cite_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 104, |
|
"text": "(Hacohen and Weinshall, 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 176, |
|
"end": 205, |
|
"text": "(Hacohen and Weinshall, 2019)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 476, |
|
"text": "(Xu et al., 2020)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 719, |
|
"end": 740, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 803, |
|
"end": 826, |
|
"text": "(Warstadt et al., 2019)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 850, |
|
"end": 871, |
|
"text": "(Socher et al., 2013)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Visualizing Catastrophic Forgetting", |
|
"sec_num": "2.4.1" |
|
}, |
|
{ |
|
"text": "To ensure no effect of nature of task or dataset on our experiments, we utilize just a single dataset but under different conditions and sampling to simulate difficulty. All these meta-datas are generated using the SST-5 (Socher et al., 2013) dataset. It is important to note that we do not use the phrase data for training which is why our scores may fall short of earlier reported performances of BERT on SST5 and SST derived datasets (Devlin et al., 2019; Liu et al., 2019; Xu et al., 2020 variants of the same dataset without any significant change in the problem statement definition and the dataset statistics. We generate four datasets from SST5: SST-2, SST-3, SST-4 and SST-5 itself. In SST-3 and SST-5 neutral label is preserved, otherwise dropped. The two negative labels and the two positive labels are clubbed in SST-3 and SST-2.", |
|
"cite_spans": [ |
|
{ |
|
"start": 221, |
|
"end": 242, |
|
"text": "(Socher et al., 2013)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 437, |
|
"end": 458, |
|
"text": "(Devlin et al., 2019;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 459, |
|
"end": 476, |
|
"text": "Liu et al., 2019;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 492, |
|
"text": "Xu et al., 2020", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3.1" |
|
}, |
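
{

"text": "A minimal sketch of the label mapping implied above (original SST-5 labels: 0 = very negative, 1 = negative, 2 = neutral, 3 = positive, 4 = very positive). The handling of SST-4 is our assumption from the description, namely that the neutral label is dropped while the four polar classes are kept.\n\ndef to_sst_x(label, x):\n    # Returns the mapped label for the SST-x variant, or None if the sample is dropped.\n    if x == 5:\n        return label                                    # unchanged\n    if x == 4:\n        return None if label == 2 else (label if label < 2 else label - 1)\n    if x == 3:\n        return {0: 0, 1: 0, 2: 1, 3: 2, 4: 2}[label]    # polar labels merged, neutral kept\n    if x == 2:\n        return None if label == 2 else (0 if label < 2 else 1)  # merged, neutral dropped\n    raise ValueError('x must be one of 2, 3, 4, 5')",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dataset",

"sec_num": "3.1"

},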
|
{ |
|
"text": "In each of the SST-x dataset, the train, test and dev sets are all converted accordingly. The model in all cases is strictly trained on the train data with development as validation and results are reported on test set. This way the comparison can be fair since dataset specific effects on the hypothesis would not be pertinent.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "Humans are likely to observe sentiment polarity of a natural language sentence on a continuous scale rather than distinct classes. Hence, it makes natural sense that making a distinction between, Very Positive and Positive is a harder task than just making a distinction between positive and negative.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We use the Auxiliary Model Strategy (Section 2.3.1) coupled with Baby Steps (Section 2) as our curriculum training technique and BERT as the model architecture. We use Baby Steps (Cirik et al., 2016) as the curriculum pacing since we observed it to have best performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 199, |
|
"text": "(Cirik et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Curriculum Training", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Our Experiments are illustrated in figure 3 and table 2. Following are the major points to be noted", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussions", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 Number of classes is detrimental to model performance We observe that in both with curriculum and No curriculum settings, model performance varies tremendously between SST-5 and SST-2. This is also evident in previous works which have shown performance ranges on SST to lie in the ranges of 45-50% (Cirik et al., 2016) for a 5 class problem and in the order of 90% (Lan et al., 2020; Devlin Figure 3 : Accuracy scores in percentage on all the created meta-data with and without curriculum. The difference has been plotted on the secondary y-axis (Right) et al., 2019; Xu et al., 2020) for a binary class problem. Furthermore, it is interesting to note that the performance gap between SST-i and SST-i + 1 decreases as number of classes increase. This implies while adding classes makes the task harder, the hardness is less relevant if already many classes.", |
|
"cite_spans": [ |
|
{ |
|
"start": 300, |
|
"end": 320, |
|
"text": "(Cirik et al., 2016)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 367, |
|
"end": 385, |
|
"text": "(Lan et al., 2020;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 386, |
|
"end": 401, |
|
"text": "Devlin Figure 3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussions", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 Curriculum has adverse effects on high performing models The most striking point to be observed is that curriculum actually does not help in certain cases. In SST-2 and SST-3, the model is already performing quite well with accuracy of 91.11 and 75.52 respectively In these cases the model observes a decre-ments= of 1.6 and 0.05 % respectively due to CL. The performance on SST-2 is in line with previous work (Xu et al., 2020) who also observe a slight decrement to no increment in this dataset for their own curriculum technique. Tasks which are having performance are in essence, possessing higher number of easier samples than tougher tasks. In such a condition, A model observing these easy samples again and again (as we saw earlier in Baby Steps), might lead to overfitting. We believe this to be the reason behind then apparent performance degradation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 413, |
|
"end": 430, |
|
"text": "(Xu et al., 2020)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussions", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "\u2022 Curriculum has positive effects on low per-forming models Despite low performance on SST-2, the resourcefulness of CL is felt in SST-5 and SST-4 where there is a positive difference. These tasks are harder in nature with No Curriculum performance below 65%, and CL is able to improve the scores significantly with an average of +1.16%. As we saw in previous section, Hard samples are catastrophically forgotten all the time, hence training on these samples again and again will lead to an improvement. Unlike SST-2 and SST-3 which have a huge proportion of easy samples, Hard tasks according have harder samples and hence instead repeated training in a systematic ordering would not have an adverse effect.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussions", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Furthermore, (Xu et al., 2020 ) suggest a similar reasoning for the Task Difficulty hypothesis. They suggest that when learning without curriculum, in the case of harder tasks, the model is confounded and overwhelmed by the presence of hard samples. Hence, laying out the training where the model observes easier samples first is natural to improve performance. However, this reasoning does not explain well why there is an apparent decrement of performance for CL on high performing tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 29, |
|
"text": "(Xu et al., 2020", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussions", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Visualization of Attention (Vaswani et al., 2017) is an important and popular visualization method for interpreting the prediction in these models. Previous works such as (Clark et al., 2019) have visualized attentions to identify where does BERT look. (Vaswani et al., 2017) also show in attention visualization that various heads of the transformer look at various linguistic details. In this section we use attention visualization to qualitatively analyze and explain how the model's focus on the sentence changes as the various stages (when model encounters new and harder data) of CL progresses. It is important to note that BERT has N layers and H heads. Within each of these N xH heads there lies a T xT self attention from each input time stamp to every other time stamp. Where T is the maximum sentence length. Thereby, there are a total of N xH 2D T xT Attention visualizations observed in BERT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 49, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 171, |
|
"end": 191, |
|
"text": "(Clark et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 253, |
|
"end": 275, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Axis III: Attention Movement Visualization", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To particularly identify how focus of the BERT model changes across multiple stages or phases of the curriculum, we devise an attention movement visualization. We define Attention Movement index as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(3) M (i, h, n) = A(i, h, n) \u2212 A(i \u2212 1, h, n) \u2200i \u2208 {1, 2, 3..c}", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where M is the proposed Movement index, A(i, h, n) is the Attention visualization of the h th head of n th layer after training i th phase of curriculum training and c is the total number of phases in the curriculum training. We can see that a positive M (i) indicates that across subsequent stages of the curriculum, the model has added attention or increased focus in the area. Conversely, a negative M (i) indicates that after completing phase i model has decided to attend to certain area or region lesser than before the phase's training. Essentially the Movement indicates relative change in attention rather than attention itself. This is significant because, unlike typical visualization where we observe a singular model in isolation, here we are attempting to analyze how the model behaves in subsequent stages of the training in a curriculum fashion and furthermore, how this change effects the prediction label.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Additionally, a point to note is that while previous works such as (Clark et al., 2019) establish that BERT's distinct heads attend on linguistic notions of syntax among other linguistic ideas, we do not visualize these attention heads themselves. In this experiment, we are looking for an overall perspective into where there is a positive or negative change in the attention focus rather than understanding the individual linguistic nuances of individual heads. For this reason we further propose an Averaged Movement index as follows: as of the experiments explained in Section 3. To recapitulate, we use SST-5 dataset with Auxiliary Model Technique in Baby Steps training procedure. The c in Baby Steps training is 5 in our experiments.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 87, |
|
"text": "(Clark et al., 2019)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "M avg (i) = H,N h=2,n=2 M (i, h, n)", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Experiments", |
|
"sec_num": "4.1" |
|
}, |
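
{

"text": "A minimal sketch of the movement computation in Equations (3) and (4), assuming attn[i] holds the (N, H, T, T) self-attention maps of the checkpoint saved after curriculum phase i (obtainable, e.g., by running BERT with output_attentions=True); the variable names and checkpoint handling are illustrative assumptions.\n\nimport numpy as np\n\ndef movement_index(attn_prev, attn_curr):\n    # M(i, h, n) = A(i, h, n) - A(i-1, h, n); both inputs have shape (N, H, T, T).\n    return attn_curr - attn_prev\n\ndef averaged_movement(attn_prev, attn_curr):\n    # M_avg(i): aggregate the movement over all layers and heads -> one (T, T) map.\n    return movement_index(attn_prev, attn_curr).sum(axis=(0, 1))\n\n# For c = 5 curriculum phases this yields four averaged movement maps per sentence:\n# movements = [averaged_movement(attn[i - 1], attn[i]) for i in range(1, len(attn))]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Experiments",

"sec_num": "4.1"

},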
|
{ |
|
"text": "Figures 4 gives examples for M avg scores for three sentences. There are a total five phases in our training and hence total four M avg scores per sentence with each score being T xT in attention size with T as maximum sentence length. In these images, a blue color implies there was a addition of attention or focus in the region and red or a negative value implies there was a negation in the model's focus on the area. Almost no color or white color indicates no change in attention in the region. The movement index visualizations is shown along the direction of the arrow. All the following examples are from the Testing set, after the curriculum training, the intermediate phase wise models were extracted and made to predict on the individual test sentences. Furthermore, please note that for each of the T xT attentions, y-axis denotes the input positions and x-axis denotes the output positions of the self attention. Our analysis can be listed for each sentence as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussions", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 In this sentence (True Label: Negative), \"static , repetitive , muddy and blurry , hey arnold !\" the model predicts incorrectly in the beginning and continues to mislabel until the third phase of training. We observe that on finishing the third phase of the training the movement index (2nd image from top), shows a motion of higher focus on \"static repetitive muddy\" (and lower focus from redundant words. While in the first and second phase when the model observed just a sample of easy data, the movement of attention wasn't in any direction that could help the classification. The motion in third phase however shows the addition of focus on words which could help the model classify the sentence as negative. Furthermore, until the last phase we also see an addition of focus to the terms \"hey arnold !\", this could model's way of focusing on some neutral words to avoid predicting Very Negative as opposed to Negative. Essentially, CL has divided up the task of predicting sentiment into multiple sections where the model first learnt the neutrality from \"hey arnold\" followed by focusing the polarity terms to predict just negative. This makes the prediction easier and hence effective than no curriculum.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussions", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "\u2022 In this sentence (True Label: Positive), \"it 's a worthwhile tutorial in quantum physics and slash-dash\" the model predicts incorrectly until model's completion of Phase four training. In phases two and three we see that model is adding weight to some neutral words such as \"it ' s a\" and \"slash-dash\", hence ends up predicting neutral which is an incorrect prediction. However, After observing phase four samples, model finally increases attention to the actual polarity indicating words in the sentence \"worthwhile\". and hence shifts prediction appropriately to Positive. There is a possibility that since \"worthwhile\" does not exist in BERT's vocabulary it might have found it hard to map the word to positive polarity until enough training data was observed by the model in subsequent phases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussions", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In all the above examples, there is a common pattern, which is that CL spreads out the sentiment prediction process. Training on initial easier samples rules out whether prediction is correct.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussions", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "In this paper we conduct experiments to analyze CL. We first hypothesize the failure of One Pass for Curriculum learning in text classification. We conduct an Individual non curriculum experiment, to show that One Pass heavily suffers with catastrophic forgetting. Unless this forgetting is tackled, One Pass will continue to under perform even no curriculum settings. Then, we analyze CL along Task Difficulty Hypothesis to establish firmly that CL only helps when coupled with difficult tasks. The scope of improvement may diminish or even be negative for tasks which are already easy. Finally we propose movement visualizations to analyse CL. We observe that for hard examples, CL breaks down the task into multiple phases. These phases in a way rule out certain sections of the sentence for sentiment prediction first, such that when the model encounters harder data in later phases, it is clear on what not to focus on. Furthermore, after both the experiments, we can conclude that the reason CL might be deleterious to performance in easier tasks could be because in these tasks, multiple phases are unnecessary. If the model prediction was already correct in the first couple of phases, then further phases may only move the prediction away hence leading to poor performance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "A Difficult and Easy samples according to the Auxiliary Strategy Table 3 shows some examples of difficult ad easy samples according to the Auxiliary Model Curriculum Strategy explained in section 2.3.1. As we can observe, this curriculum is effective because it considers a sample as difficult in similar ways to a human discerning the sentiment as well.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 72, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "In the sentence from figure 5 (True Label: Negative) \"if this dud had been made in the '70s , it would have been called the hills have antlers and played for about three weeks in drive-ins .\", the model predicts incorrectly until the fourth phase. After the second phase, the model predicts Positive by increasing focus on parts of the sentence such as \"the hills\" and \"played for\" which could have a slight positive (as opposed to very positive) connotation in a movie review context. After the third phase, the motion is more towards \"the hills\" and \"dud\" which have neutral and negative connotation respectively leading to an overall sentiment of Neural. Finally, after the fourth phase, the movement is most towards the only strongly negative word \"dud\". Like the previous example, \"dud\" is not part of BERT's vocabulary and the only reason the model choose to increase its attention here must be because it already explored other regions of the sentence in previous phases. Hence, without this specific ordering, model would have lower probability of focusing on the broken subwords \"du -##d\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Additional Explanation for Attention Movement Visualization", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In all the examples, there is a common pattern, that curriculum learning spreads out the sentiment prediction process. Training on initial easier samples rules out incorrect predictions from parts of the sentence which do not contribute to the sentiment. This helps in later phases when model receives information to focus on right segment of the sentence. In this stage, the model would be more confident in its prediction since other regions of the sentence were already explored and excluded from the prediction process. In essence, CL debases the proba-bility of incorrect prediction by revealing the right data at the right time in the spread out curriculum form of training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Additional Explanation for Attention Movement Visualization", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "Example Easy 1. meeting , even exceeding expectations , it 's the best sequel since the empire strikes back ... a majestic achievement , an epic of astonishing grandeur and surprising emotional depth . (from first 50 samples) 2. the most wondrous love story in years , it is a great film .3. one of the best looking and stylish animated movies in quite a while ... Hard 1. if the predictability of bland comfort food appeals to you , then the film is a pleasant enough dish . (from last 50 samples) 2. it is a testament of quiet endurance , of common concern , of reconciled survival .' 3. this movie is so bad , that it 's almost worth seeing because it 's so bad .' ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Label", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Teaching by examples: Implications for the process of category acquisition. The Quarterly", |
|
"authors": [ |
|
{ |
|
"first": "Judith", |
|
"middle": [], |
|
"last": "Avrahami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yaakov", |
|
"middle": [], |
|
"last": "Kareev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yonatan", |
|
"middle": [], |
|
"last": "Bogot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruth", |
|
"middle": [], |
|
"last": "Caspi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salomka", |
|
"middle": [], |
|
"last": "Dunaevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharon", |
|
"middle": [], |
|
"last": "Lerner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Journal of Experimental Psychology Section A", |
|
"volume": "50", |
|
"issue": "3", |
|
"pages": "586--606", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Judith Avrahami, Yaakov Kareev, Yonatan Bogot, Ruth Caspi, Salomka Dunaevsky, and Sharon Lerner. 1997. Teaching by examples: Implications for the process of category acquisition. The Quar- terly Journal of Experimental Psychology Section A, 50(3):586-606.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Curriculum learning", |
|
"authors": [ |
|
{ |
|
"first": "Yoshua", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00e9r\u00f4me", |
|
"middle": [], |
|
"last": "Louradour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ronan", |
|
"middle": [], |
|
"last": "Collobert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Weston", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the 26th annual international conference on machine learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "41--48", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yoshua Bengio, J\u00e9r\u00f4me Louradour, Ronan Collobert, and Jason Weston. 2009. Curriculum learning. In Proceedings of the 26th annual international confer- ence on machine learning, pages 41-48. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Selfsupervised hyperboloid representations from logical queries over knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "Nurendra", |
|
"middle": [], |
|
"last": "Choudhary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikhil", |
|
"middle": [], |
|
"last": "Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sumeet", |
|
"middle": [], |
|
"last": "Katariya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Subbian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chandan K", |
|
"middle": [], |
|
"last": "Reddy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2012.13023" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nurendra Choudhary, Nikhil Rao, Sumeet Katariya, Karthik Subbian, and Chandan K Reddy. 2020. Self- supervised hyperboloid representations from logi- cal queries over knowledge graphs. arXiv preprint arXiv:2012.13023.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Visualizing and understanding curriculum learning for long short-term memory networks", |
|
"authors": [ |
|
{ |
|
"first": "Volkan", |
|
"middle": [], |
|
"last": "Cirik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eduard", |
|
"middle": [], |
|
"last": "Hovy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Louis-Philippe", |
|
"middle": [], |
|
"last": "Morency", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1611.06204" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Volkan Cirik, Eduard Hovy, and Louis-Philippe Morency. 2016. Visualizing and understanding cur- riculum learning for long short-term memory net- works. arXiv preprint arXiv:1611.06204.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "What does bert look at? an analysis of bert's attention", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Urvashi", |
|
"middle": [], |
|
"last": "Khandelwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "276--286", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D Manning. 2019. What does bert look at? an analysis of bert's attention. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 276-286.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "On the power of curriculum learning in training deep networks", |
|
"authors": [ |
|
{ |
|
"first": "Guy", |
|
"middle": [], |
|
"last": "Hacohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daphna", |
|
"middle": [], |
|
"last": "Weinshall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2535--2544", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guy Hacohen and Daphna Weinshall. 2019. On the power of curriculum learning in training deep net- works. In International Conference on Machine Learning, pages 2535-2544. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Treestructured curriculum learning based on semantic similarity of text", |
|
"authors": [ |
|
{ |
|
"first": "Sanggyu", |
|
"middle": [], |
|
"last": "Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sung-Hyon", |
|
"middle": [], |
|
"last": "Myaeng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "16th IEEE International Conference on Machine Learning and Applications (ICMLA)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "971--976", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sanggyu Han and Sung-Hyon Myaeng. 2017. Tree- structured curriculum learning based on semantic similarity of text. In 2017 16th IEEE International Conference on Machine Learning and Applications (ICMLA), pages 971-976. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Learning multiple layers of features from tiny images", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Krizhevsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Krizhevsky, Geoffrey Hinton, et al. 2009. Learn- ing multiple layers of features from tiny images.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Flexible shaping: How learning in small steps helps", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Kai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Krueger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Dayan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Cognition", |
|
"volume": "110", |
|
"issue": "3", |
|
"pages": "380--394", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kai A Krueger and Peter Dayan. 2009. Flexible shap- ing: How learning in small steps helps. Cognition, 110(3):380-394.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Albert: A lite bert for self-supervised learning of language representations", |
|
"authors": [ |
|
{ |
|
"first": "Zhenzhong", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingda", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Goodman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Gimpel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Piyush", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Soricut", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2020. Albert: A lite bert for self-supervised learning of language representations. In International Con- ference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Curriculum learning for natural answer generation", |
|
"authors": [ |
|
{ |
|
"first": "Cao", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shizhu", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jun", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4223--4229", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cao Liu, Shizhu He, Kang Liu, and Jun Zhao. 2018. Curriculum learning for natural answer generation. In IJCAI, pages 4223-4229.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter J", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "21", |
|
"issue": "", |
|
"pages": "1--67", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the lim- its of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21:1-67.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "A sentiwordnet strategy for curriculum learning in sentiment analysis", |
|
"authors": [ |
|
{ |
|
"first": "Kaveri", |
|
"middle": [], |
|
"last": "Vijjini Anvesh Rao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radhika", |
|
"middle": [], |
|
"last": "Anuranjana", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mamidi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Applications of Natural Language to Information Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "170--178", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vijjini Anvesh Rao, Kaveri Anuranjana, and Radhika Mamidi. 2020. A sentiwordnet strategy for curricu- lum learning in sentiment analysis. In International Conference on Applications of Natural Language to Information Systems, pages 170-178. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Easy questions first? a case study on curriculum learning for question answering", |
|
"authors": [ |
|
{ |
|
"first": "Mrinmaya", |
|
"middle": [], |
|
"last": "Sachan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "453--463", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mrinmaya Sachan and Eric Xing. 2016. Easy questions first? a case study on curriculum learning for ques- tion answering. In Proceedings of the 54th Annual Meeting of the Association for Computational Lin- guistics (Volume 1: Long Papers), volume 1, pages 453-463.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Self-training for jointly learning to ask and answer questions", |
|
"authors": [ |
|
{ |
|
"first": "Mrinmaya", |
|
"middle": [], |
|
"last": "Sachan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Xing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "629--640", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mrinmaya Sachan and Eric Xing. 2018. Self-training for jointly learning to ask and answer questions. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 629-640.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Very deep convolutional networks for large-scale image recognition", |
|
"authors": [ |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1409.1556" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karen Simonyan and Andrew Zisserman. 2014. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
|
"authors": [ |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Perelygin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Chuang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Potts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 conference on empirical methods in natural language processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1631--1642", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D Manning, Andrew Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment tree- bank. In Proceedings of the 2013 conference on empirical methods in natural language processing, pages 1631-1642.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "From baby steps to leapfrog: How less is more in unsupervised dependency parsing", |
|
"authors": [ |
|
{ |
|
"first": "Hiyan", |
|
"middle": [], |
|
"last": "Valentin I Spitkovsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Alshawi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jurafsky", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "751--759", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Valentin I Spitkovsky, Hiyan Alshawi, and Daniel Ju- rafsky. 2010. From baby steps to leapfrog: How less is more in unsupervised dependency parsing. In Hu- man Language Technologies: The 2010 Annual Con- ference of the North American Chapter of the Associ- ation for Computational Linguistics, pages 751-759. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Learning the curriculum with bayesian optimization for taskspecific word representation learning", |
|
"authors": [ |
|
{ |
|
"first": "Yulia", |
|
"middle": [], |
|
"last": "Tsvetkov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manaal", |
|
"middle": [], |
|
"last": "Faruqui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wang", |
|
"middle": [], |
|
"last": "Ling", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brian", |
|
"middle": [], |
|
"last": "Macwhinney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "130--139", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yulia Tsvetkov, Manaal Faruqui, Wang Ling, Brian MacWhinney, and Chris Dyer. 2016. Learning the curriculum with bayesian optimization for task- specific word representation learning. In Proceed- ings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 130-139.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Glue: A multi-task benchmark and analysis platform for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Michael", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Hill", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "353--355", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2018. Glue: A multi-task benchmark and analysis platform for natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: An- alyzing and Interpreting Neural Networks for NLP, pages 353-355.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Neural network acceptability judgments", |
|
"authors": [ |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Warstadt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samuel", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Bowman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "625--641", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alex Warstadt, Amanpreet Singh, and Samuel R Bow- man. 2019. Neural network acceptability judgments. Transactions of the Association for Computational Linguistics, 7:625-641.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Curriculum learning by transfer learning: Theory and experiments with deep networks", |
|
"authors": [ |
|
{ |
|
"first": "Daphna", |
|
"middle": [], |
|
"last": "Weinshall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gad", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Amir", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5238--5246", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daphna Weinshall, Gad Cohen, and Dan Amir. 2018. Curriculum learning by transfer learning: Theory and experiments with deep networks. In Inter- national Conference on Machine Learning, pages 5238-5246. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Curriculum learning for natural language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Benfeng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Licheng", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhendong", |
|
"middle": [], |
|
"last": "Mao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongtao", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongdong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6095--6104", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benfeng Xu, Licheng Zhang, Zhendong Mao, Quan Wang, Hongtao Xie, and Yongdong Zhang. 2020. Curriculum learning for natural language under- standing. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 6095-6104.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Russ R", |
|
"middle": [], |
|
"last": "Salakhutdinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc V", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5754--5764", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural in- formation processing systems, pages 5754-5764.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Performance comparison shows weakness of One Pass." |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "where, M avg (i) is Averaged Movement index across all heads and layers after the i th curriculum training phase. h and i are iterators h and n are iterators over the total number of heads H and total number of layers N in the BERT Transformer. Total number of curriculum phases are c. Since, M is a difference between subsequent phases, there are a total of c \u2212 1 Average Movement index visualizations of size T xT .Other experimental details such as dataset, curriculum strategy, baby steps, model are all same" |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Attention Movement Index (M avg (i)) visualization for the first and second sentence. The matrices show how model's focus changes for the input sentence between the phases. Blue color implies attention was added during phase transition while red implies attention diminished for those words." |
|
}, |
|
"FIGREF3": { |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Attention Movement Index Visualizations for the sentence. The matrices show how model's focus changes for the input sentence between the phases. Blue color implies attention was added during phase transition while red implies attention diminished for those words." |
|
}, |
|
"TABREF0": { |
|
"html": null, |
|
"text": "", |
|
"type_str": "table", |
|
"content": "<table><tr><td>and Figure 1 give us an insight into what is</td></tr><tr><td>happening with One Pass. Firstly, Table 1 clearly</td></tr><tr><td>shows that One Pass underperforms both No cur-</td></tr><tr><td>riculum and Baby Steps for both the Curriculum</td></tr><tr><td>Strategies. This is in line with previous experi-</td></tr><tr><td>ments as well</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"text": "Accuracy scores in percentage of the two curriculums across Baby Steps and One Pass as compared to no curriculum on the SST5 dataset.", |
|
"type_str": "table", |
|
"content": "<table><tr><td>Model</td><td/><td>Curriculum Strategies</td><td/></tr><tr><td/><td>Baby Steps</td><td>One Pass</td><td>No Curriculum</td></tr><tr><td>Auxiliary Strategy</td><td>52.3</td><td>24.15</td><td>50.03</td></tr><tr><td>Sentence Length Strategy</td><td>51.2</td><td>23.64</td><td>50.03</td></tr></table>", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"text": "Accuracy scores in percentage on all the created meta-data with and without curriculum. Results are obtained by averaging over 5 runs.", |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |