diff --git "a/sent-level.test.intents.4K.json" "b/sent-level.test.intents.4K.json" new file mode 100644--- /dev/null +++ "b/sent-level.test.intents.4K.json" @@ -0,0 +1,305 @@ +{"before_sent": "In this paper , we present a new sequence-to-sequence pre-training model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism.", "after_sent": "This paper presents a new sequence-to-sequence pre-training model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism.", "before_sent_with_intent": " In this paper , we present a new sequence-to-sequence pre-training model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations.", "labels": "clarity", "doc_id": "2001.04063", "revision_depth": 2} +{"before_sent": " Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step.", "after_sent": " Instead of optimizing one-step-ahead prediction in the traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step.", "before_sent_with_intent": " In this paper , we present a new sequence-to-sequence pre-training model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. 
We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively.", "labels": "clarity", "doc_id": "2001.04063", "revision_depth": 2} +{"before_sent": " Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step.", "after_sent": " Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction that predicts the next n tokens simultaneously based on previous context tokens at each time step.", "before_sent_with_intent": " In this paper , we present a new sequence-to-sequence pre-training model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively.", "labels": "fluency", "doc_id": "2001.04063", "revision_depth": 2} +{"before_sent": " We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively.", "after_sent": " We pre-train ProphetNet using a base scale dataset (16GB) and a large-scale dataset (160GB) respectively.", "before_sent_with_intent": " Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pre-training corpus.", "labels": "fluency", "doc_id": "2001.04063", "revision_depth": 2} +{"before_sent": " We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively.", "after_sent": " We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) , respectively.", "before_sent_with_intent": " Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively. 
Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pre-training corpus.", "labels": "fluency", "doc_id": "2001.04063", "revision_depth": 2} +{"before_sent": " While this approach underperforms its supervised counterpart, we show in this work that the two ideas can be combined: We introduce Pattern-Exploiting Training (PET), a semi-supervised training procedure that reformulates input examples as cloze-style phrases which help the language model understand the given task.", "after_sent": " While this approach underperforms its supervised counterpart, we show in this work that the two ideas can be combined: We introduce Pattern-Exploiting Training (PET), a semi-supervised training procedure that reformulates input examples as cloze-style phrases to help language models understand a given task.", "before_sent_with_intent": " Some NLP tasks can be solved in a fully unsupervised fashion by providing a pretrained language model with \"task descriptions\" in natural language (e.g., Radford et al., 2019). While this approach underperforms its supervised counterpart, we show in this work that the two ideas can be combined: We introduce Pattern-Exploiting Training (PET), a semi-supervised training procedure that reformulates input examples as cloze-style phrases which help the language model understand the given task. Theses phrases are then used to assign soft labels to a large set of unlabeled examples. Finally, regular supervised training is performed on the resulting training set.", "labels": "clarity", "doc_id": "2001.07676", "revision_depth": 1} +{"before_sent": " Theses phrases are then used to assign soft labels to a large set of unlabeled examples.", "after_sent": " These phrases are then used to assign soft labels to a large set of unlabeled examples.", "before_sent_with_intent": " Some NLP tasks can be solved in a fully unsupervised fashion by providing a pretrained language model with \"task descriptions\" in natural language (e.g., Radford et al., 2019). While this approach underperforms its supervised counterpart, we show in this work that the two ideas can be combined: We introduce Pattern-Exploiting Training (PET), a semi-supervised training procedure that reformulates input examples as cloze-style phrases which help the language model understand the given task. Theses phrases are then used to assign soft labels to a large set of unlabeled examples. Finally, regular supervised training is performed on the resulting training set. On several tasks , we show that PET outperforms both supervised training and unsupervised approaches in low-resource settings by a large margin.", "labels": "fluency", "doc_id": "2001.07676", "revision_depth": 1} +{"before_sent": " On several tasks , we show that PET outperforms both supervised training and unsupervised approaches in low-resource settings by a large margin.", "after_sent": " For several tasks and languages, PET outperforms both supervised training and unsupervised approaches in low-resource settings by a large margin.", "before_sent_with_intent": " Theses phrases are then used to assign soft labels to a large set of unlabeled examples. Finally, regular supervised training is performed on the resulting training set. 
On several tasks , we show that PET outperforms both supervised training and unsupervised approaches in low-resource settings by a large margin. ", "labels": "clarity", "doc_id": "2001.07676", "revision_depth": 1} +{"before_sent": " Based on the general linguistic structure of humor, in this paper, we propose a novel approach for detecting humor in short texts by using BERT sentence embedding .", "after_sent": " In this paper, we propose a novel approach for detecting humor in short texts by using BERT sentence embedding .", "before_sent_with_intent": " Automatic humor detection has interesting use cases in modern technologies, such as chatbots and virtual assistants. Based on the general linguistic structure of humor, in this paper, we propose a novel approach for detecting humor in short texts by using BERT sentence embedding . Our proposed method uses BERT to generate embeddings for sentences of a given text and uses these embeddings as inputs for parallel lines of hidden layers in a neural network. These lines are finally concatenated to predict the target value.", "labels": "coherence", "doc_id": "2004.12765", "revision_depth": 2} +{"before_sent": " Based on the general linguistic structure of humor, in this paper, we propose a novel approach for detecting humor in short texts by using BERT sentence embedding .", "after_sent": " Based on the general linguistic structure of humor, in this paper, we propose a novel approach for detecting humor in short texts based on the general linguistic structure of humor .", "before_sent_with_intent": " Automatic humor detection has interesting use cases in modern technologies, such as chatbots and virtual assistants. Based on the general linguistic structure of humor, in this paper, we propose a novel approach for detecting humor in short texts by using BERT sentence embedding . Our proposed method uses BERT to generate embeddings for sentences of a given text and uses these embeddings as inputs for parallel lines of hidden layers in a neural network. These lines are finally concatenated to predict the target value.", "labels": "clarity", "doc_id": "2004.12765", "revision_depth": 2} +{"before_sent": " Our proposed method uses BERT to generate embeddings for sentences of a given text and uses these embeddings as inputs for parallel lines of hidden layers in a neural network.", "after_sent": " Our proposed method uses BERT to generate embeddings for sentences of a given text and uses these embeddings as inputs of parallel lines of hidden layers in a neural network.", "before_sent_with_intent": " Automatic humor detection has interesting use cases in modern technologies, such as chatbots and virtual assistants. Based on the general linguistic structure of humor, in this paper, we propose a novel approach for detecting humor in short texts by using BERT sentence embedding . Our proposed method uses BERT to generate embeddings for sentences of a given text and uses these embeddings as inputs for parallel lines of hidden layers in a neural network. These lines are finally concatenated to predict the target value. 
For evaluation purposes, we created a new dataset for humor detection consisting of 200k formal short texts (100k positive and 100k negative).", "labels": "fluency", "doc_id": "2004.12765", "revision_depth": 2} +{"before_sent": " Our 8-layer model with 110M parameters outperforms all baseline models with a large margin, showing the importance of utilizing linguistic structure in machine learning models.", "after_sent": " Our 8-layer model with 110M parameters outperforms the baseline models with a large margin, showing the importance of utilizing linguistic structure in machine learning models.", "before_sent_with_intent": " For evaluation purposes, we created a new dataset for humor detection consisting of 200k formal short texts (100k positive and 100k negative). Experimental results show that our proposed method can determine humor in short texts with accuracy and an F1-score of 98.2 percent. Our 8-layer model with 110M parameters outperforms all baseline models with a large margin, showing the importance of utilizing linguistic structure in machine learning models. ", "labels": "clarity", "doc_id": "2004.12765", "revision_depth": 2} +{"before_sent": " Our 8-layer model with 110M parameters outperforms all baseline models with a large margin, showing the importance of utilizing linguistic structure in machine learning models.", "after_sent": " Our 8-layer model with 110M parameters outperforms all baseline models with a large margin, showing the importance of utilizing linguistic structure of texts in machine learning models.", "before_sent_with_intent": " For evaluation purposes, we created a new dataset for humor detection consisting of 200k formal short texts (100k positive and 100k negative). Experimental results show that our proposed method can determine humor in short texts with accuracy and an F1-score of 98.2 percent. Our 8-layer model with 110M parameters outperforms all baseline models with a large margin, showing the importance of utilizing linguistic structure in machine learning models. ", "labels": "clarity", "doc_id": "2004.12765", "revision_depth": 2} +{"before_sent": "The Arabic language is a morphological rich language, posing many challenges for information extraction (IE) tasks, including Named Entity Recognition (NER), Part-of-Speech tagging (POS), Argument Role Labeling (ARL) and Relation Extraction (RE).", "after_sent": "Arabic is a morphological rich language, posing many challenges for information extraction (IE) tasks, including Named Entity Recognition (NER), Part-of-Speech tagging (POS), Argument Role Labeling (ARL) and Relation Extraction (RE).", "before_sent_with_intent": " The Arabic language is a morphological rich language, posing many challenges for information extraction (IE) tasks, including Named Entity Recognition (NER), Part-of-Speech tagging (POS), Argument Role Labeling (ARL) and Relation Extraction (RE). A few multilingual pre-trained models have been proposed and show good performance for Arabic, however, most experiment results are reported on language understanding tasks, such as natural language inference, question answering and sentiment analysis. 
Their performance on the IE tasks is less known, in particular, the cross-lingual transfer capability from English to Arabic.", "labels": "clarity", "doc_id": "2004.14519", "revision_depth": 1} +{"before_sent": "The Arabic language is a morphological rich language, posing many challenges for information extraction (IE) tasks, including Named Entity Recognition (NER), Part-of-Speech tagging (POS), Argument Role Labeling (ARL) and Relation Extraction (RE).", "after_sent": "The Arabic language is a morphological rich language, posing many challenges for information extraction (IE) tasks, including Named Entity Recognition (NER), Part-of-Speech tagging (POS), Argument Role Labeling (ARL) , and Relation Extraction (RE).", "before_sent_with_intent": " The Arabic language is a morphological rich language, posing many challenges for information extraction (IE) tasks, including Named Entity Recognition (NER), Part-of-Speech tagging (POS), Argument Role Labeling (ARL) and Relation Extraction (RE). A few multilingual pre-trained models have been proposed and show good performance for Arabic, however, most experiment results are reported on language understanding tasks, such as natural language inference, question answering and sentiment analysis. Their performance on the IE tasks is less known, in particular, the cross-lingual transfer capability from English to Arabic.", "labels": "fluency", "doc_id": "2004.14519", "revision_depth": 1} +{"before_sent": " In this work, we pre-train a Gigaword-based bilingual language model (GigaBERT) to study these two distant languages as well as zero-short transfer learning on the information extraction tasks.", "after_sent": " In this work, we pre-train a Gigaword-based bilingual language model (GigaBERT) to study these two distant languages as well as zero-short transfer learning on various IE tasks.", "before_sent_with_intent": " A few multilingual pre-trained models have been proposed and show good performance for Arabic, however, most experiment results are reported on language understanding tasks, such as natural language inference, question answering and sentiment analysis. Their performance on the IE tasks is less known, in particular, the cross-lingual transfer capability from English to Arabic. In this work, we pre-train a Gigaword-based bilingual language model (GigaBERT) to study these two distant languages as well as zero-short transfer learning on the information extraction tasks. Our GigaBERT model can outperform mBERT and XLM-R-base on NER, POS and ARL tasks, with regarding to the per-language and /or zero-transfer performance. We make our pre-trained models publicly available at URL to facilitate the research of this field.", "labels": "clarity", "doc_id": "2004.14519", "revision_depth": 1} +{"before_sent": "We make our pre-trained models publicly available at URL to facilitate the research of this field.", "after_sent": "We makeWe have made our pre-trained models publicly available at URL to facilitate the research of this field.", "before_sent_with_intent": " In this work, we pre-train a Gigaword-based bilingual language model (GigaBERT) to study these two distant languages as well as zero-short transfer learning on the information extraction tasks. Our GigaBERT model can outperform mBERT and XLM-R-base on NER, POS and ARL tasks, with regarding to the per-language and /or zero-transfer performance. We make our pre-trained models publicly available at URL to facilitate the research of this field. 
", "labels": "clarity", "doc_id": "2004.14519", "revision_depth": 1} +{"before_sent": "We make our pre-trained models publicly available at URL to facilitate the research of this field.", "after_sent": "We make our pre-trained models publicly available at URL ", "before_sent_with_intent": " In this work, we pre-train a Gigaword-based bilingual language model (GigaBERT) to study these two distant languages as well as zero-short transfer learning on the information extraction tasks. Our GigaBERT model can outperform mBERT and XLM-R-base on NER, POS and ARL tasks, with regarding to the per-language and /or zero-transfer performance. We make our pre-trained models publicly available at URL to facilitate the research of this field. ", "labels": "clarity", "doc_id": "2004.14519", "revision_depth": 1} +{"before_sent": "In adversarial (challenge) testing, we pose hard generalization tasks in order to gain insights into the solutions found by our models.", "after_sent": "In adversarial testing, we pose hard generalization tasks in order to gain insights into the solutions found by our models.", "before_sent_with_intent": " In adversarial (challenge) testing, we pose hard generalization tasks in order to gain insights into the solutions found by our models. What properties must a system have in order to succeed at these hard tasks? In this paper, we argue that an essential factor is the ability to form modular representations .", "labels": "coherence", "doc_id": "2004.14623", "revision_depth": 1} +{"before_sent": " In this paper, we argue that an essential factor is the ability to form modular representations .", "after_sent": " In this paper, we argue that an essential factor is modular internal structure .", "before_sent_with_intent": " In adversarial (challenge) testing, we pose hard generalization tasks in order to gain insights into the solutions found by our models. What properties must a system have in order to succeed at these hard tasks? In this paper, we argue that an essential factor is the ability to form modular representations . Our central contribution is a definition of what it means for a representation to be modular and an experimental method for assessing the extent to which a system's solution is modular in this general sense . Our work is grounded empirically in a new challenge Natural Language Inference dataset designed to assess systems on their ability to reason about entailment and negation.", "labels": "clarity", "doc_id": "2004.14623", "revision_depth": 1} +{"before_sent": " Our central contribution is a definition of what it means for a representation to be modular and an experimental method for assessing the extent to which a system's solution is modular in this general sense .", "after_sent": " Our central contribution is a new experimental method called 'interchange interventions', in which systematic manipulations of model-internal states are related to causal effects on their outputs, thereby allowing us to identify modular structure .", "before_sent_with_intent": " What properties must a system have in order to succeed at these hard tasks? In this paper, we argue that an essential factor is the ability to form modular representations . Our central contribution is a definition of what it means for a representation to be modular and an experimental method for assessing the extent to which a system's solution is modular in this general sense . 
Our work is grounded empirically in a new challenge Natural Language Inference dataset designed to assess systems on their ability to reason about entailment and negation. We find that a BERT model with fine-tuning is strikingly successful at the hard generalization tasks we pose using this dataset, and our active manipulations help us to understand why: despite the densely interconnected nature of the BERT architecture, the learned model embeds modular, general theories of lexical entailment relations.", "labels": "clarity", "doc_id": "2004.14623", "revision_depth": 1} +{"before_sent": " We find that a BERT model with fine-tuning is strikingly successful at the hard generalization tasks we pose using this dataset, and our active manipulations help us to understand why: despite the densely interconnected nature of the BERT architecture, the learned model embeds modular, general theories of lexical entailment relations.", "after_sent": " We find that a BERT model is strikingly successful at the hard generalization tasks we pose using this dataset, and our active manipulations help us to understand why: despite the densely interconnected nature of the BERT architecture, the learned model embeds modular, general theories of lexical entailment relations.", "before_sent_with_intent": " Our central contribution is a definition of what it means for a representation to be modular and an experimental method for assessing the extent to which a system's solution is modular in this general sense . Our work is grounded empirically in a new challenge Natural Language Inference dataset designed to assess systems on their ability to reason about entailment and negation. We find that a BERT model with fine-tuning is strikingly successful at the hard generalization tasks we pose using this dataset, and our active manipulations help us to understand why: despite the densely interconnected nature of the BERT architecture, the learned model embeds modular, general theories of lexical entailment relations. ", "labels": "clarity", "doc_id": "2004.14623", "revision_depth": 1} +{"before_sent": " We find that a BERT model with fine-tuning is strikingly successful at the hard generalization tasks we pose using this dataset, and our active manipulations help us to understand why: despite the densely interconnected nature of the BERT architecture, the learned model embeds modular, general theories of lexical entailment relations.", "after_sent": " We find that a BERT model with fine-tuning is strikingly successful at the systematic generalization task we pose using this dataset, and our active manipulations help us to understand why: despite the densely interconnected nature of the BERT architecture, the learned model embeds modular, general theories of lexical entailment relations.", "before_sent_with_intent": " Our central contribution is a definition of what it means for a representation to be modular and an experimental method for assessing the extent to which a system's solution is modular in this general sense . Our work is grounded empirically in a new challenge Natural Language Inference dataset designed to assess systems on their ability to reason about entailment and negation. We find that a BERT model with fine-tuning is strikingly successful at the hard generalization tasks we pose using this dataset, and our active manipulations help us to understand why: despite the densely interconnected nature of the BERT architecture, the learned model embeds modular, general theories of lexical entailment relations. 
", "labels": "clarity", "doc_id": "2004.14623", "revision_depth": 1} +{"before_sent": " We find that a BERT model with fine-tuning is strikingly successful at the hard generalization tasks we pose using this dataset, and our active manipulations help us to understand why: despite the densely interconnected nature of the BERT architecture, the learned model embeds modular, general theories of lexical entailment relations.", "after_sent": " We find that a BERT model with fine-tuning is strikingly successful at the hard generalization tasks we pose using this dataset, and our active manipulations of model-internal vectors help us understand why: despite the densely interconnected nature of the BERT architecture, the learned model embeds modular, general theories of lexical entailment relations.", "before_sent_with_intent": " Our central contribution is a definition of what it means for a representation to be modular and an experimental method for assessing the extent to which a system's solution is modular in this general sense . Our work is grounded empirically in a new challenge Natural Language Inference dataset designed to assess systems on their ability to reason about entailment and negation. We find that a BERT model with fine-tuning is strikingly successful at the hard generalization tasks we pose using this dataset, and our active manipulations help us to understand why: despite the densely interconnected nature of the BERT architecture, the learned model embeds modular, general theories of lexical entailment relations. ", "labels": "clarity", "doc_id": "2004.14623", "revision_depth": 1} +{"before_sent": "For the automatic evaluation of Generative Question Answering (genQA ) systems, it is essential to assess the correctness of the generated answers .", "after_sent": "In the automatic evaluation of Generative Question Answering (genQA ) systems, it is essential to assess the correctness of the generated answers .", "before_sent_with_intent": " For the automatic evaluation of Generative Question Answering (genQA ) systems, it is essential to assess the correctness of the generated answers . However, n-gram similarity metrics, which are widely used to compare generated texts and references, are prone to misjudge fact-based assessments . Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of the correctness.", "labels": "coherence", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": "For the automatic evaluation of Generative Question Answering (genQA ) systems, it is essential to assess the correctness of the generated answers .", "after_sent": "For the automatic evaluation of generative question answering (GenQA ) systems, it is essential to assess the correctness of the generated answers .", "before_sent_with_intent": " For the automatic evaluation of Generative Question Answering (genQA ) systems, it is essential to assess the correctness of the generated answers . However, n-gram similarity metrics, which are widely used to compare generated texts and references, are prone to misjudge fact-based assessments . 
Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of the correctness.", "labels": "fluency", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": "For the automatic evaluation of Generative Question Answering (genQA ) systems, it is essential to assess the correctness of the generated answers .", "after_sent": "For the automatic evaluation of Generative Question Answering (genQA ) systems, it is difficult to assess the correctness of the generated answers .", "before_sent_with_intent": " For the automatic evaluation of Generative Question Answering (genQA ) systems, it is essential to assess the correctness of the generated answers . However, n-gram similarity metrics, which are widely used to compare generated texts and references, are prone to misjudge fact-based assessments . Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of the correctness.", "labels": "clarity", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of the correctness.", "after_sent": " Moreover, there is a lack of benchmark datasets to evaluate the suitability of existing metrics in terms of the correctness.", "before_sent_with_intent": " For the automatic evaluation of Generative Question Answering (genQA ) systems, it is essential to assess the correctness of the generated answers . However, n-gram similarity metrics, which are widely used to compare generated texts and references, are prone to misjudge fact-based assessments . Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of the correctness. To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets. Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments.", "labels": "clarity", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of the correctness.", "after_sent": " Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of correctness.", "before_sent_with_intent": " For the automatic evaluation of Generative Question Answering (genQA ) systems, it is essential to assess the correctness of the generated answers . However, n-gram similarity metrics, which are widely used to compare generated texts and references, are prone to misjudge fact-based assessments . Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of the correctness. To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets. Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments.", "labels": "fluency", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets.", "after_sent": " To study a better metric for GenQA, we first create high-quality human judgments of correctness on two standard genQA datasets.", "before_sent_with_intent": " However, n-gram similarity metrics, which are widely used to compare generated texts and references, are prone to misjudge fact-based assessments . 
Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of the correctness. To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets. Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments. To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth .", "labels": "clarity", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets.", "after_sent": " To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard GenQA datasets.", "before_sent_with_intent": " However, n-gram similarity metrics, which are widely used to compare generated texts and references, are prone to misjudge fact-based assessments . Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of the correctness. To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets. Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments. To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth .", "labels": "fluency", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments.", "after_sent": " Using our human-evaluation datasets, we show that widely used n-gram similarity do not correlate with human judgments.", "before_sent_with_intent": " Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of the correctness. To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets. Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments. To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth . 
Our proposed metric shows a significantly higher correlation with human judgment than widely used existing metrics .", "labels": "clarity", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments.", "after_sent": " Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity metrics do not correlate with human judgments.", "before_sent_with_intent": " Moreover, there is a lack of benchmark datasets to measure the quality of metrics in terms of the correctness. To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets. Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments. To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth . Our proposed metric shows a significantly higher correlation with human judgment than widely used existing metrics .", "labels": "clarity", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth .", "after_sent": " To alleviate this problem, we propose a new metric for evaluating the correctness of GenQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth .", "before_sent_with_intent": " To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets. Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments. To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth . Our proposed metric shows a significantly higher correlation with human judgment than widely used existing metrics .", "labels": "fluency", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth .", "after_sent": " To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . 
Specifically, our new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth .", "before_sent_with_intent": " To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets. Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments. To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth . Our proposed metric shows a significantly higher correlation with human judgment than widely used existing metrics .", "labels": "clarity", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth .", "after_sent": " To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights to each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth .", "before_sent_with_intent": " To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets. Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments. To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth . Our proposed metric shows a significantly higher correlation with human judgment than widely used existing metrics .", "labels": "fluency", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth .", "after_sent": " To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a generated answer sentence captures the key meaning of the human judge's ground-truth .", "before_sent_with_intent": " To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets. Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments. To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . 
Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth . Our proposed metric shows a significantly higher correlation with human judgment than widely used existing metrics .", "labels": "clarity", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth .", "after_sent": " To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the reference answer .", "before_sent_with_intent": " To study a better metric for genQA, we collect high-quality human judgments of correctness on two standard genQA datasets. Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments. To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth . Our proposed metric shows a significantly higher correlation with human judgment than widely used existing metrics .", "labels": "clarity", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " Our proposed metric shows a significantly higher correlation with human judgment than widely used existing metrics .", "after_sent": " Our proposed metric shows a significantly higher correlation with human judgments than existing metrics in various datasets .", "before_sent_with_intent": " Using our human-evaluation datasets, we show that existing metrics based on n-gram similarity do not correlate with human judgments. To alleviate this problem, we propose a new metric for evaluating the correctness of genQA . Specifically, the new metric assigns different weights on each token via keyphrase prediction, thereby judging whether a predicted answer sentence captures the key meaning of the human judge's ground-truth . Our proposed metric shows a significantly higher correlation with human judgment than widely used existing metrics . ", "labels": "clarity", "doc_id": "2005.00192", "revision_depth": 1} +{"before_sent": " Our approach uses a lightweight probing model that learns to map language representations of concrete words to the visual domain .", "after_sent": " Our approach uses a probing model that learns to map language representations of concrete words to the visual domain .", "before_sent_with_intent": " Vision, as a central component of human perception, plays a fundamental role in shaping natural language . To better understand how text models are connected to our visual perceptions , we propose a method for examining the similarities between neural representations extracted from words in text and objects in images . Our approach uses a lightweight probing model that learns to map language representations of concrete words to the visual domain . 
We find that representations from models trained on purely textual data, such as BERT, can be nontrivially mapped to those of a vision model. Such mappings generalize to object categories that were never seen by the probe during training, unlike mappings learned from permuted or random representations .", "labels": "coherence", "doc_id": "2005.00619", "revision_depth": 1} +{"before_sent": " Our approach uses a lightweight probing model that learns to map language representations of concrete words to the visual domain .", "after_sent": " Our approach uses a lightweight probing model that examines how useful language representations are in discerning between different visual representations. We show evidence of a surprising common ground with the visual domain .", "before_sent_with_intent": " Vision, as a central component of human perception, plays a fundamental role in shaping natural language . To better understand how text models are connected to our visual perceptions , we propose a method for examining the similarities between neural representations extracted from words in text and objects in images . Our approach uses a lightweight probing model that learns to map language representations of concrete words to the visual domain . We find that representations from models trained on purely textual data, such as BERT, can be nontrivially mapped to those of a vision model. Such mappings generalize to object categories that were never seen by the probe during training, unlike mappings learned from permuted or random representations .", "labels": "clarity", "doc_id": "2005.00619", "revision_depth": 1} +{"before_sent": " Moreover, we find that the context surrounding objects in sentences greatly impacts performance.", "after_sent": " ", "before_sent_with_intent": " We find that representations from models trained on purely textual data, such as BERT, can be nontrivially mapped to those of a vision model. Such mappings generalize to object categories that were never seen by the probe during training, unlike mappings learned from permuted or random representations . Moreover, we find that the context surrounding objects in sentences greatly impacts performance. Finally, we show that humans significantly outperform all examined models , suggesting considerable room for improvement in representation learning and grounding .", "labels": "clarity", "doc_id": "2005.00619", "revision_depth": 1} +{"before_sent": " Finally, we show that humans significantly outperform all examined models , suggesting considerable room for improvement in representation learning and grounding .", "after_sent": " Finally, we show that the examined models substantially under-perform humans in retrieval. Altogether, our findings shed new empirical insights on language grounding, suggesting that some physical properties are being captured by trained language models, and highlighting large room for future progress .", "before_sent_with_intent": " Such mappings generalize to object categories that were never seen by the probe during training, unlike mappings learned from permuted or random representations . Moreover, we find that the context surrounding objects in sentences greatly impacts performance. Finally, we show that humans significantly outperform all examined models , suggesting considerable room for improvement in representation learning and grounding . 
", "labels": "clarity", "doc_id": "2005.00619", "revision_depth": 1} +{"before_sent": " In this work, we propose a method for characterizing how language representations of concrete nouns relate to the physical appearance of the objects they refer to.", "after_sent": " In this work, we characterize how contextual representations of concrete nouns relate to the physical appearance of the objects they refer to.", "before_sent_with_intent": " While large-scale language models have enjoyed great success recently, much remains to be understood about what is encoded in their representations. In this work, we propose a method for characterizing how language representations of concrete nouns relate to the physical appearance of the objects they refer to. Our approach uses a probing model that examines how useful language representations are in discerning between different visual representations. We show evidence of a surprising common ground with the visual domain, finding representations of many language models to be useful in retrieving semantically aligned image patches .", "labels": "clarity", "doc_id": "2005.00619", "revision_depth": 2} +{"before_sent": " In this work, we propose a method for characterizing how language representations of concrete nouns relate to the physical appearance of the objects they refer to.", "after_sent": " In this work, we propose a method for characterizing how language representations of concrete nouns relate to the physical properties of the objects they refer to.", "before_sent_with_intent": " While large-scale language models have enjoyed great success recently, much remains to be understood about what is encoded in their representations. In this work, we propose a method for characterizing how language representations of concrete nouns relate to the physical appearance of the objects they refer to. Our approach uses a probing model that examines how useful language representations are in discerning between different visual representations. We show evidence of a surprising common ground with the visual domain, finding representations of many language models to be useful in retrieving semantically aligned image patches .", "labels": "clarity", "doc_id": "2005.00619", "revision_depth": 2} +{"before_sent": " Our approach uses a probing model that examines how useful language representations are in discerning between different visual representations.", "after_sent": " Our approach uses a probing model that examines how effective these language representations are in discerning between different visual representations.", "before_sent_with_intent": "