ratishsp committed
Commit
fc33f0e
1 Parent(s): 7f75f82
Files changed (1)
  1. mlb_data_to_text.json +3 -3
mlb_data_to_text.json CHANGED
@@ -19,7 +19,7 @@
       "English"
     ],
     "intended-use": "The dataset can be used to study data-to-text generation. The dataset is in sports domain. It pairs statistics of Major League Baseball (MLB) game with its summary. The summary is in the form of a document containing an average of 540 tokens. Thus it is useful to study long document generation.",
-    "license-other": "N/A",
+    "license-other": "Restricted to non-commercial research purposes.",
     "task": "Data-to-Text",
     "communicative": "Produce a summary of MLB game from its statistics. "
   },
@@ -72,7 +72,7 @@
     "has-consent": "no",
     "consent-policy": "N/A",
     "consent-other": "N/A",
-    "no-consent-justification": "The data is obtained using a web scrape."
+    "no-consent-justification": "The copyright remains with the original data creators and the usage permission is restricted to non-commercial uses."
   },
   "pii": {
     "has-pii": "no PII",
@@ -95,7 +95,7 @@
   "gem": {
     "rationale": {
       "sole-task-dataset": "yes",
-      "distinction-description": "Compared to RotoWire, MLB summaries are longer (approximately by 50%) and the input records are richer and more structured (with the addition of play-by-play). Moreover, the MLB dataset is five times larger in terms of data size (i.e., pairs of tables and game summaries).",
+      "distinction-description": "Compared to the existing RotoWire (Wiseman et al. 2017) dataset, MLB summaries are longer (approximately by 50%) and the input records are richer and more structured (with the addition of play-by-play). Moreover, the MLB dataset is five times larger in terms of data size (i.e., pairs of tables and game summaries).",
       "contribution": "This dataset can verify if models are capable of long document generation. The challenges in long document generation conditioned on input tables include ensuring coherent output, staying faithful to the input, ensuring fluent output and avoiding repetition of text. Such aspects can be verified on models trained on this dataset",
       "sole-language-task-dataset": "no",
       "model-ability": "Long document generation, coherent ordering of information, faithfulness to the input statistics, fluency in generation and avoiding repetition of text."