diff --git a/README.md b/README.md index 2628374e6be67666681bac375c6a2236145a9f0e..7ca7843191cc88734106dc3a48449aafa7f3d3f4 100644 --- a/README.md +++ b/README.md @@ -42,106 +42,43 @@ tags: - cross-lingual-similarity - headline-classification dataset_info: -- config_name: wnli.en - features: - - name: hypothesis - dtype: string - - name: premise - dtype: string - - name: label - dtype: - class_label: - names: - '0': not_entailment - '1': entailment - '2': None - splits: - - name: train - num_bytes: 104577 - num_examples: 635 - - name: validation - num_bytes: 11886 - num_examples: 71 - - name: test - num_bytes: 37305 - num_examples: 146 - download_size: 591249 - dataset_size: 153768 -- config_name: wnli.hi +- config_name: actsa-sc.te features: - - name: hypothesis - dtype: string - - name: premise + - name: text dtype: string - name: label dtype: class_label: names: - '0': not_entailment - '1': entailment - '2': None + '0': positive + '1': negative splits: - name: train - num_bytes: 253342 - num_examples: 635 + num_bytes: 1370907 + num_examples: 4328 - name: validation - num_bytes: 28684 - num_examples: 71 + num_bytes: 166089 + num_examples: 541 - name: test - num_bytes: 90831 - num_examples: 146 - download_size: 591249 - dataset_size: 372857 -- config_name: wnli.gu + num_bytes: 168291 + num_examples: 541 + download_size: 727630 + dataset_size: 1705287 +- config_name: bbca.hi features: - - name: hypothesis - dtype: string - - name: premise - dtype: string - name: label - dtype: - class_label: - names: - '0': not_entailment - '1': entailment - '2': None - splits: - - name: train - num_bytes: 251562 - num_examples: 635 - - name: validation - num_bytes: 28183 - num_examples: 71 - - name: test - num_bytes: 94586 - num_examples: 146 - download_size: 591249 - dataset_size: 374331 -- config_name: wnli.mr - features: - - name: hypothesis dtype: string - - name: premise + - name: text dtype: string - - name: label - dtype: - class_label: - names: - '0': not_entailment - '1': entailment - '2': None splits: - name: train - num_bytes: 256657 - num_examples: 635 - - name: validation - num_bytes: 29226 - num_examples: 71 + num_bytes: 22126205 + num_examples: 3467 - name: test - num_bytes: 97136 - num_examples: 146 - download_size: 591249 - dataset_size: 383019 + num_bytes: 5501148 + num_examples: 866 + download_size: 10349015 + dataset_size: 27627353 - config_name: copa.en features: - name: premise @@ -156,17 +93,17 @@ dataset_info: dtype: int32 splits: - name: train - num_bytes: 46049 + num_bytes: 46033 num_examples: 400 - name: validation - num_bytes: 11695 + num_bytes: 11679 num_examples: 100 - name: test - num_bytes: 55862 + num_bytes: 55846 num_examples: 500 - download_size: 757679 - dataset_size: 113606 -- config_name: copa.hi + download_size: 79431 + dataset_size: 113558 +- config_name: copa.gu features: - name: premise dtype: string @@ -180,17 +117,17 @@ dataset_info: dtype: int32 splits: - name: train - num_bytes: 93392 + num_bytes: 92097 num_examples: 362 - name: validation - num_bytes: 23575 + num_bytes: 23450 num_examples: 88 - name: test - num_bytes: 112846 - num_examples: 449 - download_size: 757679 - dataset_size: 229813 -- config_name: copa.gu + num_bytes: 109997 + num_examples: 448 + download_size: 107668 + dataset_size: 225544 +- config_name: copa.hi features: - name: premise dtype: string @@ -204,16 +141,16 @@ dataset_info: dtype: int32 splits: - name: train - num_bytes: 92113 + num_bytes: 93376 num_examples: 362 - name: validation - num_bytes: 23466 + num_bytes: 23559 num_examples: 88 
- name: test - num_bytes: 110013 - num_examples: 448 - download_size: 757679 - dataset_size: 225592 + num_bytes: 112830 + num_examples: 449 + download_size: 104233 + dataset_size: 229765 - config_name: copa.mr features: - name: premise @@ -228,42 +165,16 @@ dataset_info: dtype: int32 splits: - name: train - num_bytes: 93457 + num_bytes: 93441 num_examples: 362 - name: validation - num_bytes: 23890 + num_bytes: 23874 num_examples: 88 - name: test - num_bytes: 112071 + num_bytes: 112055 num_examples: 449 - download_size: 757679 - dataset_size: 229418 -- config_name: sna.bn - features: - - name: text - dtype: string - - name: label - dtype: - class_label: - names: - '0': kolkata - '1': state - '2': national - '3': sports - '4': entertainment - '5': international - splits: - - name: train - num_bytes: 46070054 - num_examples: 11284 - - name: validation - num_bytes: 5648130 - num_examples: 1411 - - name: test - num_bytes: 5799983 - num_examples: 1411 - download_size: 11803096 - dataset_size: 57518167 + download_size: 105962 + dataset_size: 229370 - config_name: csqa.as features: - name: question @@ -280,10 +191,10 @@ dataset_info: sequence: string splits: - name: test - num_bytes: 3800555 + num_bytes: 3800523 num_examples: 2942 - download_size: 65099316 - dataset_size: 3800555 + download_size: 1390423 + dataset_size: 3800523 - config_name: csqa.bn features: - name: question @@ -300,10 +211,10 @@ dataset_info: sequence: string splits: - name: test - num_bytes: 54671146 + num_bytes: 54671018 num_examples: 38845 - download_size: 65099316 - dataset_size: 54671146 + download_size: 19648180 + dataset_size: 54671018 - config_name: csqa.gu features: - name: question @@ -320,10 +231,10 @@ dataset_info: sequence: string splits: - name: test - num_bytes: 29131703 + num_bytes: 29131607 num_examples: 22861 - download_size: 65099316 - dataset_size: 29131703 + download_size: 6027825 + dataset_size: 29131607 - config_name: csqa.hi features: - name: question @@ -340,10 +251,10 @@ dataset_info: sequence: string splits: - name: test - num_bytes: 40409475 + num_bytes: 40409347 num_examples: 35140 - download_size: 65099316 - dataset_size: 40409475 + download_size: 14711258 + dataset_size: 40409347 - config_name: csqa.kn features: - name: question @@ -360,10 +271,10 @@ dataset_info: sequence: string splits: - name: test - num_bytes: 21199880 + num_bytes: 21199816 num_examples: 13666 - download_size: 65099316 - dataset_size: 21199880 + download_size: 7669655 + dataset_size: 21199816 - config_name: csqa.ml features: - name: question @@ -380,10 +291,10 @@ dataset_info: sequence: string splits: - name: test - num_bytes: 47220932 + num_bytes: 47220836 num_examples: 26537 - download_size: 65099316 - dataset_size: 47220932 + download_size: 17382215 + dataset_size: 47220836 - config_name: csqa.mr features: - name: question @@ -400,10 +311,10 @@ dataset_info: sequence: string splits: - name: test - num_bytes: 13667238 + num_bytes: 13667174 num_examples: 11370 - download_size: 65099316 - dataset_size: 13667238 + download_size: 5072738 + dataset_size: 13667174 - config_name: csqa.or features: - name: question @@ -420,10 +331,10 @@ dataset_info: sequence: string splits: - name: test - num_bytes: 2562397 + num_bytes: 2562365 num_examples: 1975 - download_size: 65099316 - dataset_size: 2562397 + download_size: 948046 + dataset_size: 2562365 - config_name: csqa.pa features: - name: question @@ -440,10 +351,10 @@ dataset_info: sequence: string splits: - name: test - num_bytes: 5806129 + num_bytes: 5806097 num_examples: 5667 - 
download_size: 65099316 - dataset_size: 5806129 + download_size: 2194109 + dataset_size: 5806097 - config_name: csqa.ta features: - name: question @@ -460,10 +371,10 @@ dataset_info: sequence: string splits: - name: test - num_bytes: 61868609 + num_bytes: 61868481 num_examples: 38590 - download_size: 65099316 - dataset_size: 61868609 + download_size: 20789467 + dataset_size: 61868481 - config_name: csqa.te features: - name: question @@ -480,318 +391,164 @@ dataset_info: sequence: string splits: - name: test - num_bytes: 58785157 + num_bytes: 58784997 num_examples: 41338 - download_size: 65099316 - dataset_size: 58785157 -- config_name: wstp.as + download_size: 17447618 + dataset_size: 58784997 +- config_name: cvit-mkb-clsr.en-bn features: - - name: sectionText - dtype: string - - name: correctTitle - dtype: string - - name: titleA - dtype: string - - name: titleB - dtype: string - - name: titleC - dtype: string - - name: titleD + - name: sentence1 dtype: string - - name: url + - name: sentence2 dtype: string splits: - - name: train - num_bytes: 13581364 - num_examples: 5000 - - name: validation - num_bytes: 1698996 - num_examples: 625 - name: test - num_bytes: 1697678 - num_examples: 626 - download_size: 242008091 - dataset_size: 16978038 -- config_name: wstp.bn + num_bytes: 1990957 + num_examples: 5522 + download_size: 945551 + dataset_size: 1990957 +- config_name: cvit-mkb-clsr.en-gu features: - - name: sectionText - dtype: string - - name: correctTitle - dtype: string - - name: titleA - dtype: string - - name: titleB + - name: sentence1 dtype: string - - name: titleC + - name: sentence2 dtype: string - - name: titleD + splits: + - name: test + num_bytes: 2303377 + num_examples: 6463 + download_size: 1093313 + dataset_size: 2303377 +- config_name: cvit-mkb-clsr.en-hi + features: + - name: sentence1 dtype: string - - name: url + - name: sentence2 dtype: string splits: - - name: train - num_bytes: 143340597 - num_examples: 47580 - - name: validation - num_bytes: 17759264 - num_examples: 5947 - name: test - num_bytes: 17633893 - num_examples: 5948 - download_size: 242008091 - dataset_size: 178733754 -- config_name: wstp.gu + num_bytes: 1855989 + num_examples: 5169 + download_size: 890609 + dataset_size: 1855989 +- config_name: cvit-mkb-clsr.en-ml features: - - name: sectionText - dtype: string - - name: correctTitle + - name: sentence1 dtype: string - - name: titleA + - name: sentence2 dtype: string - - name: titleB + splits: + - name: test + num_bytes: 1990089 + num_examples: 4886 + download_size: 868956 + dataset_size: 1990089 +- config_name: cvit-mkb-clsr.en-mr + features: + - name: sentence1 dtype: string - - name: titleC + - name: sentence2 dtype: string - - name: titleD + splits: + - name: test + num_bytes: 2130601 + num_examples: 5760 + download_size: 993961 + dataset_size: 2130601 +- config_name: cvit-mkb-clsr.en-or + features: + - name: sentence1 dtype: string - - name: url + - name: sentence2 dtype: string splits: - - name: train - num_bytes: 39353520 - num_examples: 10004 - - name: validation - num_bytes: 4887780 - num_examples: 1251 - name: test - num_bytes: 4699186 - num_examples: 1251 - download_size: 242008091 - dataset_size: 48940486 -- config_name: wstp.hi + num_bytes: 274873 + num_examples: 752 + download_size: 134334 + dataset_size: 274873 +- config_name: cvit-mkb-clsr.en-ta features: - - name: sectionText + - name: sentence1 dtype: string - - name: correctTitle + - name: sentence2 dtype: string - - name: titleA + splits: + - name: test + num_bytes: 2565178 + 
num_examples: 5637 + download_size: 1091653 + dataset_size: 2565178 +- config_name: cvit-mkb-clsr.en-te + features: + - name: sentence1 dtype: string - - name: titleB + - name: sentence2 dtype: string - - name: titleC + splits: + - name: test + num_bytes: 1771129 + num_examples: 5049 + download_size: 840410 + dataset_size: 1771129 +- config_name: cvit-mkb-clsr.en-ur + features: + - name: sentence1 dtype: string - - name: titleD + - name: sentence2 dtype: string - - name: url + splits: + - name: test + num_bytes: 288430 + num_examples: 1006 + download_size: 166129 + dataset_size: 288430 +- config_name: iitp-mr.hi + features: + - name: text dtype: string + - name: label + dtype: + class_label: + names: + '0': negative + '1': neutral + '2': positive splits: - name: train - num_bytes: 158529718 - num_examples: 44069 + num_bytes: 6704905 + num_examples: 2480 - name: validation - num_bytes: 19371932 - num_examples: 5509 + num_bytes: 822218 + num_examples: 310 - name: test - num_bytes: 19593029 - num_examples: 5509 - download_size: 242008091 - dataset_size: 197494679 -- config_name: wstp.kn + num_bytes: 702373 + num_examples: 310 + download_size: 3151762 + dataset_size: 8229496 +- config_name: iitp-pr.hi features: - - name: sectionText - dtype: string - - name: correctTitle - dtype: string - - name: titleA - dtype: string - - name: titleB - dtype: string - - name: titleC - dtype: string - - name: titleD - dtype: string - - name: url + - name: text dtype: string + - name: label + dtype: + class_label: + names: + '0': negative + '1': neutral + '2': positive splits: - name: train - num_bytes: 139950425 - num_examples: 35379 + num_bytes: 945589 + num_examples: 4182 - name: validation - num_bytes: 17789810 - num_examples: 4422 + num_bytes: 120100 + num_examples: 523 - name: test - num_bytes: 17897059 - num_examples: 4423 - download_size: 242008091 - dataset_size: 175637294 -- config_name: wstp.ml - features: - - name: sectionText - dtype: string - - name: correctTitle - dtype: string - - name: titleA - dtype: string - - name: titleB - dtype: string - - name: titleC - dtype: string - - name: titleD - dtype: string - - name: url - dtype: string - splits: - - name: train - num_bytes: 88360588 - num_examples: 27527 - - name: validation - num_bytes: 11193368 - num_examples: 3441 - - name: test - num_bytes: 11150942 - num_examples: 3441 - download_size: 242008091 - dataset_size: 110704898 -- config_name: wstp.mr - features: - - name: sectionText - dtype: string - - name: correctTitle - dtype: string - - name: titleA - dtype: string - - name: titleB - dtype: string - - name: titleC - dtype: string - - name: titleD - dtype: string - - name: url - dtype: string - splits: - - name: train - num_bytes: 28302397 - num_examples: 10446 - - name: validation - num_bytes: 3328826 - num_examples: 1306 - - name: test - num_bytes: 3631712 - num_examples: 1306 - download_size: 242008091 - dataset_size: 35262935 -- config_name: wstp.or - features: - - name: sectionText - dtype: string - - name: correctTitle - dtype: string - - name: titleA - dtype: string - - name: titleB - dtype: string - - name: titleC - dtype: string - - name: titleD - dtype: string - - name: url - dtype: string - splits: - - name: train - num_bytes: 10900034 - num_examples: 4015 - - name: validation - num_bytes: 1264963 - num_examples: 502 - - name: test - num_bytes: 1344680 - num_examples: 502 - download_size: 242008091 - dataset_size: 13509677 -- config_name: wstp.pa - features: - - name: sectionText - dtype: string - - name: correctTitle - dtype: 
string - - name: titleA - dtype: string - - name: titleB - dtype: string - - name: titleC - dtype: string - - name: titleD - dtype: string - - name: url - dtype: string - splits: - - name: train - num_bytes: 22189758 - num_examples: 8772 - - name: validation - num_bytes: 2789214 - num_examples: 1097 - - name: test - num_bytes: 2685795 - num_examples: 1097 - download_size: 242008091 - dataset_size: 27664767 -- config_name: wstp.ta - features: - - name: sectionText - dtype: string - - name: correctTitle - dtype: string - - name: titleA - dtype: string - - name: titleB - dtype: string - - name: titleC - dtype: string - - name: titleD - dtype: string - - name: url - dtype: string - splits: - - name: train - num_bytes: 151929358 - num_examples: 48940 - - name: validation - num_bytes: 18817195 - num_examples: 6117 - - name: test - num_bytes: 18815099 - num_examples: 6118 - download_size: 242008091 - dataset_size: 189561652 -- config_name: wstp.te - features: - - name: sectionText - dtype: string - - name: correctTitle - dtype: string - - name: titleA - dtype: string - - name: titleB - dtype: string - - name: titleC - dtype: string - - name: titleD - dtype: string - - name: url - dtype: string - splits: - - name: train - num_bytes: 151696915 - num_examples: 80000 - - name: validation - num_bytes: 19003197 - num_examples: 10000 - - name: test - num_bytes: 18991941 - num_examples: 10000 - download_size: 242008091 - dataset_size: 189692053 + num_bytes: 121910 + num_examples: 523 + download_size: 509822 + dataset_size: 1187599 - config_name: inltkh.gu features: - name: text @@ -812,16 +569,16 @@ dataset_info: '9': neutral splits: - name: train - num_bytes: 883067 + num_bytes: 883063 num_examples: 5269 - name: validation - num_bytes: 111205 + num_bytes: 111201 num_examples: 659 - name: test - num_bytes: 110761 + num_bytes: 110757 num_examples: 659 - download_size: 2054771 - dataset_size: 1105033 + download_size: 515094 + dataset_size: 1105021 - config_name: inltkh.ml features: - name: text @@ -842,16 +599,16 @@ dataset_info: '9': neutral splits: - name: train - num_bytes: 1108149 + num_bytes: 1108145 num_examples: 5036 - name: validation - num_bytes: 140059 + num_bytes: 140055 num_examples: 630 - name: test - num_bytes: 138851 + num_bytes: 138847 num_examples: 630 - download_size: 2054771 - dataset_size: 1387059 + download_size: 571019 + dataset_size: 1387047 - config_name: inltkh.mr features: - name: text @@ -872,16 +629,16 @@ dataset_info: '9': neutral splits: - name: train - num_bytes: 1462618 + num_bytes: 1462614 num_examples: 9672 - name: validation - num_bytes: 180310 + num_bytes: 180306 num_examples: 1210 - name: test - num_bytes: 180562 + num_bytes: 180558 num_examples: 1210 - download_size: 2054771 - dataset_size: 1823490 + download_size: 840304 + dataset_size: 1823478 - config_name: inltkh.ta features: - name: text @@ -902,16 +659,16 @@ dataset_info: '9': neutral splits: - name: train - num_bytes: 2659573 + num_bytes: 2659569 num_examples: 5346 - name: validation - num_bytes: 316087 + num_bytes: 316083 num_examples: 669 - name: test - num_bytes: 320469 + num_bytes: 320465 num_examples: 669 - download_size: 2054771 - dataset_size: 3296129 + download_size: 1271262 + dataset_size: 3296117 - config_name: inltkh.te features: - name: text @@ -932,140 +689,39 @@ dataset_info: '9': neutral splits: - name: train - num_bytes: 1361671 + num_bytes: 1361667 num_examples: 4328 - name: validation - num_bytes: 170475 + num_bytes: 170471 num_examples: 541 - name: test - num_bytes: 173153 + num_bytes: 173149 
num_examples: 541 - download_size: 2054771 - dataset_size: 1705299 -- config_name: bbca.hi + download_size: 726293 + dataset_size: 1705287 +- config_name: md.hi features: - - name: label + - name: sentence dtype: string - - name: text + - name: discourse_mode dtype: string + - name: story_number + dtype: int32 + - name: id + dtype: int32 splits: - name: train - num_bytes: 22126213 - num_examples: 3467 - - name: test - num_bytes: 5501156 - num_examples: 866 - download_size: 5770136 - dataset_size: 27627369 -- config_name: cvit-mkb-clsr.en-bn - features: - - name: sentence1 - dtype: string - - name: sentence2 - dtype: string - splits: - - name: test - num_bytes: 2002009 - num_examples: 5522 - download_size: 3702442 - dataset_size: 2002009 -- config_name: cvit-mkb-clsr.en-gu - features: - - name: sentence1 - dtype: string - - name: sentence2 - dtype: string - splits: - - name: test - num_bytes: 2316311 - num_examples: 6463 - download_size: 3702442 - dataset_size: 2316311 -- config_name: cvit-mkb-clsr.en-hi - features: - - name: sentence1 - dtype: string - - name: sentence2 - dtype: string - splits: - - name: test - num_bytes: 1866335 - num_examples: 5169 - download_size: 3702442 - dataset_size: 1866335 -- config_name: cvit-mkb-clsr.en-ml - features: - - name: sentence1 - dtype: string - - name: sentence2 - dtype: string - splits: - - name: test - num_bytes: 1999869 - num_examples: 4886 - download_size: 3702442 - dataset_size: 1999869 -- config_name: cvit-mkb-clsr.en-mr - features: - - name: sentence1 - dtype: string - - name: sentence2 - dtype: string - splits: - - name: test - num_bytes: 2142129 - num_examples: 5760 - download_size: 3702442 - dataset_size: 2142129 -- config_name: cvit-mkb-clsr.en-or - features: - - name: sentence1 - dtype: string - - name: sentence2 - dtype: string - splits: - - name: test - num_bytes: 276385 - num_examples: 752 - download_size: 3702442 - dataset_size: 276385 -- config_name: cvit-mkb-clsr.en-ta - features: - - name: sentence1 - dtype: string - - name: sentence2 - dtype: string - splits: - - name: test - num_bytes: 2576460 - num_examples: 5637 - download_size: 3702442 - dataset_size: 2576460 -- config_name: cvit-mkb-clsr.en-te - features: - - name: sentence1 - dtype: string - - name: sentence2 - dtype: string - splits: - - name: test - num_bytes: 1781235 - num_examples: 5049 - download_size: 3702442 - dataset_size: 1781235 -- config_name: cvit-mkb-clsr.en-ur - features: - - name: sentence1 - dtype: string - - name: sentence2 - dtype: string - splits: + num_bytes: 1672109 + num_examples: 7974 + - name: validation + num_bytes: 211187 + num_examples: 997 - name: test - num_bytes: 290450 - num_examples: 1006 - download_size: 3702442 - dataset_size: 290450 -- config_name: iitp-mr.hi + num_bytes: 210175 + num_examples: 997 + download_size: 939801 + dataset_size: 2093471 +- config_name: sna.bn features: - name: text dtype: string @@ -1073,89 +729,25 @@ dataset_info: dtype: class_label: names: - '0': negative - '1': neutral - '2': positive + '0': kolkata + '1': state + '2': national + '3': sports + '4': entertainment + '5': international splits: - name: train - num_bytes: 6704909 - num_examples: 2480 + num_bytes: 46070046 + num_examples: 11284 - name: validation - num_bytes: 822222 - num_examples: 310 + num_bytes: 5648126 + num_examples: 1411 - name: test - num_bytes: 702377 - num_examples: 310 - download_size: 1742048 - dataset_size: 8229508 -- config_name: iitp-pr.hi - features: - - name: text - dtype: string - - name: label - dtype: - class_label: - names: - '0': 
negative - '1': neutral - '2': positive - splits: - - name: train - num_bytes: 945593 - num_examples: 4182 - - name: validation - num_bytes: 120104 - num_examples: 523 - - name: test - num_bytes: 121914 - num_examples: 523 - download_size: 266545 - dataset_size: 1187611 -- config_name: actsa-sc.te - features: - - name: text - dtype: string - - name: label - dtype: - class_label: - names: - '0': positive - '1': negative - splits: - - name: train - num_bytes: 1370911 - num_examples: 4328 - - name: validation - num_bytes: 166093 - num_examples: 541 - - name: test - num_bytes: 168295 - num_examples: 541 - download_size: 378882 - dataset_size: 1705299 -- config_name: md.hi - features: - - name: sentence - dtype: string - - name: discourse_mode - dtype: string - - name: story_number - dtype: int32 - - name: id - dtype: int32 - splits: - - name: train - num_bytes: 1672117 - num_examples: 7974 - - name: validation - num_bytes: 211195 - num_examples: 997 - - name: test - num_bytes: 210183 - num_examples: 997 - download_size: 1048441 - dataset_size: 2093495 -- config_name: wiki-ner.as + num_bytes: 5799979 + num_examples: 1411 + download_size: 21415940 + dataset_size: 57518151 +- config_name: wiki-ner.as features: - name: tokens sequence: string @@ -1175,16 +767,16 @@ dataset_info: sequence: string splits: - name: train - num_bytes: 375007 + num_bytes: 374983 num_examples: 1021 - name: validation - num_bytes: 49336 + num_bytes: 49312 num_examples: 157 - name: test - num_bytes: 50480 + num_bytes: 50456 num_examples: 160 - download_size: 5980272 - dataset_size: 474823 + download_size: 72919 + dataset_size: 474751 - config_name: wiki-ner.bn features: - name: tokens @@ -1205,16 +797,16 @@ dataset_info: sequence: string splits: - name: train - num_bytes: 7502896 + num_bytes: 7502824 num_examples: 20223 - name: validation - num_bytes: 988707 + num_bytes: 988683 num_examples: 2985 - name: test - num_bytes: 985965 + num_bytes: 985941 num_examples: 2690 - download_size: 5980272 - dataset_size: 9477568 + download_size: 1278219 + dataset_size: 9477448 - config_name: wiki-ner.gu features: - name: tokens @@ -1235,16 +827,16 @@ dataset_info: sequence: string splits: - name: train - num_bytes: 1571612 + num_bytes: 1571588 num_examples: 2343 - name: validation - num_bytes: 192828 + num_bytes: 192804 num_examples: 297 - name: test - num_bytes: 197901 + num_bytes: 197877 num_examples: 255 - download_size: 5980272 - dataset_size: 1962341 + download_size: 329660 + dataset_size: 1962269 - config_name: wiki-ner.hi features: - name: tokens @@ -1265,16 +857,16 @@ dataset_info: sequence: string splits: - name: train - num_bytes: 3762529 + num_bytes: 3762505 num_examples: 9463 - name: validation - num_bytes: 468702 + num_bytes: 468678 num_examples: 1114 - name: test - num_bytes: 475277 + num_bytes: 475253 num_examples: 1256 - download_size: 5980272 - dataset_size: 4706508 + download_size: 948132 + dataset_size: 4706436 - config_name: wiki-ner.kn features: - name: tokens @@ -1295,16 +887,16 @@ dataset_info: sequence: string splits: - name: train - num_bytes: 1352051 + num_bytes: 1352027 num_examples: 2679 - name: validation - num_bytes: 179562 + num_bytes: 179538 num_examples: 412 - name: test - num_bytes: 180815 + num_bytes: 180791 num_examples: 476 - download_size: 5980272 - dataset_size: 1712428 + download_size: 421877 + dataset_size: 1712356 - config_name: wiki-ner.ml features: - name: tokens @@ -1325,16 +917,16 @@ dataset_info: sequence: string splits: - name: train - num_bytes: 7678935 + num_bytes: 7678887 num_examples: 
15620 - name: validation - num_bytes: 969971 + num_bytes: 969947 num_examples: 2067 - name: test - num_bytes: 991126 + num_bytes: 991102 num_examples: 2042 - download_size: 5980272 - dataset_size: 9640032 + download_size: 2390442 + dataset_size: 9639936 - config_name: wiki-ner.mr features: - name: tokens @@ -1355,16 +947,16 @@ dataset_info: sequence: string splits: - name: train - num_bytes: 5431537 + num_bytes: 5431489 num_examples: 12151 - name: validation - num_bytes: 701661 + num_bytes: 701637 num_examples: 1498 - name: test - num_bytes: 655706 + num_bytes: 655682 num_examples: 1329 - download_size: 5980272 - dataset_size: 6788904 + download_size: 1410663 + dataset_size: 6788808 - config_name: wiki-ner.or features: - name: tokens @@ -1385,16 +977,16 @@ dataset_info: sequence: string splits: - name: train - num_bytes: 493782 + num_bytes: 493758 num_examples: 1077 - name: validation - num_bytes: 58592 + num_bytes: 58568 num_examples: 132 - name: test - num_bytes: 62235 + num_bytes: 62211 num_examples: 153 - download_size: 5980272 - dataset_size: 614609 + download_size: 102783 + dataset_size: 614537 - config_name: wiki-ner.pa features: - name: tokens @@ -1415,16 +1007,16 @@ dataset_info: sequence: string splits: - name: train - num_bytes: 520268 + num_bytes: 520244 num_examples: 1408 - name: validation - num_bytes: 61194 + num_bytes: 61170 num_examples: 186 - name: test - num_bytes: 61812 + num_bytes: 61788 num_examples: 179 - download_size: 5980272 - dataset_size: 643274 + download_size: 149727 + dataset_size: 643202 - config_name: wiki-ner.ta features: - name: tokens @@ -1445,16 +1037,16 @@ dataset_info: sequence: string splits: - name: train - num_bytes: 10117152 + num_bytes: 10117080 num_examples: 20466 - name: validation - num_bytes: 1267212 + num_bytes: 1267188 num_examples: 2586 - name: test - num_bytes: 1321650 + num_bytes: 1321626 num_examples: 2611 - download_size: 5980272 - dataset_size: 12706014 + download_size: 2819083 + dataset_size: 12705894 - config_name: wiki-ner.te features: - name: tokens @@ -1475,16 +1067,831 @@ dataset_info: sequence: string splits: - name: train - num_bytes: 3881235 + num_bytes: 3881211 num_examples: 7978 - name: validation - num_bytes: 458533 + num_bytes: 458509 num_examples: 841 - name: test - num_bytes: 507830 + num_bytes: 507806 num_examples: 1110 - download_size: 5980272 - dataset_size: 4847598 + download_size: 1006881 + dataset_size: 4847526 +- config_name: wnli.en + features: + - name: hypothesis + dtype: string + - name: premise + dtype: string + - name: label + dtype: + class_label: + names: + '0': not_entailment + '1': entailment + '2': None + splits: + - name: train + num_bytes: 104569 + num_examples: 635 + - name: validation + num_bytes: 11878 + num_examples: 71 + - name: test + num_bytes: 37297 + num_examples: 146 + download_size: 57667 + dataset_size: 153744 +- config_name: wnli.gu + features: + - name: hypothesis + dtype: string + - name: premise + dtype: string + - name: label + dtype: + class_label: + names: + '0': not_entailment + '1': entailment + '2': None + splits: + - name: train + num_bytes: 251554 + num_examples: 635 + - name: validation + num_bytes: 28175 + num_examples: 71 + - name: test + num_bytes: 94578 + num_examples: 146 + download_size: 98032 + dataset_size: 374307 +- config_name: wnli.hi + features: + - name: hypothesis + dtype: string + - name: premise + dtype: string + - name: label + dtype: + class_label: + names: + '0': not_entailment + '1': entailment + '2': None + splits: + - name: train + num_bytes: 253334 + 
num_examples: 635 + - name: validation + num_bytes: 28676 + num_examples: 71 + - name: test + num_bytes: 90823 + num_examples: 146 + download_size: 99450 + dataset_size: 372833 +- config_name: wnli.mr + features: + - name: hypothesis + dtype: string + - name: premise + dtype: string + - name: label + dtype: + class_label: + names: + '0': not_entailment + '1': entailment + '2': None + splits: + - name: train + num_bytes: 256649 + num_examples: 635 + - name: validation + num_bytes: 29218 + num_examples: 71 + - name: test + num_bytes: 97128 + num_examples: 146 + download_size: 103774 + dataset_size: 382995 +- config_name: wstp.as + features: + - name: sectionText + dtype: string + - name: correctTitle + dtype: string + - name: titleA + dtype: string + - name: titleB + dtype: string + - name: titleC + dtype: string + - name: titleD + dtype: string + - name: url + dtype: string + splits: + - name: train + num_bytes: 13581336 + num_examples: 5000 + - name: validation + num_bytes: 1698968 + num_examples: 625 + - name: test + num_bytes: 1697650 + num_examples: 626 + download_size: 6959458 + dataset_size: 16977954 +- config_name: wstp.bn + features: + - name: sectionText + dtype: string + - name: correctTitle + dtype: string + - name: titleA + dtype: string + - name: titleB + dtype: string + - name: titleC + dtype: string + - name: titleD + dtype: string + - name: url + dtype: string + splits: + - name: train + num_bytes: 143340457 + num_examples: 47580 + - name: validation + num_bytes: 17759236 + num_examples: 5947 + - name: test + num_bytes: 17633865 + num_examples: 5948 + download_size: 69145372 + dataset_size: 178733558 +- config_name: wstp.gu + features: + - name: sectionText + dtype: string + - name: correctTitle + dtype: string + - name: titleA + dtype: string + - name: titleB + dtype: string + - name: titleC + dtype: string + - name: titleD + dtype: string + - name: url + dtype: string + splits: + - name: train + num_bytes: 39353464 + num_examples: 10004 + - name: validation + num_bytes: 4887752 + num_examples: 1251 + - name: test + num_bytes: 4699158 + num_examples: 1251 + download_size: 19763249 + dataset_size: 48940374 +- config_name: wstp.hi + features: + - name: sectionText + dtype: string + - name: correctTitle + dtype: string + - name: titleA + dtype: string + - name: titleB + dtype: string + - name: titleC + dtype: string + - name: titleD + dtype: string + - name: url + dtype: string + splits: + - name: train + num_bytes: 158529578 + num_examples: 44069 + - name: validation + num_bytes: 19371904 + num_examples: 5509 + - name: test + num_bytes: 19593001 + num_examples: 5509 + download_size: 77868574 + dataset_size: 197494483 +- config_name: wstp.kn + features: + - name: sectionText + dtype: string + - name: correctTitle + dtype: string + - name: titleA + dtype: string + - name: titleB + dtype: string + - name: titleC + dtype: string + - name: titleD + dtype: string + - name: url + dtype: string + splits: + - name: train + num_bytes: 139950313 + num_examples: 35379 + - name: validation + num_bytes: 17789782 + num_examples: 4422 + - name: test + num_bytes: 17897031 + num_examples: 4423 + download_size: 67719504 + dataset_size: 175637126 +- config_name: wstp.ml + features: + - name: sectionText + dtype: string + - name: correctTitle + dtype: string + - name: titleA + dtype: string + - name: titleB + dtype: string + - name: titleC + dtype: string + - name: titleD + dtype: string + - name: url + dtype: string + splits: + - name: train + num_bytes: 88360504 + num_examples: 27527 + - name: 
validation + num_bytes: 11193340 + num_examples: 3441 + - name: test + num_bytes: 11150914 + num_examples: 3441 + download_size: 42336357 + dataset_size: 110704758 +- config_name: wstp.mr + features: + - name: sectionText + dtype: string + - name: correctTitle + dtype: string + - name: titleA + dtype: string + - name: titleB + dtype: string + - name: titleC + dtype: string + - name: titleD + dtype: string + - name: url + dtype: string + splits: + - name: train + num_bytes: 28302341 + num_examples: 10446 + - name: validation + num_bytes: 3328798 + num_examples: 1306 + - name: test + num_bytes: 3631684 + num_examples: 1306 + download_size: 13886208 + dataset_size: 35262823 +- config_name: wstp.or + features: + - name: sectionText + dtype: string + - name: correctTitle + dtype: string + - name: titleA + dtype: string + - name: titleB + dtype: string + - name: titleC + dtype: string + - name: titleD + dtype: string + - name: url + dtype: string + splits: + - name: train + num_bytes: 10900006 + num_examples: 4015 + - name: validation + num_bytes: 1264935 + num_examples: 502 + - name: test + num_bytes: 1344652 + num_examples: 502 + download_size: 5319128 + dataset_size: 13509593 +- config_name: wstp.pa + features: + - name: sectionText + dtype: string + - name: correctTitle + dtype: string + - name: titleA + dtype: string + - name: titleB + dtype: string + - name: titleC + dtype: string + - name: titleD + dtype: string + - name: url + dtype: string + splits: + - name: train + num_bytes: 22189730 + num_examples: 8772 + - name: validation + num_bytes: 2789186 + num_examples: 1097 + - name: test + num_bytes: 2685767 + num_examples: 1097 + download_size: 11201369 + dataset_size: 27664683 +- config_name: wstp.ta + features: + - name: sectionText + dtype: string + - name: correctTitle + dtype: string + - name: titleA + dtype: string + - name: titleB + dtype: string + - name: titleC + dtype: string + - name: titleD + dtype: string + - name: url + dtype: string + splits: + - name: train + num_bytes: 151929218 + num_examples: 48940 + - name: validation + num_bytes: 18817167 + num_examples: 6117 + - name: test + num_bytes: 18815071 + num_examples: 6118 + download_size: 68699092 + dataset_size: 189561456 +- config_name: wstp.te + features: + - name: sectionText + dtype: string + - name: correctTitle + dtype: string + - name: titleA + dtype: string + - name: titleB + dtype: string + - name: titleC + dtype: string + - name: titleD + dtype: string + - name: url + dtype: string + splits: + - name: train + num_bytes: 151696691 + num_examples: 80000 + - name: validation + num_bytes: 19003169 + num_examples: 10000 + - name: test + num_bytes: 18991913 + num_examples: 10000 + download_size: 50158580 + dataset_size: 189691773 +configs: +- config_name: actsa-sc.te + data_files: + - split: train + path: actsa-sc.te/train-* + - split: validation + path: actsa-sc.te/validation-* + - split: test + path: actsa-sc.te/test-* +- config_name: bbca.hi + data_files: + - split: train + path: bbca.hi/train-* + - split: test + path: bbca.hi/test-* +- config_name: copa.en + data_files: + - split: train + path: copa.en/train-* + - split: validation + path: copa.en/validation-* + - split: test + path: copa.en/test-* +- config_name: copa.gu + data_files: + - split: train + path: copa.gu/train-* + - split: validation + path: copa.gu/validation-* + - split: test + path: copa.gu/test-* +- config_name: copa.hi + data_files: + - split: train + path: copa.hi/train-* + - split: validation + path: copa.hi/validation-* + - split: test + path: 
copa.hi/test-* +- config_name: copa.mr + data_files: + - split: train + path: copa.mr/train-* + - split: validation + path: copa.mr/validation-* + - split: test + path: copa.mr/test-* +- config_name: csqa.as + data_files: + - split: test + path: csqa.as/test-* +- config_name: csqa.bn + data_files: + - split: test + path: csqa.bn/test-* +- config_name: csqa.gu + data_files: + - split: test + path: csqa.gu/test-* +- config_name: csqa.hi + data_files: + - split: test + path: csqa.hi/test-* +- config_name: csqa.kn + data_files: + - split: test + path: csqa.kn/test-* +- config_name: csqa.ml + data_files: + - split: test + path: csqa.ml/test-* +- config_name: csqa.mr + data_files: + - split: test + path: csqa.mr/test-* +- config_name: csqa.or + data_files: + - split: test + path: csqa.or/test-* +- config_name: csqa.pa + data_files: + - split: test + path: csqa.pa/test-* +- config_name: csqa.ta + data_files: + - split: test + path: csqa.ta/test-* +- config_name: csqa.te + data_files: + - split: test + path: csqa.te/test-* +- config_name: cvit-mkb-clsr.en-bn + data_files: + - split: test + path: cvit-mkb-clsr.en-bn/test-* +- config_name: cvit-mkb-clsr.en-gu + data_files: + - split: test + path: cvit-mkb-clsr.en-gu/test-* +- config_name: cvit-mkb-clsr.en-hi + data_files: + - split: test + path: cvit-mkb-clsr.en-hi/test-* +- config_name: cvit-mkb-clsr.en-ml + data_files: + - split: test + path: cvit-mkb-clsr.en-ml/test-* +- config_name: cvit-mkb-clsr.en-mr + data_files: + - split: test + path: cvit-mkb-clsr.en-mr/test-* +- config_name: cvit-mkb-clsr.en-or + data_files: + - split: test + path: cvit-mkb-clsr.en-or/test-* +- config_name: cvit-mkb-clsr.en-ta + data_files: + - split: test + path: cvit-mkb-clsr.en-ta/test-* +- config_name: cvit-mkb-clsr.en-te + data_files: + - split: test + path: cvit-mkb-clsr.en-te/test-* +- config_name: cvit-mkb-clsr.en-ur + data_files: + - split: test + path: cvit-mkb-clsr.en-ur/test-* +- config_name: iitp-mr.hi + data_files: + - split: train + path: iitp-mr.hi/train-* + - split: validation + path: iitp-mr.hi/validation-* + - split: test + path: iitp-mr.hi/test-* +- config_name: iitp-pr.hi + data_files: + - split: train + path: iitp-pr.hi/train-* + - split: validation + path: iitp-pr.hi/validation-* + - split: test + path: iitp-pr.hi/test-* +- config_name: inltkh.gu + data_files: + - split: train + path: inltkh.gu/train-* + - split: validation + path: inltkh.gu/validation-* + - split: test + path: inltkh.gu/test-* +- config_name: inltkh.ml + data_files: + - split: train + path: inltkh.ml/train-* + - split: validation + path: inltkh.ml/validation-* + - split: test + path: inltkh.ml/test-* +- config_name: inltkh.mr + data_files: + - split: train + path: inltkh.mr/train-* + - split: validation + path: inltkh.mr/validation-* + - split: test + path: inltkh.mr/test-* +- config_name: inltkh.ta + data_files: + - split: train + path: inltkh.ta/train-* + - split: validation + path: inltkh.ta/validation-* + - split: test + path: inltkh.ta/test-* +- config_name: inltkh.te + data_files: + - split: train + path: inltkh.te/train-* + - split: validation + path: inltkh.te/validation-* + - split: test + path: inltkh.te/test-* +- config_name: md.hi + data_files: + - split: train + path: md.hi/train-* + - split: validation + path: md.hi/validation-* + - split: test + path: md.hi/test-* +- config_name: sna.bn + data_files: + - split: train + path: sna.bn/train-* + - split: validation + path: sna.bn/validation-* + - split: test + path: sna.bn/test-* +- config_name: wiki-ner.as + data_files: 
+ - split: train + path: wiki-ner.as/train-* + - split: validation + path: wiki-ner.as/validation-* + - split: test + path: wiki-ner.as/test-* +- config_name: wiki-ner.bn + data_files: + - split: train + path: wiki-ner.bn/train-* + - split: validation + path: wiki-ner.bn/validation-* + - split: test + path: wiki-ner.bn/test-* +- config_name: wiki-ner.gu + data_files: + - split: train + path: wiki-ner.gu/train-* + - split: validation + path: wiki-ner.gu/validation-* + - split: test + path: wiki-ner.gu/test-* +- config_name: wiki-ner.hi + data_files: + - split: train + path: wiki-ner.hi/train-* + - split: validation + path: wiki-ner.hi/validation-* + - split: test + path: wiki-ner.hi/test-* +- config_name: wiki-ner.kn + data_files: + - split: train + path: wiki-ner.kn/train-* + - split: validation + path: wiki-ner.kn/validation-* + - split: test + path: wiki-ner.kn/test-* +- config_name: wiki-ner.ml + data_files: + - split: train + path: wiki-ner.ml/train-* + - split: validation + path: wiki-ner.ml/validation-* + - split: test + path: wiki-ner.ml/test-* +- config_name: wiki-ner.mr + data_files: + - split: train + path: wiki-ner.mr/train-* + - split: validation + path: wiki-ner.mr/validation-* + - split: test + path: wiki-ner.mr/test-* +- config_name: wiki-ner.or + data_files: + - split: train + path: wiki-ner.or/train-* + - split: validation + path: wiki-ner.or/validation-* + - split: test + path: wiki-ner.or/test-* +- config_name: wiki-ner.pa + data_files: + - split: train + path: wiki-ner.pa/train-* + - split: validation + path: wiki-ner.pa/validation-* + - split: test + path: wiki-ner.pa/test-* +- config_name: wiki-ner.ta + data_files: + - split: train + path: wiki-ner.ta/train-* + - split: validation + path: wiki-ner.ta/validation-* + - split: test + path: wiki-ner.ta/test-* +- config_name: wiki-ner.te + data_files: + - split: train + path: wiki-ner.te/train-* + - split: validation + path: wiki-ner.te/validation-* + - split: test + path: wiki-ner.te/test-* +- config_name: wnli.en + data_files: + - split: train + path: wnli.en/train-* + - split: validation + path: wnli.en/validation-* + - split: test + path: wnli.en/test-* +- config_name: wnli.gu + data_files: + - split: train + path: wnli.gu/train-* + - split: validation + path: wnli.gu/validation-* + - split: test + path: wnli.gu/test-* +- config_name: wnli.hi + data_files: + - split: train + path: wnli.hi/train-* + - split: validation + path: wnli.hi/validation-* + - split: test + path: wnli.hi/test-* +- config_name: wnli.mr + data_files: + - split: train + path: wnli.mr/train-* + - split: validation + path: wnli.mr/validation-* + - split: test + path: wnli.mr/test-* +- config_name: wstp.as + data_files: + - split: train + path: wstp.as/train-* + - split: validation + path: wstp.as/validation-* + - split: test + path: wstp.as/test-* +- config_name: wstp.bn + data_files: + - split: train + path: wstp.bn/train-* + - split: validation + path: wstp.bn/validation-* + - split: test + path: wstp.bn/test-* +- config_name: wstp.gu + data_files: + - split: train + path: wstp.gu/train-* + - split: validation + path: wstp.gu/validation-* + - split: test + path: wstp.gu/test-* +- config_name: wstp.hi + data_files: + - split: train + path: wstp.hi/train-* + - split: validation + path: wstp.hi/validation-* + - split: test + path: wstp.hi/test-* +- config_name: wstp.kn + data_files: + - split: train + path: wstp.kn/train-* + - split: validation + path: wstp.kn/validation-* + - split: test + path: wstp.kn/test-* +- config_name: wstp.ml + data_files: + 
- split: train + path: wstp.ml/train-* + - split: validation + path: wstp.ml/validation-* + - split: test + path: wstp.ml/test-* +- config_name: wstp.mr + data_files: + - split: train + path: wstp.mr/train-* + - split: validation + path: wstp.mr/validation-* + - split: test + path: wstp.mr/test-* +- config_name: wstp.or + data_files: + - split: train + path: wstp.or/train-* + - split: validation + path: wstp.or/validation-* + - split: test + path: wstp.or/test-* +- config_name: wstp.pa + data_files: + - split: train + path: wstp.pa/train-* + - split: validation + path: wstp.pa/validation-* + - split: test + path: wstp.pa/test-* +- config_name: wstp.ta + data_files: + - split: train + path: wstp.ta/train-* + - split: validation + path: wstp.ta/validation-* + - split: test + path: wstp.ta/test-* +- config_name: wstp.te + data_files: + - split: train + path: wstp.te/train-* + - split: validation + path: wstp.te/validation-* + - split: test + path: wstp.te/test-* --- # Dataset Card for "indic_glue" diff --git a/actsa-sc.te/test-00000-of-00001.parquet b/actsa-sc.te/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bbec268af369e347ef7f3bb5288af09d5ca66536 --- /dev/null +++ b/actsa-sc.te/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:547e931b7a6a7dc5691cbfbb34d4aad88c6057190928f00e30878ee0280671c8 +size 73658 diff --git a/actsa-sc.te/train-00000-of-00001.parquet b/actsa-sc.te/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..593fe68ecb6e0b5521cacc4f039d180f382839a8 --- /dev/null +++ b/actsa-sc.te/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a9b373ca141cbd7e4c85f6fac44819bcb41964e19f8c7aabdecb1a059f49f5b +size 581222 diff --git a/actsa-sc.te/validation-00000-of-00001.parquet b/actsa-sc.te/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5cd3abf5b36883ebfc0eb8b6141c1e7b75003086 --- /dev/null +++ b/actsa-sc.te/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58319d149d570e75b95099f2e92302618b3267c19e6721c44aa353c5f9e0860d +size 72750 diff --git a/bbca.hi/test-00000-of-00001.parquet b/bbca.hi/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e4aa3ea94c6b8d996b03dae0d5d4d42071427000 --- /dev/null +++ b/bbca.hi/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bfe107242ab37c2b1507d7dfddbf0888f10541e6b26db885ae2357f0ce3c5672 +size 2055843 diff --git a/bbca.hi/train-00000-of-00001.parquet b/bbca.hi/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..afa3e948e5568685de97206aad3db4ff7fe94090 --- /dev/null +++ b/bbca.hi/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:879f7a778310ded1027c80be11115d0143101bb03e669cb387749a540f77e272 +size 8293172 diff --git a/copa.en/test-00000-of-00001.parquet b/copa.en/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ebe170a2eed5628c071c0a2b1c11fdfaf3bcd898 --- /dev/null +++ b/copa.en/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85f7d93b8edb41ef33fa17a879978f7738b199b483aed2ab80270baa684bc752 +size 37129 diff --git a/copa.en/train-00000-of-00001.parquet 
b/copa.en/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0c6b6bdc1b6beaf22ab1f7993a1dc2000c41d9f3 --- /dev/null +++ b/copa.en/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f9e67481402bbb497ee8b03cbdab409877d87cd41ee66a6a36a89210a9d16b3 +size 31352 diff --git a/copa.en/validation-00000-of-00001.parquet b/copa.en/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e7212e6cd584728f6fa3ed0c2b032df814e1aee6 --- /dev/null +++ b/copa.en/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76bacedb05c38fdbee87dd07225dc0639da82b6794c94079da1a962f9b21f674 +size 10950 diff --git a/copa.gu/test-00000-of-00001.parquet b/copa.gu/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..22db25f02a097f83fd79b0aebc4b4653352e0248 --- /dev/null +++ b/copa.gu/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cf523936519f4717809d6ea3256f0072cc7f741fcf47fe42f7b52789bb907ad +size 50219 diff --git a/copa.gu/train-00000-of-00001.parquet b/copa.gu/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4638fae69d04d7698af75f37cd85c98f3d8e28f0 --- /dev/null +++ b/copa.gu/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:154695a1276de70307d0d4550e6d26bfba16f66cea1999842228b2c4086e5e81 +size 43221 diff --git a/copa.gu/validation-00000-of-00001.parquet b/copa.gu/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5fe25b224abc5264cf9d57bca4e20ef30bc68d37 --- /dev/null +++ b/copa.gu/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f05c014c2cff41fc897136680400856677ad9a1349aecd78f4a051f2a2f67e04 +size 14228 diff --git a/copa.hi/test-00000-of-00001.parquet b/copa.hi/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..84944ce73cf3e6ee6e043c308ebc9cab9bb9381f --- /dev/null +++ b/copa.hi/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bd61116c47f6a2372e105d136bf20b8092bd8da090b4d8d565fa86341c400aa +size 48589 diff --git a/copa.hi/train-00000-of-00001.parquet b/copa.hi/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b81dc16b4fb7d5d5c8a6002bb1d967fe99ff381d --- /dev/null +++ b/copa.hi/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1636be02af20935fb715b927eaf4d294bfdb25241a136729f743fcf1a4294a9 +size 41769 diff --git a/copa.hi/validation-00000-of-00001.parquet b/copa.hi/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9f2eb93f90fdea2b40ef534edcfa12e5f59cbcd2 --- /dev/null +++ b/copa.hi/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a5ecc82d55bf3432f81051a26dbe6cb8d727c81c59561fd02ed76775ad642de +size 13875 diff --git a/copa.mr/test-00000-of-00001.parquet b/copa.mr/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..24173876b510a42508cbea31e9025ac8424ce5ba --- /dev/null +++ b/copa.mr/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:854c3fd041944b398b613c811186eefb734f2b0f9870d899ba86e2e9d9618f0d +size 49376 diff --git a/copa.mr/train-00000-of-00001.parquet b/copa.mr/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a22d099524df3ab1201f71c13676f196248aa108 --- /dev/null +++ b/copa.mr/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8bfd062c70dad11b8bb141e69b07db6630b52a27e4ecdb35c5b1cbe3625df2e +size 42450 diff --git a/copa.mr/validation-00000-of-00001.parquet b/copa.mr/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7eeb20f577f0b571c9664df79a1680726c286c1f --- /dev/null +++ b/copa.mr/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08383e2343c4ed3981f7f9784e38bc3cfc96f85c84bb5a19489b7606016f41cd +size 14136 diff --git a/csqa.as/test-00000-of-00001.parquet b/csqa.as/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..426f97713d2fda2135d018c6f49dd789949b315e --- /dev/null +++ b/csqa.as/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df7c61b6b09ace6df683d4d9b10ca245d3b1f73e999a12fe3c34f1fc344ab87e +size 1390423 diff --git a/csqa.bn/test-00000-of-00001.parquet b/csqa.bn/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..dca7c4ad4045af3bcec06b49a6eb0a7316db5b98 --- /dev/null +++ b/csqa.bn/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1285f802c1c9295b488747b900a82e5ef93f8ee0fdc47c24ca7a7538c0f2694e +size 19648180 diff --git a/csqa.gu/test-00000-of-00001.parquet b/csqa.gu/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ccdf04bfcf764337b90dac35fa9a6d42d35157fe --- /dev/null +++ b/csqa.gu/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82e757345506fc4fa9e886562ba381a5606d4ebd3f435569bf302656cd1971e6 +size 6027825 diff --git a/csqa.hi/test-00000-of-00001.parquet b/csqa.hi/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..42a28fd899fca05b2c147fcb79f86af27c0ef107 --- /dev/null +++ b/csqa.hi/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e07820e037c4f49939b64427052cbaf9ee9a8d3aa8bb0a976d247d4640a37b7 +size 14711258 diff --git a/csqa.kn/test-00000-of-00001.parquet b/csqa.kn/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ed9ce5982a1e4ab7e7e161022124b84d3fc760df --- /dev/null +++ b/csqa.kn/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0fa9d7b234ce95b53022b8b64d0d8f3527d0db8b8863e05722be7ce66adf729 +size 7669655 diff --git a/csqa.ml/test-00000-of-00001.parquet b/csqa.ml/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..21569cfadb2575d26ae368be3a90862bb4fa443b --- /dev/null +++ b/csqa.ml/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:676aab623a259dd0052fd531f2677faac8eaeb9c05737333f6ff9853656cba02 +size 17382215 diff --git a/csqa.mr/test-00000-of-00001.parquet b/csqa.mr/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5fe378836f76a6aaf60f9aef0bd34f582b7c379e --- /dev/null +++ 
b/csqa.mr/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7f93e52004972f3a32cf2f8a188f4190faef9fd37b3f9d95a9c6ab46ca5bacf +size 5072738 diff --git a/csqa.or/test-00000-of-00001.parquet b/csqa.or/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..afa4a3d751543b5fa1c174851bff49c7015738b1 --- /dev/null +++ b/csqa.or/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d35f0a2cb5fa0e192dd2ba998d95a574018a2d13faf10d5d967b5fced4973b88 +size 948046 diff --git a/csqa.pa/test-00000-of-00001.parquet b/csqa.pa/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..18e92c875a084143dbeef65c5f9dfc3219398dfc --- /dev/null +++ b/csqa.pa/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46b2dc926197394a3a5cd9df6b8fd00eb0c290f89424fcba08e959b953331880 +size 2194109 diff --git a/csqa.ta/test-00000-of-00001.parquet b/csqa.ta/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5d3ad21e83500aec6d901866bf9b641f61b6b284 --- /dev/null +++ b/csqa.ta/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:743f1b5c0234cf5c9a736e51a8d2c4167626c3467aae416a2bfa011ffd64ee2f +size 20789467 diff --git a/csqa.te/test-00000-of-00001.parquet b/csqa.te/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..dd37e12715f4d95b72b1f42f37e682ef71f51836 --- /dev/null +++ b/csqa.te/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1753af7868bf24a4f1c95c58146f531ec29fe60b56ad08d28d7a70b21252311e +size 17447618 diff --git a/cvit-mkb-clsr.en-bn/test-00000-of-00001.parquet b/cvit-mkb-clsr.en-bn/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..355de00c1b3f5b093946eb3a744b2cb0a6225aff --- /dev/null +++ b/cvit-mkb-clsr.en-bn/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8a3b311d11c4bf717ba30f7ee3213b0da88ef1fa89e1bc1cf08445f6941c0b7 +size 945551 diff --git a/cvit-mkb-clsr.en-gu/test-00000-of-00001.parquet b/cvit-mkb-clsr.en-gu/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9f73494ac7282d1196606e086781d07fc4837f9b --- /dev/null +++ b/cvit-mkb-clsr.en-gu/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b00f1d013b8a43e99d070ad1542c7e4a8e8b241eeb92b535fd911a8dbf3869d +size 1093313 diff --git a/cvit-mkb-clsr.en-hi/test-00000-of-00001.parquet b/cvit-mkb-clsr.en-hi/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b1def27d4ebe4da44c869e09b632ce9a0b7d7f7e --- /dev/null +++ b/cvit-mkb-clsr.en-hi/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76f404a1703e54eed5cb533750678e96eaac620345b4c322341f509b0b69e148 +size 890609 diff --git a/cvit-mkb-clsr.en-ml/test-00000-of-00001.parquet b/cvit-mkb-clsr.en-ml/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..536256c7ed4b3ed8c9188b5c0f4cda4455c6ebd5 --- /dev/null +++ b/cvit-mkb-clsr.en-ml/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e76dd0ff59ab61e4179906d7ef428dce0c17ed0a7230794bf7b226ece8f78632 
+size 868956 diff --git a/cvit-mkb-clsr.en-mr/test-00000-of-00001.parquet b/cvit-mkb-clsr.en-mr/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6270643760f54a3e020fa9c4028c4bc70ecea421 --- /dev/null +++ b/cvit-mkb-clsr.en-mr/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1ac94d4f2412ebfb2148cae33acdf9d2f246cf572af5c3489e91922366609c7 +size 993961 diff --git a/cvit-mkb-clsr.en-or/test-00000-of-00001.parquet b/cvit-mkb-clsr.en-or/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6318be537def6b4974a4e853b79ad1ece90ffb59 --- /dev/null +++ b/cvit-mkb-clsr.en-or/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78cc4829a09dc65e12782fef852d8d7f656223aa66c8dd1dcf11585bf6039ff6 +size 134334 diff --git a/cvit-mkb-clsr.en-ta/test-00000-of-00001.parquet b/cvit-mkb-clsr.en-ta/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..dbcb87f9cd7dd7b9ca098dd89e0c8b2dad86d36f --- /dev/null +++ b/cvit-mkb-clsr.en-ta/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4454d483c2c876ee05ab48350f12d3bef1929a8c9d806acd45b96b216a367c5 +size 1091653 diff --git a/cvit-mkb-clsr.en-te/test-00000-of-00001.parquet b/cvit-mkb-clsr.en-te/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..23e5dc2ce74b05fbc85a90e2f545400402fdcdc2 --- /dev/null +++ b/cvit-mkb-clsr.en-te/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb3707a79cdc6bdd911014cdeb7bf4abf6e57446149ce4871c523b503a967926 +size 840410 diff --git a/cvit-mkb-clsr.en-ur/test-00000-of-00001.parquet b/cvit-mkb-clsr.en-ur/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d476e5c9018c5e91cd763317f3395651f089f7f3 --- /dev/null +++ b/cvit-mkb-clsr.en-ur/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eefebc6476f58dac3cdbadc967baa1a291b6ada4903b3c3514a9aab19c75d0e0 +size 166129 diff --git a/data/actsa.zip b/data/actsa.zip deleted file mode 100644 index 66574dd6f23f60d3072b53f7e9fc7d8d7579dce1..0000000000000000000000000000000000000000 --- a/data/actsa.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:709e1c6807c3ce923b44f576c9169b10ee574488bcf537f90ba7d6b109e347cd -size 380203 diff --git a/data/bbc-articles.zip b/data/bbc-articles.zip deleted file mode 100644 index 84e299943f78fcf392727c0d2f6f21d2b46f89d1..0000000000000000000000000000000000000000 --- a/data/bbc-articles.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2f1d4b8914b926dc9e860d6820a55576f571b4f2878cf88881612d88a65e0e8e -size 5770896 diff --git a/data/copa-translated.zip b/data/copa-translated.zip deleted file mode 100644 index 0376765e7fc3d0abbe1597e944268b54cbd96d65..0000000000000000000000000000000000000000 --- a/data/copa-translated.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a3f2f16a47a213f3d20bc00ecd0f0ea9f59fcd69e580e6af4d55acac7ac6f86b -size 765334 diff --git a/data/cvit-mkb.zip b/data/cvit-mkb.zip deleted file mode 100644 index 0c2ae4d5e4ca609c42029c320eea7f613e6853e4..0000000000000000000000000000000000000000 --- a/data/cvit-mkb.zip +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:a4f1eb51c84d0c6eee6a6f6676319a9edc4c67ef9fcf99a984fd290723b99439 -size 3693764 diff --git a/data/iitp-movie-reviews.zip b/data/iitp-movie-reviews.zip deleted file mode 100644 index b11f5f0698b64f1238b6d02bb7d547e3985c1fa4..0000000000000000000000000000000000000000 --- a/data/iitp-movie-reviews.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fed75ccb42efc93e13c35b7bf9353c0971a9ae3a7337b05061ac1d6c9cb2beac -size 1743573 diff --git a/data/iitp-product-reviews.zip b/data/iitp-product-reviews.zip deleted file mode 100644 index b148a70e613bf20ba96a77624c0598f80653ee4b..0000000000000000000000000000000000000000 --- a/data/iitp-product-reviews.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:72bccfebfc2344633576ef2aff934158a8bd2f297f5acc9901bc1e981cb4ae26 -size 268737 diff --git a/data/inltk-headlines.zip b/data/inltk-headlines.zip deleted file mode 100644 index 545f19376c1d1b42fce9625dffe0d38a78a95a64..0000000000000000000000000000000000000000 --- a/data/inltk-headlines.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d24a2c21675fb1bbd7e191f9f2e438333451334877bc2e6c7fad18b404a47355 -size 2064079 diff --git a/data/midas-discourse.zip b/data/midas-discourse.zip deleted file mode 100644 index e59a0d5434f9184a5fcd3d675d2bd4e5edef036a..0000000000000000000000000000000000000000 --- a/data/midas-discourse.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ddca828a39c7783d4504e5fdeda4aa1db6ca29c0e28b3c94e085a182163dee8b -size 1050668 diff --git a/data/soham-articles.zip b/data/soham-articles.zip deleted file mode 100644 index a69fae7d4fc0760ba03ffaafdf61ff3fbb8e48f0..0000000000000000000000000000000000000000 --- a/data/soham-articles.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fb2a564cc7ef45248370ff07b43a25f85f83a29f256ea1147b9e47a041725bde -size 11804705 diff --git a/data/wiki-cloze.zip b/data/wiki-cloze.zip deleted file mode 100644 index 284e023c95fca6098f30035326988f005a90bc47..0000000000000000000000000000000000000000 --- a/data/wiki-cloze.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b22ff7631b079375dca9e22d7772805a13d13cfc1906b19d042015b0b72c7848 -size 65105024 diff --git a/data/wiki-section-titles.zip b/data/wiki-section-titles.zip deleted file mode 100644 index 5238df4b89ac1fe73ee6a2e3ac002a3543141738..0000000000000000000000000000000000000000 --- a/data/wiki-section-titles.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cf802c0fbb3741514b67e3a4c711c79601932ffed9321c727f9375a1e32ad913 -size 242028608 diff --git a/data/wikiann-ner.zip b/data/wikiann-ner.zip deleted file mode 100644 index 1822027ea80bf79c73afc676a01ae585d4384fbc..0000000000000000000000000000000000000000 --- a/data/wikiann-ner.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a539df97a2942b1891bed69e33adbbabeffafe1ac45ccad61c5a14651c456167 -size 5997291 diff --git a/data/wnli-translated.zip b/data/wnli-translated.zip deleted file mode 100644 index 06f151484c82b986b58a1f162a8a395f0ec551c9..0000000000000000000000000000000000000000 --- a/data/wnli-translated.zip +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3f31ff6b104cfb0d9e4acacb130c18cd0ff7529f10b9728c9171145cbdb1fdf8 -size 600688 diff --git 
a/iitp-mr.hi/test-00000-of-00001.parquet b/iitp-mr.hi/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a9e2d802ed2f0bbd762cb6e5547ea368f633a14a --- /dev/null +++ b/iitp-mr.hi/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1777d86e2c10b212d4fad34df4f89d2c5a9d22908e9db06a6901a128e713c7d7 +size 269358 diff --git a/iitp-mr.hi/train-00000-of-00001.parquet b/iitp-mr.hi/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b882c62fb0b71a866976e3144460b36bdcc47c42 --- /dev/null +++ b/iitp-mr.hi/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a78b2896805ed9096e4e2d2e21f3e78802c74c231a6fe83ff9c833840d6bddc +size 2565491 diff --git a/iitp-mr.hi/validation-00000-of-00001.parquet b/iitp-mr.hi/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f2175c5312db76111f6861aba479fafb079143e7 --- /dev/null +++ b/iitp-mr.hi/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f62812dfc7ea9c6f1919660a9acc64154bcd7a6a676d5b39353e2d7ac44f82e +size 316913 diff --git a/iitp-pr.hi/test-00000-of-00001.parquet b/iitp-pr.hi/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1b07879f36e42940bdc37eeff47abc44a3755596 --- /dev/null +++ b/iitp-pr.hi/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ded8a00282ab6a6fa9ed180cd89c21e305dd14c5b80238df6ba5e0e7889c7029 +size 53042 diff --git a/iitp-pr.hi/train-00000-of-00001.parquet b/iitp-pr.hi/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..92be9fe757924e7add08f6a60329b955a4fab655 --- /dev/null +++ b/iitp-pr.hi/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:893e1414c36e09af92a3d5fabb82e3eba126ed36eaf4963041544a73a9999bf9 +size 403928 diff --git a/iitp-pr.hi/validation-00000-of-00001.parquet b/iitp-pr.hi/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d7ac06e0d97e605d3148a4c20284d3d43a2e7357 --- /dev/null +++ b/iitp-pr.hi/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c6ad7762a4261a27ffd306304912c61ed144d81f84bff6de7d7170387863587 +size 52852 diff --git a/indic_glue.py b/indic_glue.py deleted file mode 100644 index 5fee6f67c1a99c28ac203b0f6e21c350787bfab6..0000000000000000000000000000000000000000 --- a/indic_glue.py +++ /dev/null @@ -1,979 +0,0 @@ -"""The IndicGLUE benchmark.""" - - -import csv -import json -import textwrap - -import pandas as pd - -import datasets - - -_INDIC_GLUE_CITATION = """\ - @inproceedings{kakwani2020indicnlpsuite, - title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, - author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, - year={2020}, - booktitle={Findings of EMNLP}, -} -""" - -_INDIC_GLUE_DESCRIPTION = """\ - IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide - variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. 
-""" - -_DESCRIPTIONS = { - "wnli": textwrap.dedent( - """ - The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task - in which a system must read a sentence with a pronoun and select the referent of that pronoun from - a list of choices. The examples are manually constructed to foil simple statistical methods: Each - one is contingent on contextual information provided by a single word or phrase in the sentence. - To convert the problem into sentence pair classification, we construct sentence pairs by replacing - the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the - pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of - new examples derived from fiction books that was shared privately by the authors of the original - corpus. While the included training set is balanced between two classes, the test set is imbalanced - between them (65% not entailment). Also, due to a data quirk, the development set is adversarial: - hypotheses are sometimes shared between training and development examples, so if a model memorizes the - training examples, it will predict the wrong label on the corresponding development set - example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence - between a model's score on this task and its score on the unconverted original task. We - call the converted dataset WNLI (Winograd NLI). This dataset is translated and publicly released for 3 - Indian languages by AI4Bharat. - """ - ), - "copa": textwrap.dedent( - """ - The Choice Of Plausible Alternatives (COPA) evaluation provides researchers with a tool for assessing - progress in open-domain commonsense causal reasoning. COPA consists of 1000 questions, split equally - into development and test sets of 500 questions each. Each question is composed of a premise and two - alternatives, where the task is to select the alternative that more plausibly has a causal relation - with the premise. The correct alternative is randomized so that the expected performance of randomly - guessing is 50%. This dataset is translated and publicly released for 3 languages by AI4Bharat. - """ - ), - "sna": textwrap.dedent( - """ - This dataset is a collection of Bengali News articles. The dataset is used for classifying articles into - 6 different classes, namely national, international, state, kolkata, entertainment and sports. - """ - ), - "csqa": textwrap.dedent( - """ - Given a text with an entity randomly masked, the task is to predict that masked entity from a list of 4 - candidate entities. The dataset contains around 239k examples across 11 languages. - """ - ), - "wstp": textwrap.dedent( - """ - Predict the correct title for a Wikipedia section from a given list of four candidate titles. - The dataset has 400k examples across 11 Indian languages. - """ - ), - "inltkh": textwrap.dedent( - """ - Obtained from the iNLTK project. The corpus is a collection of headlines tagged with their news category. - Available for languages: gu, ml, mr, ta and te. - """ - ), - "bbca": textwrap.dedent( - """ - This release consists of 4335 Hindi documents with tags from the BBC Hindi News website. - """ - ), - "cvit-mkb-clsr": textwrap.dedent( - """ - CVIT Maan ki Baat Dataset - Given a sentence in language $L_1$, the task is to retrieve its translation - from a set of candidate sentences in language $L_2$. - The dataset contains around 39k parallel sentence pairs across 8 Indian languages.
- """ - ), - "iitp-mr": textwrap.dedent( - """ - IIT Patna Movie Reviews: Sentiment analysis corpus for movie reviews posted in Hindi. - """ - ), - "iitp-pr": textwrap.dedent( - """ - IIT Patna Product Reviews: Sentiment analysis corpus for product reviews posted in Hindi. - """ - ), - "actsa-sc": textwrap.dedent( - """ - ACTSA Corpus: Sentiment analysis corpus for Telugu sentences. - """ - ), - "md": textwrap.dedent( - """ - The Hindi Discourse Analysis dataset is a corpus for analyzing discourse modes present in its sentences. - It contains sentences from stories written by 11 famous authors from the 20th Century. 4-5 stories by - each author that were available in the public domain have been selected, resulting in a collection of 53 stories. - Most of these short stories were originally written in Hindi but some of them were written in other Indian languages - and later translated to Hindi. - """ - ), - "wiki-ner": textwrap.dedent( - """ - The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been constructed using - the linked entities in Wikipedia pages for 282 different languages, including the 11 Indian languages covered by IndicGLUE. - """ - ), -} - -_CITATIONS = { - "wnli": textwrap.dedent( - """ - @inproceedings{kakwani2020indicnlpsuite, - title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, - author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, - year={2020}, - booktitle={Findings of EMNLP}, - } - @inproceedings{Levesque2011TheWS, - title={The Winograd Schema Challenge}, - author={H. Levesque and E. Davis and L. Morgenstern}, - booktitle={KR}, - year={2011} - } - """ - ), - "copa": textwrap.dedent( - """ - @inproceedings{kakwani2020indicnlpsuite, - title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, - author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, - year={2020}, - booktitle={Findings of EMNLP}, - } - @inproceedings{Gordon2011SemEval2012T7, - title={SemEval-2012 Task 7: Choice of Plausible Alternatives: An Evaluation of Commonsense Causal Reasoning}, - author={Andrew S. Gordon and Zornitsa Kozareva and Melissa Roemmele}, - booktitle={SemEval@NAACL-HLT}, - year={2011} - } - """ - ), - "sna": textwrap.dedent( - """ - https://www.kaggle.com/csoham/classification-bengali-news-articles-indicnlp - """ - ), - "csqa": textwrap.dedent( - """ - @inproceedings{kakwani2020indicnlpsuite, - title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, - author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, - year={2020}, - booktitle={Findings of EMNLP}, - } - """ - ), - "wstp": textwrap.dedent( - """ - @inproceedings{kakwani2020indicnlpsuite, - title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, - author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M.
Khapra and Pratyush Kumar}, - year={2020}, - booktitle={Findings of EMNLP}, - } - """ - ), - "inltkh": textwrap.dedent( - """ - https://github.com/goru001/inltk - """ - ), - "bbca": textwrap.dedent( - """ - https://github.com/NirantK/hindi2vec/releases/tag/bbc-hindi-v0.1 - """ - ), - "cvit-mkb-clsr": textwrap.dedent( - """ - @inproceedings{siripragada-etal-2020-multilingual, - title = "A Multilingual Parallel Corpora Collection Effort for {I}ndian Languages", - author = "Siripragada, Shashank and - Philip, Jerin and - Namboodiri, Vinay P. and - Jawahar, C V", - booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference", - month = may, - year = "2020", - address = "Marseille, France", - publisher = "European Language Resources Association", - url = "https://www.aclweb.org/anthology/2020.lrec-1.462", - pages = "3743--3751", - abstract = "We present sentence aligned parallel corpora across 10 Indian Languages - Hindi, Telugu, Tamil, Malayalam, Gujarati, Urdu, Bengali, Oriya, Marathi, Punjabi, and English - many of which are categorized as low resource. The corpora are compiled from online sources which have content shared across languages. The corpora presented significantly extends present resources that are either not large enough or are restricted to a specific domain (such as health). We also provide a separate test corpus compiled from an independent online source that can be independently used for validating the performance in 10 Indian languages. Alongside, we report on the methods of constructing such corpora using tools enabled by recent advances in machine translation and cross-lingual retrieval using deep neural network based methods.", - language = "English", - ISBN = "979-10-95546-34-4", - } - """ - ), - "iitp-mr": textwrap.dedent( - """ - @inproceedings{akhtar-etal-2016-hybrid, - title = "A Hybrid Deep Learning Architecture for Sentiment Analysis", - author = "Akhtar, Md Shad and - Kumar, Ayush and - Ekbal, Asif and - Bhattacharyya, Pushpak", - booktitle = "Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers", - month = dec, - year = "2016", - address = "Osaka, Japan", - publisher = "The COLING 2016 Organizing Committee", - url = "https://www.aclweb.org/anthology/C16-1047", - pages = "482--493", - abstract = "In this paper, we propose a novel hybrid deep learning archtecture which is highly efficient for sentiment analysis in resource-poor languages. We learn sentiment embedded vectors from the Convolutional Neural Network (CNN). These are augmented to a set of optimized features selected through a multi-objective optimization (MOO) framework. The sentiment augmented optimized vector obtained at the end is used for the training of SVM for sentiment classification. We evaluate our proposed approach for coarse-grained (i.e. sentence level) as well as fine-grained (i.e. aspect level) sentiment analysis on four Hindi datasets covering varying domains. In order to show that our proposed method is generic in nature we also evaluate it on two benchmark English datasets. Evaluation shows that the results of the proposed method are consistent across all the datasets and often outperforms the state-of-art systems. 
To the best of our knowledge, this is the very first attempt where such a deep learning model is used for less-resourced languages such as Hindi.", -} - """ - ), - "iitp-pr": textwrap.dedent( - """ - @inproceedings{akhtar-etal-2016-hybrid, - title = "A Hybrid Deep Learning Architecture for Sentiment Analysis", - author = "Akhtar, Md Shad and - Kumar, Ayush and - Ekbal, Asif and - Bhattacharyya, Pushpak", - booktitle = "Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers", - month = dec, - year = "2016", - address = "Osaka, Japan", - publisher = "The COLING 2016 Organizing Committee", - url = "https://www.aclweb.org/anthology/C16-1047", - pages = "482--493", - abstract = "In this paper, we propose a novel hybrid deep learning archtecture which is highly efficient for sentiment analysis in resource-poor languages. We learn sentiment embedded vectors from the Convolutional Neural Network (CNN). These are augmented to a set of optimized features selected through a multi-objective optimization (MOO) framework. The sentiment augmented optimized vector obtained at the end is used for the training of SVM for sentiment classification. We evaluate our proposed approach for coarse-grained (i.e. sentence level) as well as fine-grained (i.e. aspect level) sentiment analysis on four Hindi datasets covering varying domains. In order to show that our proposed method is generic in nature we also evaluate it on two benchmark English datasets. Evaluation shows that the results of the proposed method are consistent across all the datasets and often outperforms the state-of-art systems. To the best of our knowledge, this is the very first attempt where such a deep learning model is used for less-resourced languages such as Hindi.", - } - """ - ), - "actsa-sc": textwrap.dedent( - """ - @inproceedings{mukku-mamidi-2017-actsa, - title = "{ACTSA}: Annotated Corpus for {T}elugu Sentiment Analysis", - author = "Mukku, Sandeep Sricharan and - Mamidi, Radhika", - booktitle = "Proceedings of the First Workshop on Building Linguistically Generalizable {NLP} Systems", - month = sep, - year = "2017", - address = "Copenhagen, Denmark", - publisher = "Association for Computational Linguistics", - url = "https://www.aclweb.org/anthology/W17-5408", - doi = "10.18653/v1/W17-5408", - pages = "54--58", - abstract = "Sentiment analysis deals with the task of determining the polarity of a document or sentence and has received a lot of attention in recent years for the English language. With the rapid growth of social media these days, a lot of data is available in regional languages besides English. Telugu is one such regional language with abundant data available in social media, but it{'}s hard to find a labelled data of sentences for Telugu Sentiment Analysis. In this paper, we describe an effort to build a gold-standard annotated corpus of Telugu sentences to support Telugu Sentiment Analysis. The corpus, named ACTSA (Annotated Corpus for Telugu Sentiment Analysis) has a collection of Telugu sentences taken from different sources which were then pre-processed and manually annotated by native Telugu speakers using our annotation guidelines. In total, we have annotated 5457 sentences, which makes our corpus the largest resource currently available. 
The corpus and the annotation guidelines are made publicly available.", - } - """ - ), - "md": textwrap.dedent( - """ - @inproceedings{Dhanwal2020AnAD, - title={An Annotated Dataset of Discourse Modes in Hindi Stories}, - author={Swapnil Dhanwal and Hritwik Dutta and Hitesh Nankani and Nilay Shrivastava and Y. Kumar and Junyi Jessy Li and Debanjan Mahata and Rakesh Gosangi and Haimin Zhang and R. R. Shah and Amanda Stent}, - booktitle={LREC}, - year={2020} - } - """ - ), - "wiki-ner": textwrap.dedent( - """ - @inproceedings{pan-etal-2017-cross, - title = "Cross-lingual Name Tagging and Linking for 282 Languages", - author = "Pan, Xiaoman and - Zhang, Boliang and - May, Jonathan and - Nothman, Joel and - Knight, Kevin and - Ji, Heng", - booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", - month = jul, - year = "2017", - address = "Vancouver, Canada", - publisher = "Association for Computational Linguistics", - url = "https://www.aclweb.org/anthology/P17-1178", - doi = "10.18653/v1/P17-1178", - pages = "1946--1958", - abstract = "The ambitious goal of this work is to develop a cross-lingual name tagging and linking framework for 282 languages that exist in Wikipedia. Given a document in any of these languages, our framework is able to identify name mentions, assign a coarse-grained or fine-grained type to each mention, and link it to an English Knowledge Base (KB) if it is linkable. We achieve this goal by performing a series of new KB mining methods: generating {``}silver-standard{''} annotations by transferring annotations from English to other languages through cross-lingual links and KB properties, refining annotations through self-training and topic selection, deriving language-specific morphology features from anchor links, and mining word translation pairs from cross-lingual links. 
Both name tagging and linking results for 282 languages are promising on Wikipedia data and on-Wikipedia data.", - } - """ - ), -} - -_TEXT_FEATURES = { - "wnli": {"hypothesis": "sentence1", "premise": "sentence2"}, - "copa": {"premise": "premise", "choice1": "choice1", "choice2": "choice2", "question": "question"}, - "sna": {"text": "text"}, - "csqa": {"question": "question", "answer": "answer", "category": "category", "title": "title"}, - "wstp": { - "sectionText": "sectionText", - "correctTitle": "correctTitle", - "titleA": "titleA", - "titleB": "titleB", - "titleC": "titleC", - "titleD": "titleD", - "url": "url", - }, - "inltkh": {"text": "text"}, - "bbca": {"label": "label", "text": "text"}, - "cvit-mkb-clsr": {"sentence1": "sentence1", "sentence2": "sentence2"}, - "iitp-mr": {"text": "text"}, - "iitp-pr": {"text": "text"}, - "actsa-sc": {"text": "text"}, - "md": {"sentence": "sentence", "discourse_mode": "discourse_mode"}, - "wiki-ner": {}, -} - -_DATA_URLS = { - "wnli": "data/wnli-translated.zip", - "copa": "data/copa-translated.zip", - "sna": "data/soham-articles.zip", - "csqa": "data/wiki-cloze.zip", - "wstp": "data/wiki-section-titles.zip", - "inltkh": "data/inltk-headlines.zip", - "bbca": "data/bbc-articles.zip", - "cvit-mkb-clsr": "data/cvit-mkb.zip", - "iitp-mr": "data/iitp-movie-reviews.zip", - "iitp-pr": "data/iitp-product-reviews.zip", - "actsa-sc": "data/actsa.zip", - "md": "data/midas-discourse.zip", - "wiki-ner": "data/wikiann-ner.zip", -} - -_URLS = { - "wnli": "https://ai4bharat.iitm.ac.in/indic-glue", - "copa": "https://ai4bharat.iitm.ac.in/indic-glue", - "sna": "https://ai4bharat.iitm.ac.in/indic-glue", - "csqa": "https://ai4bharat.iitm.ac.in/indic-glue", - "wstp": "https://ai4bharat.iitm.ac.in/indic-glue", - "inltkh": "https://ai4bharat.iitm.ac.in/indic-glue", - "bbca": "https://ai4bharat.iitm.ac.in/indic-glue", - "cvit-mkb-clsr": "https://ai4bharat.iitm.ac.in/indic-glue", - "iitp-mr": "https://ai4bharat.iitm.ac.in/indic-glue", - "iitp-pr": "https://ai4bharat.iitm.ac.in/indic-glue", - "actsa-sc": "https://ai4bharat.iitm.ac.in/indic-glue", - "md": "https://ai4bharat.iitm.ac.in/indic-glue", - "wiki-ner": "https://ai4bharat.iitm.ac.in/indic-glue", -} - -_INDIC_GLUE_URL = "https://ai4bharat.iitm.ac.in/indic-glue" - -_WNLI_LANGS = ["en", "hi", "gu", "mr"] -_COPA_LANGS = ["en", "hi", "gu", "mr"] -_SNA_LANGS = ["bn"] -_CSQA_LANGS = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"] -_WSTP_LANGS = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"] -_iNLTKH_LANGS = ["gu", "ml", "mr", "ta", "te"] -_BBCA_LANGS = ["hi"] -_CVIT_MKB_CLSR = ["en-bn", "en-gu", "en-hi", "en-ml", "en-mr", "en-or", "en-ta", "en-te", "en-ur"] -_IITP_MR_LANGS = ["hi"] -_IITP_PR_LANGS = ["hi"] -_ACTSA_LANGS = ["te"] -_MD_LANGS = ["hi"] -_WIKI_NER_LANGS = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"] - -_NAMES = [] - -for lang in _WNLI_LANGS: - _NAMES.append(f"wnli.{lang}") - -for lang in _COPA_LANGS: - _NAMES.append(f"copa.{lang}") - -for lang in _SNA_LANGS: - _NAMES.append(f"sna.{lang}") - -for lang in _CSQA_LANGS: - _NAMES.append(f"csqa.{lang}") - -for lang in _WSTP_LANGS: - _NAMES.append(f"wstp.{lang}") - -for lang in _iNLTKH_LANGS: - _NAMES.append(f"inltkh.{lang}") - -for lang in _BBCA_LANGS: - _NAMES.append(f"bbca.{lang}") - -for lang in _CVIT_MKB_CLSR: - _NAMES.append(f"cvit-mkb-clsr.{lang}") - -for lang in _IITP_MR_LANGS: - _NAMES.append(f"iitp-mr.{lang}") - -for lang in _IITP_PR_LANGS: - _NAMES.append(f"iitp-pr.{lang}") - -for lang in 
_ACTSA_LANGS: - _NAMES.append(f"actsa-sc.{lang}") - -for lang in _MD_LANGS: - _NAMES.append(f"md.{lang}") - -for lang in _WIKI_NER_LANGS: - _NAMES.append(f"wiki-ner.{lang}") - - -class IndicGlueConfig(datasets.BuilderConfig): - """BuilderConfig for IndicGLUE.""" - - def __init__(self, data_url, citation, url, text_features, **kwargs): - """ - Args: - - data_url: `string`, url to download the zip file from. - citation: `string`, citation for the data set. - url: `string`, url for information about the data set. - text_features: `dict[string, string]`, map from the name of the feature - dict for each text field to the name of the column in the csv/json file - **kwargs: keyword arguments forwarded to super. - """ - super(IndicGlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs) - self.data_url = data_url - self.citation = citation - self.url = url - self.text_features = text_features - - -class IndicGlue(datasets.GeneratorBasedBuilder): - - BUILDER_CONFIGS = [ - IndicGlueConfig( - name=name, - description=_DESCRIPTIONS[name.split(".")[0]], - text_features=_TEXT_FEATURES[name.split(".")[0]], - data_url=_DATA_URLS[name.split(".")[0]], - citation=_CITATIONS[name.split(".")[0]], - url=_URLS[name.split(".")[0]], - ) - for name in _NAMES - ] - - def _info(self): - features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()} - - if self.config.name.startswith("copa"): - features["label"] = datasets.Value("int32") - - if self.config.name.startswith("sna"): - features["label"] = datasets.features.ClassLabel( - names=["kolkata", "state", "national", "sports", "entertainment", "international"] - ) - - if self.config.name.startswith("inltkh"): - features["label"] = datasets.features.ClassLabel( - names=[ - "entertainment", - "business", - "tech", - "sports", - "state", - "spirituality", - "tamil-cinema", - "positive", - "negative", - "neutral", - ] - ) - - if self.config.name.startswith("iitp"): - features["label"] = datasets.features.ClassLabel(names=["negative", "neutral", "positive"]) - - if self.config.name.startswith("wnli"): - features["label"] = datasets.features.ClassLabel(names=["not_entailment", "entailment", "None"]) - - if self.config.name.startswith("actsa"): - features["label"] = datasets.features.ClassLabel(names=["positive", "negative"]) - - if self.config.name.startswith("csqa"): - features["options"] = datasets.features.Sequence(datasets.Value("string")) - features["out_of_context_options"] = datasets.features.Sequence(datasets.Value("string")) - - if self.config.name.startswith("md"): - features["story_number"] = datasets.Value("int32") - features["id"] = datasets.Value("int32") - - if self.config.name.startswith("wiki-ner"): - features["tokens"] = datasets.features.Sequence(datasets.Value("string")) - features["ner_tags"] = datasets.features.Sequence( - datasets.features.ClassLabel(names=["B-LOC", "B-ORG", "B-PER", "I-LOC", "I-ORG", "I-PER", "O"]) - ) - features["additional_info"] = datasets.features.Sequence( - datasets.features.Sequence(datasets.Value("string")) - ) - - return datasets.DatasetInfo( - description=_INDIC_GLUE_DESCRIPTION + "\n" + self.config.description, - features=datasets.Features(features), - homepage=self.config.url, - citation=_INDIC_GLUE_CITATION + "\n" + self.config.citation, - ) - - def _split_generators(self, dl_manager): - - if self.config.name.startswith("wnli"): - archive = dl_manager.download(self.config.data_url) - task_name = self._get_task_name_from_data_url(self.config.data_url) - 
dl_dir = task_name + "/" + self.config.name.split(".")[1] - return [ - datasets.SplitGenerator( - name=datasets.Split.TRAIN, - gen_kwargs={ - "datafile": dl_dir + "/" + "train.csv", - "split": datasets.Split.TRAIN, - "key": "train-split", - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.VALIDATION, - gen_kwargs={ - "datafile": dl_dir + "/" + "dev.csv", - "split": datasets.Split.VALIDATION, - "key": "val-split", - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.TEST, - gen_kwargs={ - "datafile": dl_dir + "/" + "test.csv", - "split": datasets.Split.TEST, - "key": "test-split", - "files": dl_manager.iter_archive(archive), - }, - ), - ] - - if self.config.name.startswith("copa"): - archive = dl_manager.download(self.config.data_url) - task_name = self._get_task_name_from_data_url(self.config.data_url) - dl_dir = task_name + "/" + self.config.name.split(".")[1] - - return [ - datasets.SplitGenerator( - name=datasets.Split.TRAIN, - gen_kwargs={ - "datafile": dl_dir + "/" + "train.jsonl", - "split": datasets.Split.TRAIN, - "key": "train-split", - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.VALIDATION, - gen_kwargs={ - "datafile": dl_dir + "/" + "val.jsonl", - "split": datasets.Split.VALIDATION, - "key": "val-split", - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.TEST, - gen_kwargs={ - "datafile": dl_dir + "/" + "test.jsonl", - "split": datasets.Split.TEST, - "key": "test-split", - "files": dl_manager.iter_archive(archive), - }, - ), - ] - - if self.config.name.startswith("sna"): - archive = dl_manager.download(self.config.data_url) - task_name = self._get_task_name_from_data_url(self.config.data_url) - dl_dir = task_name + "/" + self.config.name.split(".")[1] - - return [ - datasets.SplitGenerator( - name=datasets.Split.TRAIN, - gen_kwargs={ - "datafile": dl_dir + "/" + "bn-train.csv", - "split": datasets.Split.TRAIN, - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.VALIDATION, - gen_kwargs={ - "datafile": dl_dir + "/" + "bn-valid.csv", - "split": datasets.Split.VALIDATION, - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.TEST, - gen_kwargs={ - "datafile": dl_dir + "/" + "bn-test.csv", - "split": datasets.Split.TEST, - "files": dl_manager.iter_archive(archive), - }, - ), - ] - - if self.config.name.startswith("csqa"): - archive = dl_manager.download(self.config.data_url) - task_name = self._get_task_name_from_data_url(self.config.data_url) - dl_dir = task_name - - return [ - datasets.SplitGenerator( - name=datasets.Split.TEST, - gen_kwargs={ - "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}.json", - "split": datasets.Split.TEST, - "files": dl_manager.iter_archive(archive), - }, - ) - ] - - if self.config.name.startswith("wstp"): - archive = dl_manager.download(self.config.data_url) - task_name = self._get_task_name_from_data_url(self.config.data_url) - dl_dir = task_name + "/" + self.config.name.split(".")[1] - - return [ - datasets.SplitGenerator( - name=datasets.Split.TRAIN, - gen_kwargs={ - "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.json", - "split": datasets.Split.TRAIN, - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.VALIDATION, - gen_kwargs={ - "datafile": dl_dir + "/" 
+ f"{self.config.name.split('.')[1]}-valid.json", - "split": datasets.Split.VALIDATION, - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.TEST, - gen_kwargs={ - "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.json", - "split": datasets.Split.TEST, - "files": dl_manager.iter_archive(archive), - }, - ), - ] - - if ( - self.config.name.startswith("inltkh") - or self.config.name.startswith("iitp") - or self.config.name.startswith("actsa") - ): - archive = dl_manager.download(self.config.data_url) - task_name = self._get_task_name_from_data_url(self.config.data_url) - dl_dir = task_name + "/" + self.config.name.split(".")[1] - - return [ - datasets.SplitGenerator( - name=datasets.Split.TRAIN, - gen_kwargs={ - "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.csv", - "split": datasets.Split.TRAIN, - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.VALIDATION, - gen_kwargs={ - "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-valid.csv", - "split": datasets.Split.VALIDATION, - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.TEST, - gen_kwargs={ - "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.csv", - "split": datasets.Split.TEST, - "files": dl_manager.iter_archive(archive), - }, - ), - ] - - if self.config.name.startswith("bbca"): - archive = dl_manager.download(self.config.data_url) - task_name = self._get_task_name_from_data_url(self.config.data_url) - dl_dir = task_name + "/" + self.config.name.split(".")[1] - - return [ - datasets.SplitGenerator( - name=datasets.Split.TRAIN, - gen_kwargs={ - "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.csv", - "split": datasets.Split.TRAIN, - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.TEST, - gen_kwargs={ - "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.csv", - "split": datasets.Split.TEST, - "files": dl_manager.iter_archive(archive), - }, - ), - ] - - if self.config.name.startswith("cvit"): - archive = dl_manager.download(self.config.data_url) - task_name = self._get_task_name_from_data_url(self.config.data_url) - dl_dir = task_name + "/" + self.config.name.split(".")[1] - - return [ - datasets.SplitGenerator( - name=datasets.Split.TEST, - gen_kwargs={ - "datafile": None, - "src": dl_dir + "/" + f"mkb.{self.config.name.split('.')[1].split('-')[0]}", - "tgt": dl_dir + "/" + f"mkb.{self.config.name.split('.')[1].split('-')[1]}", - "split": datasets.Split.TEST, - "files": dl_manager.iter_archive(archive), - }, - ) - ] - - if self.config.name.startswith("md"): - archive = dl_manager.download(self.config.data_url) - task_name = self._get_task_name_from_data_url(self.config.data_url) - dl_dir = task_name + "/" + self.config.name.split(".")[1] - - return [ - datasets.SplitGenerator( - name=datasets.Split.TRAIN, - gen_kwargs={ - "datafile": dl_dir + "/" + "train.json", - "split": datasets.Split.TRAIN, - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.VALIDATION, - gen_kwargs={ - "datafile": dl_dir + "/" + "val.json", - "split": datasets.Split.VALIDATION, - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.TEST, - gen_kwargs={ - "datafile": dl_dir + "/" + "test.json", - "split": datasets.Split.TEST, - "files": 
dl_manager.iter_archive(archive), - }, - ), - ] - - if self.config.name.startswith("wiki-ner"): - archive = dl_manager.download(self.config.data_url) - task_name = self._get_task_name_from_data_url(self.config.data_url) - dl_dir = task_name + "/" + self.config.name.split(".")[1] - - return [ - datasets.SplitGenerator( - name=datasets.Split.TRAIN, - gen_kwargs={ - "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-train.txt", - "split": datasets.Split.TRAIN, - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.VALIDATION, - gen_kwargs={ - "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-valid.txt", - "split": datasets.Split.VALIDATION, - "files": dl_manager.iter_archive(archive), - }, - ), - datasets.SplitGenerator( - name=datasets.Split.TEST, - gen_kwargs={ - "datafile": dl_dir + "/" + f"{self.config.name.split('.')[1]}-test.txt", - "split": datasets.Split.TEST, - "files": dl_manager.iter_archive(archive), - }, - ), - ] - - def _generate_examples(self, **args): - """Yields examples.""" - filepath = args["datafile"] - files = args["files"] - - if self.config.name.startswith("wnli"): - if args["key"] == "test-split": - for path, f in files: - if path == filepath: - data = csv.DictReader((line.decode("utf-8") for line in f)) - for id_, row in enumerate(data): - yield id_, {"hypothesis": row["sentence1"], "premise": row["sentence2"], "label": "None"} - break - else: - for path, f in files: - if path == filepath: - data = csv.DictReader((line.decode("utf-8") for line in f)) - for id_, row in enumerate(data): - label = "entailment" if row["label"] else "not_entailment" - yield id_, { - "hypothesis": row["sentence1"], - "premise": row["sentence2"], - "label": label, - } - break - - if self.config.name.startswith("copa"): - if args["key"] == "test-split": - for path, f in files: - if path == filepath: - lines = f.readlines() - data = map(lambda l: json.loads(l), lines) - data = list(data) - for id_, row in enumerate(data): - yield id_, { - "premise": row["premise"], - "choice1": row["choice1"], - "choice2": row["choice2"], - "question": row["question"], - "label": 0, - } - break - else: - for path, f in files: - if path == filepath: - lines = f.readlines() - data = map(lambda l: json.loads(l), lines) - data = list(data) - for id_, row in enumerate(data): - yield id_, { - "premise": row["premise"], - "choice1": row["choice1"], - "choice2": row["choice2"], - "question": row["question"], - "label": row["label"], - } - break - - if self.config.name.startswith("sna"): - for path, f in files: - if path == filepath: - df = pd.read_csv(f, names=["label", "text"]) - for id_, row in df.iterrows(): - yield id_, {"text": row["text"], "label": row["label"]} - break - - if self.config.name.startswith("csqa"): - for path, f in files: - if path == filepath: - data = json.load(f) - df = pd.DataFrame(data["cloze_data"]) - df["out_of_context_options"].loc[df["out_of_context_options"].isnull()] = ( - df["out_of_context_options"].loc[df["out_of_context_options"].isnull()].apply(lambda x: []) - ) - for id_, row in df.iterrows(): - yield id_, { - "question": row["question"], - "answer": row["answer"], - "category": row["category"], - "title": row["title"], - "out_of_context_options": row["out_of_context_options"], - "options": row["options"], - } - break - - if self.config.name.startswith("wstp"): - for path, f in files: - if path == filepath: - df = pd.read_json(f) - for id_, row in df.iterrows(): - yield id_, { - "sectionText": 
row["sectionText"], - "correctTitle": row["correctTitle"], - "titleA": row["titleA"], - "titleB": row["titleB"], - "titleC": row["titleC"], - "titleD": row["titleD"], - "url": row["url"], - } - break - - if ( - self.config.name.startswith("inltkh") - or self.config.name.startswith("bbca") - or self.config.name.startswith("iitp") - ): - for path, f in files: - if path == filepath: - df = pd.read_csv(f, names=["label", "text"]) - for id_, row in df.iterrows(): - yield id_, {"text": row["text"], "label": row["label"]} - break - - if self.config.name.startswith("actsa"): - for path, f in files: - if path == filepath: - df = pd.read_csv(f, names=["label", "text"]) - for id_, row in df.iterrows(): - label = "positive" if row["label"] else "negative" - yield id_, {"text": row["text"], "label": label} - break - - if self.config.name.startswith("cvit"): - source = args["src"] - target = args["tgt"] - src, tgt = None, None - for path, f in files: - if path == source: - src = f.read().decode("utf-8").splitlines() - elif path == target: - tgt = f.read().decode("utf-8").splitlines() - if src is not None and tgt is not None: - for id_, row in enumerate(zip(src, tgt)): - yield id_, {"sentence1": row[0], "sentence2": row[1]} - break - - if self.config.name.startswith("md"): - for path, f in files: - if path == filepath: - df = pd.read_json(f) - for id_, row in df.iterrows(): - yield id_, { - "story_number": row["Story_no"], - "sentence": row["Sentence"], - "discourse_mode": row["Discourse Mode"], - "id": row["id"], - } - break - - if self.config.name.startswith("wiki-ner"): - for path, f in files: - if path == filepath: - data = f.read().decode("utf-8").splitlines() - tokens = [] - labels = [] - infos = [] - for id_, row in enumerate(data): - row = row.split() - - if len(row) == 0: - yield id_, {"tokens": tokens, "ner_tags": labels, "additional_info": infos} - tokens = [] - labels = [] - infos = [] - continue - - tokens.append(row[0]) - labels.append(row[-1]) - infos.append(row[1:-1]) - break - - def _get_task_name_from_data_url(self, data_url): - return data_url.split("/")[-1].split(".")[0] diff --git a/inltkh.gu/test-00000-of-00001.parquet b/inltkh.gu/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f5ece0f0205c577baec0723635a96211dd8ed467 --- /dev/null +++ b/inltkh.gu/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b9b977c1ac1d93cafc895a18975f41c70b68bf9e004f0f7c630dd5d16763744 +size 52310 diff --git a/inltkh.gu/train-00000-of-00001.parquet b/inltkh.gu/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d5be7224ed38aee9282d0df2905a9f418eb933a2 --- /dev/null +++ b/inltkh.gu/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca36f21492471b1d91464cbc036197d107ec2f6cd7cdb8197f710021fae84ab2 +size 409933 diff --git a/inltkh.gu/validation-00000-of-00001.parquet b/inltkh.gu/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..008befbdacd70d918cd5b231c79cbffbc00e2d08 --- /dev/null +++ b/inltkh.gu/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5429788f707c8be510c52659a74a415bb2c943ec92896314eccac1b534bd80f5 +size 52851 diff --git a/inltkh.ml/test-00000-of-00001.parquet b/inltkh.ml/test-00000-of-00001.parquet new file mode 100644 index 
0000000000000000000000000000000000000000..72047fac5fbdb2b782eb1239cb42a01411a82168 --- /dev/null +++ b/inltkh.ml/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:082036fb9b83512f1edb7baf8f10a580a3cc2784c2c27763d82e1702ac2d8bf2 +size 57266 diff --git a/inltkh.ml/train-00000-of-00001.parquet b/inltkh.ml/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..822a4310494918ccc5c6ac67bba3eccf215f903e --- /dev/null +++ b/inltkh.ml/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97163cbfe177cb7b5a4a4e82ea2b6929aa7a0b1f3716387a16753a8c4f3900d9 +size 453922 diff --git a/inltkh.ml/validation-00000-of-00001.parquet b/inltkh.ml/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5a5707108dcfb975ef89e95bbf186acb7eb3ee67 --- /dev/null +++ b/inltkh.ml/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f5580eae9b62191f5aae5f368aa4a7f962a728cf4444edda7277681d36aac24 +size 59831 diff --git a/inltkh.mr/test-00000-of-00001.parquet b/inltkh.mr/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..63759531e86e367c6f55666cb0e2680e82e717da --- /dev/null +++ b/inltkh.mr/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26b1dd2987021761b9697b90caca24c60007e649391a7f94c6329e111afc5e8d +size 85083 diff --git a/inltkh.mr/train-00000-of-00001.parquet b/inltkh.mr/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9ea93febeca81ea940f6cfb4c536cc41e53a814f --- /dev/null +++ b/inltkh.mr/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4b083798221f2c514500f5a4ec0c1282f61af863c4d0f7b93d1ff7661f4f6bc +size 670346 diff --git a/inltkh.mr/validation-00000-of-00001.parquet b/inltkh.mr/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..697fcacac82aa2fe6d04aafed7b16f717c8c7c58 --- /dev/null +++ b/inltkh.mr/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6054d9b8ab4f004dc7ac346f986b163eca9ffeb0ddd0e82f1578f0086cf96e3 +size 84875 diff --git a/inltkh.ta/test-00000-of-00001.parquet b/inltkh.ta/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2cefa50f17fdb79c40367f7bdc1a105cd13a92ff --- /dev/null +++ b/inltkh.ta/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1197edb3f56ee3bfff95c58aa078e1683eb4d4300522f45e6a41ba036105eab3 +size 125841 diff --git a/inltkh.ta/train-00000-of-00001.parquet b/inltkh.ta/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..79380990be46a90171cff843a3ff491d8c36e996 --- /dev/null +++ b/inltkh.ta/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c32a7271ea1325ec34ce509fec401a918ec7de9074fec8bb4a9ec816dadb97f6 +size 1021095 diff --git a/inltkh.ta/validation-00000-of-00001.parquet b/inltkh.ta/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..fc9d86b060c47c2230641c869b7f6c97978167e0 --- /dev/null +++ b/inltkh.ta/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:fbe3310a3b4f4a7e8fc35cc02da99863c075d7c4b18d11350cff5065ffcddbc5 +size 124326 diff --git a/inltkh.te/test-00000-of-00001.parquet b/inltkh.te/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0d80a1c50a6cf4c863e5227928ae5b2c2bcc6d49 --- /dev/null +++ b/inltkh.te/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9994c9e9043ef0a94b5fa6d06378ebf089ebe05e3ec2c98592557c2ef1265600 +size 74728 diff --git a/inltkh.te/train-00000-of-00001.parquet b/inltkh.te/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e9868cd3f976f684138e51c92c5bb0fefcf2af7a --- /dev/null +++ b/inltkh.te/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7a334bbfff733a14c26e34b9102941197de45b268547683ed043ae59e3d4c13 +size 577652 diff --git a/inltkh.te/validation-00000-of-00001.parquet b/inltkh.te/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e8a870945f47fa257ce62ffd9b9e1b8f87e26d40 --- /dev/null +++ b/inltkh.te/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b79060153fefe679f2a0b4ec8ca809c00cbe37bba9c9f8f7cf2e416ea1d5f5a +size 73913 diff --git a/md.hi/test-00000-of-00001.parquet b/md.hi/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4d64a6de86527df09ab952a56506b179e002b0fd --- /dev/null +++ b/md.hi/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25cf4c2230186dcd726d3d5e8d95773b6739d33cf0f4c2f6ec6a342cdad194eb +size 94778 diff --git a/md.hi/train-00000-of-00001.parquet b/md.hi/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..594048960316b5f0bf442af2657a9626b8854c55 --- /dev/null +++ b/md.hi/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a669d59ac21a27279acca3503ccd077d81bfeff59936fff300eadd6b908bebae +size 749638 diff --git a/md.hi/validation-00000-of-00001.parquet b/md.hi/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cadbdd42387ff019983191d64e1d7da2a357b080 --- /dev/null +++ b/md.hi/validation-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:500508b45b65bc97de671d5f2a0da5b40cbd6a0054bfeb5e20d9c29f5d9a5e4a +size 95385 diff --git a/sna.bn/test-00000-of-00001.parquet b/sna.bn/test-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e1993ceb0cf8f6c08889667a57880f813ac20b80 --- /dev/null +++ b/sna.bn/test-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f98bdf87bbd53b19af5f1799aa0b6c23f8368cbfb83c65270801f8ee54ba6f9e +size 2151771 diff --git a/sna.bn/train-00000-of-00001.parquet b/sna.bn/train-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..fa595af227359d04f7884adb988e6a8be0b55c87 --- /dev/null +++ b/sna.bn/train-00000-of-00001.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0df754b33fffc37da33c546b07225f5aea4d91b63f95639c403740776a5e42cd +size 17166769 diff --git a/sna.bn/validation-00000-of-00001.parquet b/sna.bn/validation-00000-of-00001.parquet new file mode 100644 index 0000000000000000000000000000000000000000..95074e40eaeecb262ee45e3972b4260d80f67a09 
--- /dev/null
+++ b/sna.bn/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac503268d525844820da1c5cb5084bb1f5f533ac6283c043b0a72d046063a30e
+size 2097400
diff --git a/wiki-ner.as/test-00000-of-00001.parquet b/wiki-ner.as/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..15f667750fbe4fb5b0d8d47c458ceb816b8af80b
--- /dev/null
+++ b/wiki-ner.as/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11122d1b56f215d5e8badd840083761d0fdd6cc55208370d5f60aec55b0314b1
+size 7233
diff --git a/wiki-ner.as/train-00000-of-00001.parquet b/wiki-ner.as/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..4c40238e0a7669215a2d27cde1b277f134774f1c
--- /dev/null
+++ b/wiki-ner.as/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48fc10283ef6bd563dca59ba8fe1253a8e2d3c8fb81e2a02ac70b76faf8ff3c4
+size 58524
diff --git a/wiki-ner.as/validation-00000-of-00001.parquet b/wiki-ner.as/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..2abcf2f72f276e1ba49883c5cbaa8db41ad54ba1
--- /dev/null
+++ b/wiki-ner.as/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b376f3124e8558e851da2348e147a4d38cb1ff84e7f517f76cdaa2b712d49a39
+size 7162
diff --git a/wiki-ner.bn/test-00000-of-00001.parquet b/wiki-ner.bn/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..bf1b0172b176ee628b4554677cae41e23c7b76ce
--- /dev/null
+++ b/wiki-ner.bn/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2200897c75a78cd81404f64fe8a19829825b3bb7b1e4d122a2a787a79192de8f
+size 98169
diff --git a/wiki-ner.bn/train-00000-of-00001.parquet b/wiki-ner.bn/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..fd95f87f7367ebfef2793fb210a930b0f6843031
--- /dev/null
+++ b/wiki-ner.bn/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31c2002d52bbfc7e85adebf8a91c167cff85ae7d99d7ad0c2a0396444a5c7608
+size 1114512
diff --git a/wiki-ner.bn/validation-00000-of-00001.parquet b/wiki-ner.bn/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..8d9150c3dd5575eecb6e17ee54edc04dfb13f78b
--- /dev/null
+++ b/wiki-ner.bn/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b2b2995c0cea3a779b85e0ecc44baf99d98685c650cdcfb715923d341fb8c78
+size 65538
diff --git a/wiki-ner.gu/test-00000-of-00001.parquet b/wiki-ner.gu/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..e56a5d518261ce462a6d859ea71e48f060e44275
--- /dev/null
+++ b/wiki-ner.gu/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfcbfd849f5932006a552203115d74b08868ae2f61feef23791d529cca510932
+size 33435
diff --git a/wiki-ner.gu/train-00000-of-00001.parquet b/wiki-ner.gu/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..1e7bf05a678ffced9e34914529332d73d12d6072
--- /dev/null
+++ b/wiki-ner.gu/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9238436ddc55d035c398418c27b2bcd1f0dc9c34bcfe839cd86232e0ebd33a50
+size 258555
diff --git a/wiki-ner.gu/validation-00000-of-00001.parquet b/wiki-ner.gu/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..11de357bcb5ec4806a39da5e13add214a89a04a8
--- /dev/null
+++ b/wiki-ner.gu/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf6ba256a175ca7b3fa865df0656ea97edac5efcb103499ce8dbb03751f702dd
+size 37670
diff --git a/wiki-ner.hi/test-00000-of-00001.parquet b/wiki-ner.hi/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..e1c3c9a24ed2d93f8d915912138de44391113959
--- /dev/null
+++ b/wiki-ner.hi/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb2f683459da975db9107b6e021c381d97eaf524f810f86a949ca82699700f9d
+size 88085
diff --git a/wiki-ner.hi/train-00000-of-00001.parquet b/wiki-ner.hi/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d2fda5cc893cab48b298bad91be6cf2c30dd442b
--- /dev/null
+++ b/wiki-ner.hi/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:968dfed393f8e0fa45b04ab10dea826dd2a2d4f4cda55806c820d7cb419ebab6
+size 749013
diff --git a/wiki-ner.hi/validation-00000-of-00001.parquet b/wiki-ner.hi/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..962ff4d80bd6a3b1c62becb152d022dba63ca0a2
--- /dev/null
+++ b/wiki-ner.hi/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c3b49010ce53e2dfe1ba2c989b1ad149e6d703630c914b9f5c68d58ad254de0
+size 111034
diff --git a/wiki-ner.kn/test-00000-of-00001.parquet b/wiki-ner.kn/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ec36fc0b05fe8448e1c708ce8ab167baeaf2ae5f
--- /dev/null
+++ b/wiki-ner.kn/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99cd2ad5bf45ba2f16fa24212be259734590e68937f1f643f4e527d8a101a0b2
+size 47282
diff --git a/wiki-ner.kn/train-00000-of-00001.parquet b/wiki-ner.kn/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..19f571aefbb3f26554936d4c04b4cdf6e33e162b
--- /dev/null
+++ b/wiki-ner.kn/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89a0c5f084a1ab11348e7e29a758d45cc4fb55709cfd67b2cd1415b5b19a7721
+size 326502
diff --git a/wiki-ner.kn/validation-00000-of-00001.parquet b/wiki-ner.kn/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..fefa84752c177c694ec2d681c4c876c2b3e23d54
--- /dev/null
+++ b/wiki-ner.kn/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b0819f6c0a10f0855539713697c8e1c5aa62d0f39ad95db6fc1f5eb337e1079
+size 48093
diff --git a/wiki-ner.ml/test-00000-of-00001.parquet b/wiki-ner.ml/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..24bed2b0017475e9901568305a9cc2d6a86c6860
--- /dev/null
+++ b/wiki-ner.ml/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6671f3d9d42ad511209ca40ec7cf652d390c5f936104631fb6b294d64184afc2
+size 241349
diff --git a/wiki-ner.ml/train-00000-of-00001.parquet b/wiki-ner.ml/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..727d68956d8ea1ebbd363b73ff27ac1784bef20f
--- /dev/null
+++ b/wiki-ner.ml/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5433885c95251c6ebd907f63a0f69c157807ef7fe55a940f9d5f612909e9322b
+size 1904709
diff --git a/wiki-ner.ml/validation-00000-of-00001.parquet b/wiki-ner.ml/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..135b44e8d1821ed70c6ab103482828a6969028e1
--- /dev/null
+++ b/wiki-ner.ml/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83680b721808ad35fefc5c8edeeafc76c55bbc6b367ff78c5b882dbe97054995
+size 244384
diff --git a/wiki-ner.mr/test-00000-of-00001.parquet b/wiki-ner.mr/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..78e0e52de1f6e6ed503f51f5b6b599095f30bd82
--- /dev/null
+++ b/wiki-ner.mr/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae6c5aa1798cf2dfcf7027aabf85931893a3420eb1f52160ceb60ecf1b420e20
+size 142230
diff --git a/wiki-ner.mr/train-00000-of-00001.parquet b/wiki-ner.mr/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..77a8dcfb678b1f356d1a0d8de8223670bf5c2ca1
--- /dev/null
+++ b/wiki-ner.mr/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bcc7ea616b0ff3e2061d6023fd357af0683cf0e1f9bcf0f5e0b933cd28a4c8ee
+size 1122748
diff --git a/wiki-ner.mr/validation-00000-of-00001.parquet b/wiki-ner.mr/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..43fde95c3835fa393784130a7aa2de68e67db0bf
--- /dev/null
+++ b/wiki-ner.mr/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6a330804aad31ff1f58ecfea2bb11274b7359214ce04b6da34f1008a14cbebb
+size 145685
diff --git a/wiki-ner.or/test-00000-of-00001.parquet b/wiki-ner.or/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..aa0371350e3d08de71f3475570a277e101473bee
--- /dev/null
+++ b/wiki-ner.or/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c3519a2dfa256740e3d942e67cd5fffc2df76545e1b3a42264e5aca3dbd1da7
+size 16316
diff --git a/wiki-ner.or/train-00000-of-00001.parquet b/wiki-ner.or/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..a291e7d75b012954b423d8c6cafaab7847ff3ac9
--- /dev/null
+++ b/wiki-ner.or/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:423f767a3d515d2ff01ccc78fa4141594345ddae2d863c6a3bb764b9ad7ce1d6
+size 70290
diff --git a/wiki-ner.or/validation-00000-of-00001.parquet b/wiki-ner.or/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..4b5a10db83a869a9fc729bb9ccc63d6a884c9d80
--- /dev/null
+++ b/wiki-ner.or/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:270fbaee173988855704f320f5b22c7be038a90841feac39adf32cbc1235dd5a
+size 16177
diff --git a/wiki-ner.pa/test-00000-of-00001.parquet b/wiki-ner.pa/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..6f38a3ac5e9d01cdfdb52a86480a0aa97c7fd13e
--- /dev/null
+++ b/wiki-ner.pa/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51a6a566404a1f76809089dab955ced52107c1d09324d64966ed7dd7a2336b68
+size 17796
diff --git a/wiki-ner.pa/train-00000-of-00001.parquet b/wiki-ner.pa/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..590fe056e288e9152b0161622e400ba01478e91e
--- /dev/null
+++ b/wiki-ner.pa/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cb821a04d663fb76978e88918ed118d013a0ac63064fbec6804aa7e0bb59c71
+size 111829
diff --git a/wiki-ner.pa/validation-00000-of-00001.parquet b/wiki-ner.pa/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c733834edeb6d2aafff39e822091aa2e3b9148db
--- /dev/null
+++ b/wiki-ner.pa/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:543081164da7cd18d79cc0f33a7a6ee851407d4c85acf80b80958812a8ff8126
+size 20102
diff --git a/wiki-ner.ta/test-00000-of-00001.parquet b/wiki-ner.ta/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..24569c8cb547c5fcdcc302764269f3343a2c8874
--- /dev/null
+++ b/wiki-ner.ta/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cba6c8bd8a18b321cf9efe511c7c3a2279cd1c1e61d4425687ed67372bf1456e
+size 258966
diff --git a/wiki-ner.ta/train-00000-of-00001.parquet b/wiki-ner.ta/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..e9225cb7b2d5256b3c7dd7ea5bbdc435297d8dbc
--- /dev/null
+++ b/wiki-ner.ta/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9218dae64baf6b8e1f94f06b2789bad13adff67f3801beac98e204586503051
+size 2267960
diff --git a/wiki-ner.ta/validation-00000-of-00001.parquet b/wiki-ner.ta/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d08dbc78cf92402d1be3e2c007d45d7476b92fe2
--- /dev/null
+++ b/wiki-ner.ta/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3a9430e595a4ddccd5bcf7d12520fba6987ee47e479a19de4a384ab691a7a63
+size 292157
diff --git a/wiki-ner.te/test-00000-of-00001.parquet b/wiki-ner.te/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ffa10f5f713cefc88023b53db9dbecdf30f29e08
--- /dev/null
+++ b/wiki-ner.te/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d05e027d80b8f965b2a063e3d940077514a210e054587c49f1f08c344f8b1e7
+size 117325
diff --git a/wiki-ner.te/train-00000-of-00001.parquet b/wiki-ner.te/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..13dc6c685c1f91112182c668bfa0be8cadca0866
--- /dev/null
+++ b/wiki-ner.te/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:419cc51b764844a11d7028cca85acfd9c389889b268245748462206bc39ef05a
+size 784255
diff --git a/wiki-ner.te/validation-00000-of-00001.parquet b/wiki-ner.te/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f5cd338d2eb77be2dafbae8ee79b3d07cdd26d75
--- /dev/null
+++ b/wiki-ner.te/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c79d61a4871294f7021e2fda381ec9004154dab041549bc4b0120726bd1df949
+size 105301
diff --git a/wnli.en/test-00000-of-00001.parquet b/wnli.en/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..256a5ba0f517abc05ab5955a66e64cdcc0233cd6
--- /dev/null
+++ b/wnli.en/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55a49c732c59754b81ff34c01d1b16f272334e89a0894c4c8e769890781bcaa4
+size 12405
diff --git a/wnli.en/train-00000-of-00001.parquet b/wnli.en/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..6729f8647e75f3c4128d5cc474fb181cddf0a1e3
--- /dev/null
+++ b/wnli.en/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:416dd56567d4ec1574b6f60887e10a65931250f345f80a146c85ac0ec2a76050
+size 34980
diff --git a/wnli.en/validation-00000-of-00001.parquet b/wnli.en/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..5921cbf8b6a6aadb9ae20c408828301ad3233c43
--- /dev/null
+++ b/wnli.en/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed6459489a5fb8873ce967c7de65b440ef387d39a7e843c9b1393411182fe5cb
+size 10282
diff --git a/wnli.gu/test-00000-of-00001.parquet b/wnli.gu/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..013e4b002443dbb996cd3cb1517b709769de1bb0
--- /dev/null
+++ b/wnli.gu/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03760fa8dcb735e5cd004fdf9030b9b69f13dd25248823335f3f9ded4949b742
+size 20208
diff --git a/wnli.gu/train-00000-of-00001.parquet b/wnli.gu/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..e0aa625e3f5ef56d12c1e41cef7ec53f2fc79184
--- /dev/null
+++ b/wnli.gu/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb6b3533f301f921706693f9185a564d158ab28c1a31e0b97451f4c8584c8d69
+size 62939
diff --git a/wnli.gu/validation-00000-of-00001.parquet b/wnli.gu/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f8a57d66ac10a24732328057279bee4343b49349
--- /dev/null
+++ b/wnli.gu/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a25ba9b8371e19835ba8ffa2a92b7a7aef561e863502df4225940fc19a497999
+size 14885
diff --git a/wnli.hi/test-00000-of-00001.parquet b/wnli.hi/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..52e0e6eb3603cb38e09cf0b7c7a5ca8238e606c0
--- /dev/null
+++ b/wnli.hi/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f9219a4d8f38ed371085de80841082253b6547c6a228e719d30a1543985ff0c
+size 20533
diff --git a/wnli.hi/train-00000-of-00001.parquet b/wnli.hi/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..6d857d6c2b1f7856ae408e8f2e25ff4f67ec8a67
--- /dev/null
+++ b/wnli.hi/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d3bafb588025b316cf42c5e9029092a23c0de75261825e2bca7d9814d6187b9
+size 62791
diff --git a/wnli.hi/validation-00000-of-00001.parquet b/wnli.hi/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..e34d9e7ff4017c778bab3b42516a2421edc10828
--- /dev/null
+++ b/wnli.hi/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:277d29861d469fe5c39e14148767d880618409301bf5878770c8ef3a360e3a88
+size 16126
diff --git a/wnli.mr/test-00000-of-00001.parquet b/wnli.mr/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d658146158e621a1e1e1a92ce71e26c9f8dc737d
--- /dev/null
+++ b/wnli.mr/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4b789e9d5d826b9dd878cab1e82c82fcbdd4487743138984f48f9c5f5f917a8
+size 21826
diff --git a/wnli.mr/train-00000-of-00001.parquet b/wnli.mr/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b3e9785c441a3381ed7576ecafb46ee248b58d68
--- /dev/null
+++ b/wnli.mr/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14bef81b1dda040ae37407931719082e83ca2eec9e778e586b215bffec0c014f
+size 65500
diff --git a/wnli.mr/validation-00000-of-00001.parquet b/wnli.mr/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..207c892ca10a5422d6b266f452ed68ea70ecc78a
--- /dev/null
+++ b/wnli.mr/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:077575d096d512b507ae9f130c345ee030d4b59062137e540e37b0ed4c2976ef
+size 16448
diff --git a/wstp.as/test-00000-of-00001.parquet b/wstp.as/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..0bb332da661eb1861026f3ebf9485676d76b3821
--- /dev/null
+++ b/wstp.as/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fd0f53dd4daa4dd30bc069b37fccc1203fc4a349c1c242c142642bb99df7bc0
+size 703532
diff --git a/wstp.as/train-00000-of-00001.parquet b/wstp.as/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..dd7657e3dc1ed879f2e96ef76825fd173aabfb8c
--- /dev/null
+++ b/wstp.as/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b125e1de5120b1ff9ac3d680c47bf4e04b83bc19ea55712b2f1181bcf3390bcf
+size 5546320
diff --git a/wstp.as/validation-00000-of-00001.parquet b/wstp.as/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..11639180aa96566d0abade5c3e7c99cb572b4aad
--- /dev/null
+++ b/wstp.as/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06bad1ad39b7698eebd3779706c7d1decc4699dd6b8bcc410c0496b24b4c983d
+size 709606
diff --git a/wstp.bn/test-00000-of-00001.parquet b/wstp.bn/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..a0a3ab23d993cf47445b18ae099db302eb232a9e
--- /dev/null
+++ b/wstp.bn/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01552ffd5f0300e3d05a24ca758598ca5f2045f897921eb0c0cf80f0046cdcfa
+size 6841635
diff --git a/wstp.bn/train-00000-of-00001.parquet b/wstp.bn/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..63bacf85c5478632213069ffcee242b952f9a3e9
--- /dev/null
+++ b/wstp.bn/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01b8ca450f2ae1dc7b6e4837a66d5ece7802e106d398d3bcc1bb5042c7a11770
+size 55425218
diff --git a/wstp.bn/validation-00000-of-00001.parquet b/wstp.bn/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c8a1e65010499b6a14946fc7fdafbc7a4f83d45c
--- /dev/null
+++ b/wstp.bn/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:401f41d6210e40adc7640f6941314438779eac410f7e597e8187c801daae9f35
+size 6878519
diff --git a/wstp.gu/test-00000-of-00001.parquet b/wstp.gu/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ac1569604217b21bcc868f17d4babd537e385c67
--- /dev/null
+++ b/wstp.gu/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e37197d9aef54644cb9b918cbffbc304ec2f26ec11d7edc22522b19535458410
+size 1906778
diff --git a/wstp.gu/train-00000-of-00001.parquet b/wstp.gu/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..a28e6f5b6eeb140f4794e9fb5e9f035dfa3afb00
--- /dev/null
+++ b/wstp.gu/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34dbbc19383b17a464b58dc28595cdc96688d5ba79d1f8b4ae9d408b45ea7b99
+size 15869327
diff --git a/wstp.gu/validation-00000-of-00001.parquet b/wstp.gu/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ab7212bc985071def61664436b2adaf510336904
--- /dev/null
+++ b/wstp.gu/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:643681b86bab7ef17aae8b93e9da5a6bc817c07c5104ad7c6f8b7e109108136c
+size 1987144
diff --git a/wstp.hi/test-00000-of-00001.parquet b/wstp.hi/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f05fea0d7f5af55a8c91948cb7c556c033538de6
--- /dev/null
+++ b/wstp.hi/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2a01e0aa33759ab891358ae7a0383cb7ca45115315585ff016a3fa19e5f64f0
+size 7748096
diff --git a/wstp.hi/train-00000-of-00001.parquet b/wstp.hi/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..37a8e849d20284a19319a9a39fad311a66eebd16
--- /dev/null
+++ b/wstp.hi/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:337e5d30cba727b7b7943fd71fec1e8547c7dfafcda897f4d14944d9db67d60f
+size 62460274
diff --git a/wstp.hi/validation-00000-of-00001.parquet b/wstp.hi/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..88aeb90082c44e12837da5e26633322ba69a8747
--- /dev/null
+++ b/wstp.hi/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7fc68f7f1c18e00eea6358a9a43e25589c67cef281d1fde374890174f6e06a9
+size 7660204
diff --git a/wstp.kn/test-00000-of-00001.parquet b/wstp.kn/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ae7483e8273f69d83d399e45c350667d1c8bd76b
--- /dev/null
+++ b/wstp.kn/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53276af490b13d8fcc5edd2d9cef65a569142ee1fb0448582f8124f5932c1090
+size 6900868
diff --git a/wstp.kn/train-00000-of-00001.parquet b/wstp.kn/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b2d743674b17c8b4479dbe1cf81293d52a7f2547
--- /dev/null
+++ b/wstp.kn/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9664f2763b5e239662d5e00f0f159c690e969a0f4bbc32fb59015503c3afaf2
+size 53966606
diff --git a/wstp.kn/validation-00000-of-00001.parquet b/wstp.kn/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..97d294436645949bfdfd7d1e1841b7a326a32d40
--- /dev/null
+++ b/wstp.kn/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0a2c745bad806a2ebd2896c35f070a23252bd7c36758cabef5de0c36d9915c9
+size 6852030
diff --git a/wstp.ml/test-00000-of-00001.parquet b/wstp.ml/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..891f179ea93f11d55d527faa52812bb6dcb4c80b
--- /dev/null
+++ b/wstp.ml/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64ab1c1da7d68b714c059ecf90963e87c224ed9522a94c36f601b0d641943ffd
+size 4264710
diff --git a/wstp.ml/train-00000-of-00001.parquet b/wstp.ml/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..cca2bde9850a0424d889db0bebb81559cf9c0dbe
--- /dev/null
+++ b/wstp.ml/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f24ffb6384b1fd1e47df984b8e235481ac240a35abdb4e7930052f7d4d2d54ff
+size 33787041
diff --git a/wstp.ml/validation-00000-of-00001.parquet b/wstp.ml/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..633cb680a44597b058972464289f145608f7df6e
--- /dev/null
+++ b/wstp.ml/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3663a6de1ddb507e422377683108a4507ab88fd2128bf094ddfcb100bcee863e
+size 4284606
diff --git a/wstp.mr/test-00000-of-00001.parquet b/wstp.mr/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..302cfe7ce396e5eee9c61fc84da745e52c2794ef
--- /dev/null
+++ b/wstp.mr/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26d6b3e99fb1606a846eb1d114343f8834981fd3dfcd100bc7a2b0cd5f7ccae1
+size 1433132
diff --git a/wstp.mr/train-00000-of-00001.parquet b/wstp.mr/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..28aa35b6c424abfe2283c3cc5c4a935c1fd123fa
--- /dev/null
+++ b/wstp.mr/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:538a6a222cb1fc7f7bcbec458c7e30084c267484bb238c8a3295ac78d5a28152
+size 11129731
diff --git a/wstp.mr/validation-00000-of-00001.parquet b/wstp.mr/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..36313cdb5840b8a2551da862dee16c1c2e9d17ca
--- /dev/null
+++ b/wstp.mr/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:987ab84fbfe0e034ae5de30c5559b2f59253131228e31566167af68a7a1b17f5
+size 1323345
diff --git a/wstp.or/test-00000-of-00001.parquet b/wstp.or/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ad9f4e1f2a23874c3e47e16853923db7197f4d30
--- /dev/null
+++ b/wstp.or/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fe56995ae118bebda6d0677a764cb5e63b9a9175d9a29bd6234caa26f0eabdc
+size 542776
diff --git a/wstp.or/train-00000-of-00001.parquet b/wstp.or/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..08070fb2fb89e0fdf9ec71f3a48b5835d470fd70
--- /dev/null
+++ b/wstp.or/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a223bf7bd9679f975c87e56f8f0a597b8068d0dfa52189163bb5f161be92df0
+size 4275035
diff --git a/wstp.or/validation-00000-of-00001.parquet b/wstp.or/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..1746d506a497875eb3153e2189501d1f7c5e798a
--- /dev/null
+++ b/wstp.or/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ec05ade4727377a84f54f9515bc67cc38d18297c8584b2bc6d2bdf1faea0310
+size 501317
diff --git a/wstp.pa/test-00000-of-00001.parquet b/wstp.pa/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..af57463d8268a02a4815985fd76e0de815adb203
--- /dev/null
+++ b/wstp.pa/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73df01f4723bba70cd998643a63b71cce1325d95784b19a5b8f437b3d2715926
+size 1101943
diff --git a/wstp.pa/train-00000-of-00001.parquet b/wstp.pa/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d9cf951b08ed30dcfd0ba692ebf461dce288503a
--- /dev/null
+++ b/wstp.pa/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc0c4da1bd46a5cbbf88c9f49ade4faab48dd991be4ddf932372910ac0ddef24
+size 8952084
diff --git a/wstp.pa/validation-00000-of-00001.parquet b/wstp.pa/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..2f4b4fe846e16424402d2044e0bdc2163441c473
--- /dev/null
+++ b/wstp.pa/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca0277dd450cd5f51d660bf51a9b5275986748cb182af91d66eba7b3e618ce96
+size 1147342
diff --git a/wstp.ta/test-00000-of-00001.parquet b/wstp.ta/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..1f3a7320b17a53efee25a33c44059519f402e712
--- /dev/null
+++ b/wstp.ta/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:267f6b4b01a0d3632200bd24c0597e5ce63531d0a350480cd51f8163e58e7ab4
+size 6812449
diff --git a/wstp.ta/train-00000-of-00001.parquet b/wstp.ta/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..1938ee43c3a610a881ac506ad75357e186a30497
--- /dev/null
+++ b/wstp.ta/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b646e8d4a2a227e358b80cbce884e5eb315fe87f291f2e331d43bff9ca2bb44b
+size 55076513
diff --git a/wstp.ta/validation-00000-of-00001.parquet b/wstp.ta/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..57499e8d2b04e21eec075014801a63015f0c450c
--- /dev/null
+++ b/wstp.ta/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f2918d5d1d400cdd0c57ed5defc9f8f1506541506294f403b36f72372799608
+size 6810130
diff --git a/wstp.te/test-00000-of-00001.parquet b/wstp.te/test-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..550b0a24d26a7e20ba7c1a15d982f29f8d3fb591
--- /dev/null
+++ b/wstp.te/test-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e8170f3282454a3efb22e478c527319eaaa342cd45cc47a1529f04a05f66e61
+size 4993785
diff --git a/wstp.te/train-00000-of-00001.parquet b/wstp.te/train-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ad94d1afffaed11dae0854b80ec1c29734d34ead
--- /dev/null
+++ b/wstp.te/train-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:645323d11310d21179e11fc055f8913d29d5568d5cf2fe00cff5b103a11e089e
+size 40140757
diff --git a/wstp.te/validation-00000-of-00001.parquet b/wstp.te/validation-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..2c2ccee3bef9e0399a5e72f5636b0202efd020fa
--- /dev/null
+++ b/wstp.te/validation-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca5a9b59c49d72a7f570a9324684201c0070fc0110f3a7d474ca137cd6e61f4d
+size 5024038
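
Each parquet addition above is a Git LFS pointer: the repository commits only the three `version` / `oid sha256:...` / `size` lines, while the actual shard is fetched from LFS storage (for example via `git lfs pull`). As a minimal illustrative sketch, not part of this commit, the pointer format shown above can be parsed and a fetched shard checked against the recorded digest and size; the path `wnli.hi/train-00000-of-00001.parquet` and its oid/size are taken from the diff, everything else here is an assumption for demonstration:

```python
import hashlib
import os


def parse_lfs_pointer(pointer_text: str) -> dict:
    """Parse the three 'key value' lines of a Git LFS pointer (version, oid, size)."""
    fields = {}
    for line in pointer_text.splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value.strip()
    return fields


def verify_lfs_object(pointer: dict, object_path: str) -> bool:
    """Check a fetched file against the sha256 oid and byte size recorded in its pointer."""
    expected_oid = pointer["oid"].split(":", 1)[1]  # strip the 'sha256:' prefix
    if os.path.getsize(object_path) != int(pointer["size"]):
        return False
    digest = hashlib.sha256()
    with open(object_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid


# Hypothetical usage: the pointer text below is what git stores for wnli.hi/train
# in the diff above; after `git lfs pull`, the working-tree file should match it.
pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:9d3bafb588025b316cf42c5e9029092a23c0de75261825e2bca7d9814d6187b9\n"
    "size 62791\n"
)
# print(verify_lfs_object(pointer, "wnli.hi/train-00000-of-00001.parquet"))
```

The same check applies to any shard listed above, since every pointer follows the same three-line format.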