albertvillanova (HF staff) committed
Commit: 41aa009
Parent: 06cf937

Delete legacy JSON metadata (#5)


- Delete legacy JSON metadata (204118429439112f88f1d0979fcddd51ca3b801e)
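The deleted dataset_infos.json below carried the builder metadata (configurations, features, splits, checksums) as a single JSON line. As a minimal sketch of how the same information can be retrieved programmatically after this change, assuming the standard `datasets` API and that the dataset remains available on the Hub under the name `md_gender_bias`:

```python
from datasets import load_dataset_builder

# Inspect the builder metadata for one configuration of md_gender_bias.
# Note: newer versions of `datasets` may additionally require
# trust_remote_code=True for script-based datasets.
builder = load_dataset_builder("md_gender_bias", "gendered_words")
info = builder.info

print(info.description[:80])   # dataset description
print(info.features)           # word_masculine / word_feminine string fields
print(info.splits)             # split sizes, if already computed for this config
```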

Files changed (1)
  1. dataset_infos.json +0 -1
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"gendered_words": {"description": "Machine learning models are trained to find patterns in data.\nNLP models can inadvertently learn socially undesirable patterns when training on gender biased text.\nIn this work, we propose a general framework that decomposes gender bias in text along several pragmatic and semantic dimensions:\nbias from the gender of the person being spoken about, bias from the gender of the person being spoken to, and bias from the gender of the speaker.\nUsing this fine-grained framework, we automatically annotate eight large scale datasets with gender information.\nIn addition, we collect a novel, crowdsourced evaluation benchmark of utterance-level gender rewrites.\nDistinguishing between gender bias along multiple dimensions is important, as it enables us to train finer-grained gender bias classifiers.\nWe show our classifiers prove valuable for a variety of important applications, such as controlling for gender bias in generative models,\ndetecting gender bias in arbitrary text, and shed light on offensive language in terms of genderedness.\n", "citation": "@inproceedings{md_gender_bias,\n author = {Emily Dinan and\n Angela Fan and\n Ledell Wu and\n Jason Weston and\n Douwe Kiela and\n Adina Williams},\n editor = {Bonnie Webber and\n Trevor Cohn and\n Yulan He and\n Yang Liu},\n title = {Multi-Dimensional Gender Bias Classification},\n booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural\n Language Processing, {EMNLP} 2020, Online, November 16-20, 2020},\n pages = {314--331},\n publisher = {Association for Computational Linguistics},\n year = {2020},\n url = {https://www.aclweb.org/anthology/2020.emnlp-main.23/}\n}\n", "homepage": "https://parl.ai/projects/md_gender/", "license": "MIT License", "features": {"word_masculine": {"dtype": "string", "id": null, "_type": "Value"}, "word_feminine": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "md_gender_bias", "config_name": "gendered_words", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4988, "num_examples": 222, "dataset_name": "md_gender_bias"}}, "download_checksums": {"http://parl.ai/downloads/md_gender/gend_multiclass_10072020.tgz": {"num_bytes": 232629010, "checksum": "c2c03257c53497b9e453600201fc7245b55dec1d98965093b4657fdb54822e9d"}}, "download_size": 232629010, "post_processing_size": null, "dataset_size": 4988, "size_in_bytes": 232633998}, "name_genders": {"description": "Machine learning models are trained to find patterns in data.\nNLP models can inadvertently learn socially undesirable patterns when training on gender biased text.\nIn this work, we propose a general framework that decomposes gender bias in text along several pragmatic and semantic dimensions:\nbias from the gender of the person being spoken about, bias from the gender of the person being spoken to, and bias from the gender of the speaker.\nUsing this fine-grained framework, we automatically annotate eight large scale datasets with gender information.\nIn addition, we collect a novel, crowdsourced evaluation benchmark of utterance-level gender rewrites.\nDistinguishing between gender bias along multiple dimensions is important, as it enables us to train finer-grained gender bias classifiers.\nWe show our classifiers prove valuable for a variety of important applications, such as controlling for gender bias in generative models,\ndetecting gender 
bias in arbitrary text, and shed light on offensive language in terms of genderedness.\n", "citation": "@inproceedings{md_gender_bias,\n author = {Emily Dinan and\n Angela Fan and\n Ledell Wu and\n Jason Weston and\n Douwe Kiela and\n Adina Williams},\n editor = {Bonnie Webber and\n Trevor Cohn and\n Yulan He and\n Yang Liu},\n title = {Multi-Dimensional Gender Bias Classification},\n booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural\n Language Processing, {EMNLP} 2020, Online, November 16-20, 2020},\n pages = {314--331},\n publisher = {Association for Computational Linguistics},\n year = {2020},\n url = {https://www.aclweb.org/anthology/2020.emnlp-main.23/}\n}\n", "homepage": "https://parl.ai/projects/md_gender/", "license": "MIT License", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "assigned_gender": {"num_classes": 2, "names": ["M", "F"], "names_file": null, "id": null, "_type": "ClassLabel"}, "count": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "md_gender_bias", "config_name": "name_genders", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"yob1880": {"name": "yob1880", "num_bytes": 43404, "num_examples": 2000, "dataset_name": "md_gender_bias"}, "yob1881": {"name": "yob1881", "num_bytes": 41944, "num_examples": 1935, "dataset_name": "md_gender_bias"}, "yob1882": {"name": "yob1882", "num_bytes": 46211, "num_examples": 2127, "dataset_name": "md_gender_bias"}, "yob1883": {"name": "yob1883", "num_bytes": 45221, "num_examples": 2084, "dataset_name": "md_gender_bias"}, "yob1884": {"name": "yob1884", "num_bytes": 49886, "num_examples": 2297, "dataset_name": "md_gender_bias"}, "yob1885": {"name": "yob1885", "num_bytes": 49810, "num_examples": 2294, "dataset_name": "md_gender_bias"}, "yob1886": {"name": "yob1886", "num_bytes": 51935, "num_examples": 2392, "dataset_name": "md_gender_bias"}, "yob1887": {"name": "yob1887", "num_bytes": 51458, "num_examples": 2373, "dataset_name": "md_gender_bias"}, "yob1888": {"name": "yob1888", "num_bytes": 57531, "num_examples": 2651, "dataset_name": "md_gender_bias"}, "yob1889": {"name": "yob1889", "num_bytes": 56177, "num_examples": 2590, "dataset_name": "md_gender_bias"}, "yob1890": {"name": "yob1890", "num_bytes": 58509, "num_examples": 2695, "dataset_name": "md_gender_bias"}, "yob1891": {"name": "yob1891", "num_bytes": 57767, "num_examples": 2660, "dataset_name": "md_gender_bias"}, "yob1892": {"name": "yob1892", "num_bytes": 63493, "num_examples": 2921, "dataset_name": "md_gender_bias"}, "yob1893": {"name": "yob1893", "num_bytes": 61525, "num_examples": 2831, "dataset_name": "md_gender_bias"}, "yob1894": {"name": "yob1894", "num_bytes": 63927, "num_examples": 2941, "dataset_name": "md_gender_bias"}, "yob1895": {"name": "yob1895", "num_bytes": 66346, "num_examples": 3049, "dataset_name": "md_gender_bias"}, "yob1896": {"name": "yob1896", "num_bytes": 67224, "num_examples": 3091, "dataset_name": "md_gender_bias"}, "yob1897": {"name": "yob1897", "num_bytes": 65886, "num_examples": 3028, "dataset_name": "md_gender_bias"}, "yob1898": {"name": "yob1898", "num_bytes": 71088, "num_examples": 3264, "dataset_name": "md_gender_bias"}, "yob1899": {"name": "yob1899", "num_bytes": 66225, "num_examples": 3042, "dataset_name": "md_gender_bias"}, "yob1900": {"name": "yob1900", "num_bytes": 81305, "num_examples": 3730, "dataset_name": "md_gender_bias"}, "yob1901": {"name": "yob1901", 
"num_bytes": 68723, "num_examples": 3153, "dataset_name": "md_gender_bias"}, "yob1902": {"name": "yob1902", "num_bytes": 73321, "num_examples": 3362, "dataset_name": "md_gender_bias"}, "yob1903": {"name": "yob1903", "num_bytes": 74019, "num_examples": 3389, "dataset_name": "md_gender_bias"}, "yob1904": {"name": "yob1904", "num_bytes": 77751, "num_examples": 3560, "dataset_name": "md_gender_bias"}, "yob1905": {"name": "yob1905", "num_bytes": 79802, "num_examples": 3655, "dataset_name": "md_gender_bias"}, "yob1906": {"name": "yob1906", "num_bytes": 79392, "num_examples": 3633, "dataset_name": "md_gender_bias"}, "yob1907": {"name": "yob1907", "num_bytes": 86342, "num_examples": 3948, "dataset_name": "md_gender_bias"}, "yob1908": {"name": "yob1908", "num_bytes": 87965, "num_examples": 4018, "dataset_name": "md_gender_bias"}, "yob1909": {"name": "yob1909", "num_bytes": 92591, "num_examples": 4227, "dataset_name": "md_gender_bias"}, "yob1910": {"name": "yob1910", "num_bytes": 101491, "num_examples": 4629, "dataset_name": "md_gender_bias"}, "yob1911": {"name": "yob1911", "num_bytes": 106787, "num_examples": 4867, "dataset_name": "md_gender_bias"}, "yob1912": {"name": "yob1912", "num_bytes": 139448, "num_examples": 6351, "dataset_name": "md_gender_bias"}, "yob1913": {"name": "yob1913", "num_bytes": 153110, "num_examples": 6968, "dataset_name": "md_gender_bias"}, "yob1914": {"name": "yob1914", "num_bytes": 175167, "num_examples": 7965, "dataset_name": "md_gender_bias"}, "yob1915": {"name": "yob1915", "num_bytes": 205921, "num_examples": 9357, "dataset_name": "md_gender_bias"}, "yob1916": {"name": "yob1916", "num_bytes": 213468, "num_examples": 9696, "dataset_name": "md_gender_bias"}, "yob1917": {"name": "yob1917", "num_bytes": 218446, "num_examples": 9913, "dataset_name": "md_gender_bias"}, "yob1918": {"name": "yob1918", "num_bytes": 229209, "num_examples": 10398, "dataset_name": "md_gender_bias"}, "yob1919": {"name": "yob1919", "num_bytes": 228656, "num_examples": 10369, "dataset_name": "md_gender_bias"}, "yob1920": {"name": "yob1920", "num_bytes": 237286, "num_examples": 10756, "dataset_name": "md_gender_bias"}, "yob1921": {"name": "yob1921", "num_bytes": 239616, "num_examples": 10857, "dataset_name": "md_gender_bias"}, "yob1922": {"name": "yob1922", "num_bytes": 237569, "num_examples": 10756, "dataset_name": "md_gender_bias"}, "yob1923": {"name": "yob1923", "num_bytes": 235046, "num_examples": 10643, "dataset_name": "md_gender_bias"}, "yob1924": {"name": "yob1924", "num_bytes": 240113, "num_examples": 10869, "dataset_name": "md_gender_bias"}, "yob1925": {"name": "yob1925", "num_bytes": 235098, "num_examples": 10638, "dataset_name": "md_gender_bias"}, "yob1926": {"name": "yob1926", "num_bytes": 230970, "num_examples": 10458, "dataset_name": "md_gender_bias"}, "yob1927": {"name": "yob1927", "num_bytes": 230004, "num_examples": 10406, "dataset_name": "md_gender_bias"}, "yob1928": {"name": "yob1928", "num_bytes": 224583, "num_examples": 10159, "dataset_name": "md_gender_bias"}, "yob1929": {"name": "yob1929", "num_bytes": 217057, "num_examples": 9820, "dataset_name": "md_gender_bias"}, "yob1930": {"name": "yob1930", "num_bytes": 216352, "num_examples": 9791, "dataset_name": "md_gender_bias"}, "yob1931": {"name": "yob1931", "num_bytes": 205361, "num_examples": 9298, "dataset_name": "md_gender_bias"}, "yob1932": {"name": "yob1932", "num_bytes": 207268, "num_examples": 9381, "dataset_name": "md_gender_bias"}, "yob1933": {"name": "yob1933", "num_bytes": 199031, "num_examples": 9013, "dataset_name": 
"md_gender_bias"}, "yob1934": {"name": "yob1934", "num_bytes": 202758, "num_examples": 9180, "dataset_name": "md_gender_bias"}, "yob1935": {"name": "yob1935", "num_bytes": 199614, "num_examples": 9037, "dataset_name": "md_gender_bias"}, "yob1936": {"name": "yob1936", "num_bytes": 196379, "num_examples": 8894, "dataset_name": "md_gender_bias"}, "yob1937": {"name": "yob1937", "num_bytes": 197757, "num_examples": 8946, "dataset_name": "md_gender_bias"}, "yob1938": {"name": "yob1938", "num_bytes": 199603, "num_examples": 9032, "dataset_name": "md_gender_bias"}, "yob1939": {"name": "yob1939", "num_bytes": 196979, "num_examples": 8918, "dataset_name": "md_gender_bias"}, "yob1940": {"name": "yob1940", "num_bytes": 198141, "num_examples": 8961, "dataset_name": "md_gender_bias"}, "yob1941": {"name": "yob1941", "num_bytes": 200858, "num_examples": 9085, "dataset_name": "md_gender_bias"}, "yob1942": {"name": "yob1942", "num_bytes": 208363, "num_examples": 9425, "dataset_name": "md_gender_bias"}, "yob1943": {"name": "yob1943", "num_bytes": 207940, "num_examples": 9408, "dataset_name": "md_gender_bias"}, "yob1944": {"name": "yob1944", "num_bytes": 202227, "num_examples": 9152, "dataset_name": "md_gender_bias"}, "yob1945": {"name": "yob1945", "num_bytes": 199478, "num_examples": 9025, "dataset_name": "md_gender_bias"}, "yob1946": {"name": "yob1946", "num_bytes": 214614, "num_examples": 9705, "dataset_name": "md_gender_bias"}, "yob1947": {"name": "yob1947", "num_bytes": 229327, "num_examples": 10371, "dataset_name": "md_gender_bias"}, "yob1948": {"name": "yob1948", "num_bytes": 226615, "num_examples": 10241, "dataset_name": "md_gender_bias"}, "yob1949": {"name": "yob1949", "num_bytes": 227278, "num_examples": 10269, "dataset_name": "md_gender_bias"}, "yob1950": {"name": "yob1950", "num_bytes": 227946, "num_examples": 10303, "dataset_name": "md_gender_bias"}, "yob1951": {"name": "yob1951", "num_bytes": 231613, "num_examples": 10462, "dataset_name": "md_gender_bias"}, "yob1952": {"name": "yob1952", "num_bytes": 235483, "num_examples": 10646, "dataset_name": "md_gender_bias"}, "yob1953": {"name": "yob1953", "num_bytes": 239654, "num_examples": 10837, "dataset_name": "md_gender_bias"}, "yob1954": {"name": "yob1954", "num_bytes": 242389, "num_examples": 10968, "dataset_name": "md_gender_bias"}, "yob1955": {"name": "yob1955", "num_bytes": 245652, "num_examples": 11115, "dataset_name": "md_gender_bias"}, "yob1956": {"name": "yob1956", "num_bytes": 250674, "num_examples": 11340, "dataset_name": "md_gender_bias"}, "yob1957": {"name": "yob1957", "num_bytes": 255370, "num_examples": 11564, "dataset_name": "md_gender_bias"}, "yob1958": {"name": "yob1958", "num_bytes": 254520, "num_examples": 11522, "dataset_name": "md_gender_bias"}, "yob1959": {"name": "yob1959", "num_bytes": 260051, "num_examples": 11767, "dataset_name": "md_gender_bias"}, "yob1960": {"name": "yob1960", "num_bytes": 263474, "num_examples": 11921, "dataset_name": "md_gender_bias"}, "yob1961": {"name": "yob1961", "num_bytes": 269493, "num_examples": 12182, "dataset_name": "md_gender_bias"}, "yob1962": {"name": "yob1962", "num_bytes": 270244, "num_examples": 12209, "dataset_name": "md_gender_bias"}, "yob1963": {"name": "yob1963", "num_bytes": 271872, "num_examples": 12282, "dataset_name": "md_gender_bias"}, "yob1964": {"name": "yob1964", "num_bytes": 274590, "num_examples": 12397, "dataset_name": "md_gender_bias"}, "yob1965": {"name": "yob1965", "num_bytes": 264889, "num_examples": 11952, "dataset_name": "md_gender_bias"}, "yob1966": {"name": 
"yob1966", "num_bytes": 269321, "num_examples": 12151, "dataset_name": "md_gender_bias"}, "yob1967": {"name": "yob1967", "num_bytes": 274867, "num_examples": 12397, "dataset_name": "md_gender_bias"}, "yob1968": {"name": "yob1968", "num_bytes": 286774, "num_examples": 12936, "dataset_name": "md_gender_bias"}, "yob1969": {"name": "yob1969", "num_bytes": 304909, "num_examples": 13749, "dataset_name": "md_gender_bias"}, "yob1970": {"name": "yob1970", "num_bytes": 328047, "num_examples": 14779, "dataset_name": "md_gender_bias"}, "yob1971": {"name": "yob1971", "num_bytes": 339657, "num_examples": 15295, "dataset_name": "md_gender_bias"}, "yob1972": {"name": "yob1972", "num_bytes": 342321, "num_examples": 15412, "dataset_name": "md_gender_bias"}, "yob1973": {"name": "yob1973", "num_bytes": 348414, "num_examples": 15682, "dataset_name": "md_gender_bias"}, "yob1974": {"name": "yob1974", "num_bytes": 361188, "num_examples": 16249, "dataset_name": "md_gender_bias"}, "yob1975": {"name": "yob1975", "num_bytes": 376491, "num_examples": 16944, "dataset_name": "md_gender_bias"}, "yob1976": {"name": "yob1976", "num_bytes": 386565, "num_examples": 17391, "dataset_name": "md_gender_bias"}, "yob1977": {"name": "yob1977", "num_bytes": 403994, "num_examples": 18175, "dataset_name": "md_gender_bias"}, "yob1978": {"name": "yob1978", "num_bytes": 405430, "num_examples": 18231, "dataset_name": "md_gender_bias"}, "yob1979": {"name": "yob1979", "num_bytes": 423423, "num_examples": 19039, "dataset_name": "md_gender_bias"}, "yob1980": {"name": "yob1980", "num_bytes": 432317, "num_examples": 19452, "dataset_name": "md_gender_bias"}, "yob1981": {"name": "yob1981", "num_bytes": 432980, "num_examples": 19475, "dataset_name": "md_gender_bias"}, "yob1982": {"name": "yob1982", "num_bytes": 437986, "num_examples": 19694, "dataset_name": "md_gender_bias"}, "yob1983": {"name": "yob1983", "num_bytes": 431531, "num_examples": 19407, "dataset_name": "md_gender_bias"}, "yob1984": {"name": "yob1984", "num_bytes": 434085, "num_examples": 19506, "dataset_name": "md_gender_bias"}, "yob1985": {"name": "yob1985", "num_bytes": 447113, "num_examples": 20085, "dataset_name": "md_gender_bias"}, "yob1986": {"name": "yob1986", "num_bytes": 460315, "num_examples": 20657, "dataset_name": "md_gender_bias"}, "yob1987": {"name": "yob1987", "num_bytes": 477677, "num_examples": 21406, "dataset_name": "md_gender_bias"}, "yob1988": {"name": "yob1988", "num_bytes": 499347, "num_examples": 22367, "dataset_name": "md_gender_bias"}, "yob1989": {"name": "yob1989", "num_bytes": 531020, "num_examples": 23775, "dataset_name": "md_gender_bias"}, "yob1990": {"name": "yob1990", "num_bytes": 552114, "num_examples": 24716, "dataset_name": "md_gender_bias"}, "yob1991": {"name": "yob1991", "num_bytes": 560932, "num_examples": 25109, "dataset_name": "md_gender_bias"}, "yob1992": {"name": "yob1992", "num_bytes": 568151, "num_examples": 25427, "dataset_name": "md_gender_bias"}, "yob1993": {"name": "yob1993", "num_bytes": 579778, "num_examples": 25966, "dataset_name": "md_gender_bias"}, "yob1994": {"name": "yob1994", "num_bytes": 580223, "num_examples": 25997, "dataset_name": "md_gender_bias"}, "yob1995": {"name": "yob1995", "num_bytes": 581949, "num_examples": 26080, "dataset_name": "md_gender_bias"}, "yob1996": {"name": "yob1996", "num_bytes": 589131, "num_examples": 26423, "dataset_name": "md_gender_bias"}, "yob1997": {"name": "yob1997", "num_bytes": 601284, "num_examples": 26970, "dataset_name": "md_gender_bias"}, "yob1998": {"name": "yob1998", "num_bytes": 621587, 
"num_examples": 27902, "dataset_name": "md_gender_bias"}, "yob1999": {"name": "yob1999", "num_bytes": 635355, "num_examples": 28552, "dataset_name": "md_gender_bias"}, "yob2000": {"name": "yob2000", "num_bytes": 662398, "num_examples": 29772, "dataset_name": "md_gender_bias"}, "yob2001": {"name": "yob2001", "num_bytes": 673111, "num_examples": 30274, "dataset_name": "md_gender_bias"}, "yob2002": {"name": "yob2002", "num_bytes": 679392, "num_examples": 30564, "dataset_name": "md_gender_bias"}, "yob2003": {"name": "yob2003", "num_bytes": 692931, "num_examples": 31185, "dataset_name": "md_gender_bias"}, "yob2004": {"name": "yob2004", "num_bytes": 711776, "num_examples": 32048, "dataset_name": "md_gender_bias"}, "yob2005": {"name": "yob2005", "num_bytes": 723065, "num_examples": 32549, "dataset_name": "md_gender_bias"}, "yob2006": {"name": "yob2006", "num_bytes": 757620, "num_examples": 34088, "dataset_name": "md_gender_bias"}, "yob2007": {"name": "yob2007", "num_bytes": 776893, "num_examples": 34961, "dataset_name": "md_gender_bias"}, "yob2008": {"name": "yob2008", "num_bytes": 779403, "num_examples": 35079, "dataset_name": "md_gender_bias"}, "yob2009": {"name": "yob2009", "num_bytes": 771032, "num_examples": 34709, "dataset_name": "md_gender_bias"}, "yob2010": {"name": "yob2010", "num_bytes": 756717, "num_examples": 34073, "dataset_name": "md_gender_bias"}, "yob2011": {"name": "yob2011", "num_bytes": 752804, "num_examples": 33908, "dataset_name": "md_gender_bias"}, "yob2012": {"name": "yob2012", "num_bytes": 748915, "num_examples": 33747, "dataset_name": "md_gender_bias"}, "yob2013": {"name": "yob2013", "num_bytes": 738288, "num_examples": 33282, "dataset_name": "md_gender_bias"}, "yob2014": {"name": "yob2014", "num_bytes": 737219, "num_examples": 33243, "dataset_name": "md_gender_bias"}, "yob2015": {"name": "yob2015", "num_bytes": 734183, "num_examples": 33121, "dataset_name": "md_gender_bias"}, "yob2016": {"name": "yob2016", "num_bytes": 731291, "num_examples": 33010, "dataset_name": "md_gender_bias"}, "yob2017": {"name": "yob2017", "num_bytes": 721444, "num_examples": 32590, "dataset_name": "md_gender_bias"}, "yob2018": {"name": "yob2018", "num_bytes": 708657, "num_examples": 32033, "dataset_name": "md_gender_bias"}}, "download_checksums": {"http://parl.ai/downloads/md_gender/gend_multiclass_10072020.tgz": {"num_bytes": 232629010, "checksum": "c2c03257c53497b9e453600201fc7245b55dec1d98965093b4657fdb54822e9d"}}, "download_size": 232629010, "post_processing_size": null, "dataset_size": 43393095, "size_in_bytes": 276022105}, "new_data": {"description": "Machine learning models are trained to find patterns in data.\nNLP models can inadvertently learn socially undesirable patterns when training on gender biased text.\nIn this work, we propose a general framework that decomposes gender bias in text along several pragmatic and semantic dimensions:\nbias from the gender of the person being spoken about, bias from the gender of the person being spoken to, and bias from the gender of the speaker.\nUsing this fine-grained framework, we automatically annotate eight large scale datasets with gender information.\nIn addition, we collect a novel, crowdsourced evaluation benchmark of utterance-level gender rewrites.\nDistinguishing between gender bias along multiple dimensions is important, as it enables us to train finer-grained gender bias classifiers.\nWe show our classifiers prove valuable for a variety of important applications, such as controlling for gender bias in generative models,\ndetecting 
gender bias in arbitrary text, and shed light on offensive language in terms of genderedness.\n", "citation": "@inproceedings{md_gender_bias,\n author = {Emily Dinan and\n Angela Fan and\n Ledell Wu and\n Jason Weston and\n Douwe Kiela and\n Adina Williams},\n editor = {Bonnie Webber and\n Trevor Cohn and\n Yulan He and\n Yang Liu},\n title = {Multi-Dimensional Gender Bias Classification},\n booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural\n Language Processing, {EMNLP} 2020, Online, November 16-20, 2020},\n pages = {314--331},\n publisher = {Association for Computational Linguistics},\n year = {2020},\n url = {https://www.aclweb.org/anthology/2020.emnlp-main.23/}\n}\n", "homepage": "https://parl.ai/projects/md_gender/", "license": "MIT License", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "labels": [{"num_classes": 6, "names": ["ABOUT:female", "ABOUT:male", "PARTNER:female", "PARTNER:male", "SELF:female", "SELF:male"], "names_file": null, "id": null, "_type": "ClassLabel"}], "class_type": {"num_classes": 3, "names": ["about", "partner", "self"], "names_file": null, "id": null, "_type": "ClassLabel"}, "turker_gender": {"num_classes": 5, "names": ["man", "woman", "nonbinary", "prefer not to say", "no answer"], "names_file": null, "id": null, "_type": "ClassLabel"}, "episode_done": {"dtype": "bool_", "id": null, "_type": "Value"}, "confidence": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "md_gender_bias", "config_name": "new_data", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 369753, "num_examples": 2345, "dataset_name": "md_gender_bias"}}, "download_checksums": {"http://parl.ai/downloads/md_gender/gend_multiclass_10072020.tgz": {"num_bytes": 232629010, "checksum": "c2c03257c53497b9e453600201fc7245b55dec1d98965093b4657fdb54822e9d"}}, "download_size": 232629010, "post_processing_size": null, "dataset_size": 369753, "size_in_bytes": 232998763}, "funpedia": {"description": "Machine learning models are trained to find patterns in data.\nNLP models can inadvertently learn socially undesirable patterns when training on gender biased text.\nIn this work, we propose a general framework that decomposes gender bias in text along several pragmatic and semantic dimensions:\nbias from the gender of the person being spoken about, bias from the gender of the person being spoken to, and bias from the gender of the speaker.\nUsing this fine-grained framework, we automatically annotate eight large scale datasets with gender information.\nIn addition, we collect a novel, crowdsourced evaluation benchmark of utterance-level gender rewrites.\nDistinguishing between gender bias along multiple dimensions is important, as it enables us to train finer-grained gender bias classifiers.\nWe show our classifiers prove valuable for a variety of important applications, such as controlling for gender bias in generative models,\ndetecting gender bias in arbitrary text, and shed light on offensive language in terms of genderedness.\n", "citation": "@inproceedings{md_gender_bias,\n author = {Emily Dinan and\n Angela Fan and\n Ledell Wu and\n Jason Weston and\n Douwe Kiela and\n Adina Williams},\n editor = {Bonnie Webber and\n Trevor Cohn and\n Yulan He and\n Yang Liu},\n title = {Multi-Dimensional Gender Bias Classification},\n 
booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural\n Language Processing, {EMNLP} 2020, Online, November 16-20, 2020},\n pages = {314--331},\n publisher = {Association for Computational Linguistics},\n year = {2020},\n url = {https://www.aclweb.org/anthology/2020.emnlp-main.23/}\n}\n", "homepage": "https://parl.ai/projects/md_gender/", "license": "MIT License", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "persona": {"dtype": "string", "id": null, "_type": "Value"}, "gender": {"num_classes": 3, "names": ["gender-neutral", "female", "male"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "md_gender_bias", "config_name": "funpedia", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3225542, "num_examples": 23897, "dataset_name": "md_gender_bias"}, "validation": {"name": "validation", "num_bytes": 402205, "num_examples": 2984, "dataset_name": "md_gender_bias"}, "test": {"name": "test", "num_bytes": 396417, "num_examples": 2938, "dataset_name": "md_gender_bias"}}, "download_checksums": {"http://parl.ai/downloads/md_gender/gend_multiclass_10072020.tgz": {"num_bytes": 232629010, "checksum": "c2c03257c53497b9e453600201fc7245b55dec1d98965093b4657fdb54822e9d"}}, "download_size": 232629010, "post_processing_size": null, "dataset_size": 4024164, "size_in_bytes": 236653174}, "image_chat": {"description": "Machine learning models are trained to find patterns in data.\nNLP models can inadvertently learn socially undesirable patterns when training on gender biased text.\nIn this work, we propose a general framework that decomposes gender bias in text along several pragmatic and semantic dimensions:\nbias from the gender of the person being spoken about, bias from the gender of the person being spoken to, and bias from the gender of the speaker.\nUsing this fine-grained framework, we automatically annotate eight large scale datasets with gender information.\nIn addition, we collect a novel, crowdsourced evaluation benchmark of utterance-level gender rewrites.\nDistinguishing between gender bias along multiple dimensions is important, as it enables us to train finer-grained gender bias classifiers.\nWe show our classifiers prove valuable for a variety of important applications, such as controlling for gender bias in generative models,\ndetecting gender bias in arbitrary text, and shed light on offensive language in terms of genderedness.\n", "citation": "@inproceedings{md_gender_bias,\n author = {Emily Dinan and\n Angela Fan and\n Ledell Wu and\n Jason Weston and\n Douwe Kiela and\n Adina Williams},\n editor = {Bonnie Webber and\n Trevor Cohn and\n Yulan He and\n Yang Liu},\n title = {Multi-Dimensional Gender Bias Classification},\n booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural\n Language Processing, {EMNLP} 2020, Online, November 16-20, 2020},\n pages = {314--331},\n publisher = {Association for Computational Linguistics},\n year = {2020},\n url = {https://www.aclweb.org/anthology/2020.emnlp-main.23/}\n}\n", "homepage": "https://parl.ai/projects/md_gender/", "license": "MIT License", "features": {"caption": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}, "male": {"dtype": "bool_", "id": null, "_type": "Value"}, "female": {"dtype": 
"bool_", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "md_gender_bias", "config_name": "image_chat", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1061285, "num_examples": 9997, "dataset_name": "md_gender_bias"}, "validation": {"name": "validation", "num_bytes": 35868670, "num_examples": 338180, "dataset_name": "md_gender_bias"}, "test": {"name": "test", "num_bytes": 530126, "num_examples": 5000, "dataset_name": "md_gender_bias"}}, "download_checksums": {"http://parl.ai/downloads/md_gender/gend_multiclass_10072020.tgz": {"num_bytes": 232629010, "checksum": "c2c03257c53497b9e453600201fc7245b55dec1d98965093b4657fdb54822e9d"}}, "download_size": 232629010, "post_processing_size": null, "dataset_size": 37460081, "size_in_bytes": 270089091}, "wizard": {"description": "Machine learning models are trained to find patterns in data.\nNLP models can inadvertently learn socially undesirable patterns when training on gender biased text.\nIn this work, we propose a general framework that decomposes gender bias in text along several pragmatic and semantic dimensions:\nbias from the gender of the person being spoken about, bias from the gender of the person being spoken to, and bias from the gender of the speaker.\nUsing this fine-grained framework, we automatically annotate eight large scale datasets with gender information.\nIn addition, we collect a novel, crowdsourced evaluation benchmark of utterance-level gender rewrites.\nDistinguishing between gender bias along multiple dimensions is important, as it enables us to train finer-grained gender bias classifiers.\nWe show our classifiers prove valuable for a variety of important applications, such as controlling for gender bias in generative models,\ndetecting gender bias in arbitrary text, and shed light on offensive language in terms of genderedness.\n", "citation": "@inproceedings{md_gender_bias,\n author = {Emily Dinan and\n Angela Fan and\n Ledell Wu and\n Jason Weston and\n Douwe Kiela and\n Adina Williams},\n editor = {Bonnie Webber and\n Trevor Cohn and\n Yulan He and\n Yang Liu},\n title = {Multi-Dimensional Gender Bias Classification},\n booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural\n Language Processing, {EMNLP} 2020, Online, November 16-20, 2020},\n pages = {314--331},\n publisher = {Association for Computational Linguistics},\n year = {2020},\n url = {https://www.aclweb.org/anthology/2020.emnlp-main.23/}\n}\n", "homepage": "https://parl.ai/projects/md_gender/", "license": "MIT License", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "chosen_topic": {"dtype": "string", "id": null, "_type": "Value"}, "gender": {"num_classes": 3, "names": ["gender-neutral", "female", "male"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "md_gender_bias", "config_name": "wizard", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1158785, "num_examples": 10449, "dataset_name": "md_gender_bias"}, "validation": {"name": "validation", "num_bytes": 57824, "num_examples": 537, "dataset_name": "md_gender_bias"}, "test": {"name": "test", "num_bytes": 53126, "num_examples": 470, "dataset_name": "md_gender_bias"}}, "download_checksums": 
{"http://parl.ai/downloads/md_gender/gend_multiclass_10072020.tgz": {"num_bytes": 232629010, "checksum": "c2c03257c53497b9e453600201fc7245b55dec1d98965093b4657fdb54822e9d"}}, "download_size": 232629010, "post_processing_size": null, "dataset_size": 1269735, "size_in_bytes": 233898745}, "convai2_inferred": {"description": "Machine learning models are trained to find patterns in data.\nNLP models can inadvertently learn socially undesirable patterns when training on gender biased text.\nIn this work, we propose a general framework that decomposes gender bias in text along several pragmatic and semantic dimensions:\nbias from the gender of the person being spoken about, bias from the gender of the person being spoken to, and bias from the gender of the speaker.\nUsing this fine-grained framework, we automatically annotate eight large scale datasets with gender information.\nIn addition, we collect a novel, crowdsourced evaluation benchmark of utterance-level gender rewrites.\nDistinguishing between gender bias along multiple dimensions is important, as it enables us to train finer-grained gender bias classifiers.\nWe show our classifiers prove valuable for a variety of important applications, such as controlling for gender bias in generative models,\ndetecting gender bias in arbitrary text, and shed light on offensive language in terms of genderedness.\n", "citation": "@inproceedings{md_gender_bias,\n author = {Emily Dinan and\n Angela Fan and\n Ledell Wu and\n Jason Weston and\n Douwe Kiela and\n Adina Williams},\n editor = {Bonnie Webber and\n Trevor Cohn and\n Yulan He and\n Yang Liu},\n title = {Multi-Dimensional Gender Bias Classification},\n booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural\n Language Processing, {EMNLP} 2020, Online, November 16-20, 2020},\n pages = {314--331},\n publisher = {Association for Computational Linguistics},\n year = {2020},\n url = {https://www.aclweb.org/anthology/2020.emnlp-main.23/}\n}\n", "homepage": "https://parl.ai/projects/md_gender/", "license": "MIT License", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "binary_label": {"num_classes": 2, "names": ["ABOUT:female", "ABOUT:male"], "names_file": null, "id": null, "_type": "ClassLabel"}, "binary_score": {"dtype": "float32", "id": null, "_type": "Value"}, "ternary_label": {"num_classes": 3, "names": ["ABOUT:female", "ABOUT:male", "ABOUT:gender-neutral"], "names_file": null, "id": null, "_type": "ClassLabel"}, "ternary_score": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "md_gender_bias", "config_name": "convai2_inferred", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 9853669, "num_examples": 131438, "dataset_name": "md_gender_bias"}, "validation": {"name": "validation", "num_bytes": 608046, "num_examples": 7801, "dataset_name": "md_gender_bias"}, "test": {"name": "test", "num_bytes": 608046, "num_examples": 7801, "dataset_name": "md_gender_bias"}}, "download_checksums": {"http://parl.ai/downloads/md_gender/gend_multiclass_10072020.tgz": {"num_bytes": 232629010, "checksum": "c2c03257c53497b9e453600201fc7245b55dec1d98965093b4657fdb54822e9d"}}, "download_size": 232629010, "post_processing_size": null, "dataset_size": 11069761, "size_in_bytes": 243698771}, "light_inferred": {"description": "Machine learning models are trained to find patterns in data.\nNLP models can inadvertently 
learn socially undesirable patterns when training on gender biased text.\nIn this work, we propose a general framework that decomposes gender bias in text along several pragmatic and semantic dimensions:\nbias from the gender of the person being spoken about, bias from the gender of the person being spoken to, and bias from the gender of the speaker.\nUsing this fine-grained framework, we automatically annotate eight large scale datasets with gender information.\nIn addition, we collect a novel, crowdsourced evaluation benchmark of utterance-level gender rewrites.\nDistinguishing between gender bias along multiple dimensions is important, as it enables us to train finer-grained gender bias classifiers.\nWe show our classifiers prove valuable for a variety of important applications, such as controlling for gender bias in generative models,\ndetecting gender bias in arbitrary text, and shed light on offensive language in terms of genderedness.\n", "citation": "@inproceedings{md_gender_bias,\n author = {Emily Dinan and\n Angela Fan and\n Ledell Wu and\n Jason Weston and\n Douwe Kiela and\n Adina Williams},\n editor = {Bonnie Webber and\n Trevor Cohn and\n Yulan He and\n Yang Liu},\n title = {Multi-Dimensional Gender Bias Classification},\n booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural\n Language Processing, {EMNLP} 2020, Online, November 16-20, 2020},\n pages = {314--331},\n publisher = {Association for Computational Linguistics},\n year = {2020},\n url = {https://www.aclweb.org/anthology/2020.emnlp-main.23/}\n}\n", "homepage": "https://parl.ai/projects/md_gender/", "license": "MIT License", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "binary_label": {"num_classes": 2, "names": ["ABOUT:female", "ABOUT:male"], "names_file": null, "id": null, "_type": "ClassLabel"}, "binary_score": {"dtype": "float32", "id": null, "_type": "Value"}, "ternary_label": {"num_classes": 3, "names": ["ABOUT:female", "ABOUT:male", "ABOUT:gender-neutral"], "names_file": null, "id": null, "_type": "ClassLabel"}, "ternary_score": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "md_gender_bias", "config_name": "light_inferred", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10931355, "num_examples": 106122, "dataset_name": "md_gender_bias"}, "validation": {"name": "validation", "num_bytes": 679692, "num_examples": 6362, "dataset_name": "md_gender_bias"}, "test": {"name": "test", "num_bytes": 1375745, "num_examples": 12765, "dataset_name": "md_gender_bias"}}, "download_checksums": {"http://parl.ai/downloads/md_gender/gend_multiclass_10072020.tgz": {"num_bytes": 232629010, "checksum": "c2c03257c53497b9e453600201fc7245b55dec1d98965093b4657fdb54822e9d"}}, "download_size": 232629010, "post_processing_size": null, "dataset_size": 12986792, "size_in_bytes": 245615802}, "opensubtitles_inferred": {"description": "Machine learning models are trained to find patterns in data.\nNLP models can inadvertently learn socially undesirable patterns when training on gender biased text.\nIn this work, we propose a general framework that decomposes gender bias in text along several pragmatic and semantic dimensions:\nbias from the gender of the person being spoken about, bias from the gender of the person being spoken to, and bias from the gender of the speaker.\nUsing this fine-grained framework, we automatically 
annotate eight large scale datasets with gender information.\nIn addition, we collect a novel, crowdsourced evaluation benchmark of utterance-level gender rewrites.\nDistinguishing between gender bias along multiple dimensions is important, as it enables us to train finer-grained gender bias classifiers.\nWe show our classifiers prove valuable for a variety of important applications, such as controlling for gender bias in generative models,\ndetecting gender bias in arbitrary text, and shed light on offensive language in terms of genderedness.\n", "citation": "@inproceedings{md_gender_bias,\n author = {Emily Dinan and\n Angela Fan and\n Ledell Wu and\n Jason Weston and\n Douwe Kiela and\n Adina Williams},\n editor = {Bonnie Webber and\n Trevor Cohn and\n Yulan He and\n Yang Liu},\n title = {Multi-Dimensional Gender Bias Classification},\n booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural\n Language Processing, {EMNLP} 2020, Online, November 16-20, 2020},\n pages = {314--331},\n publisher = {Association for Computational Linguistics},\n year = {2020},\n url = {https://www.aclweb.org/anthology/2020.emnlp-main.23/}\n}\n", "homepage": "https://parl.ai/projects/md_gender/", "license": "MIT License", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "binary_label": {"num_classes": 2, "names": ["ABOUT:female", "ABOUT:male"], "names_file": null, "id": null, "_type": "ClassLabel"}, "binary_score": {"dtype": "float32", "id": null, "_type": "Value"}, "ternary_label": {"num_classes": 3, "names": ["ABOUT:female", "ABOUT:male", "ABOUT:gender-neutral"], "names_file": null, "id": null, "_type": "ClassLabel"}, "ternary_score": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "md_gender_bias", "config_name": "opensubtitles_inferred", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 27966476, "num_examples": 351036, "dataset_name": "md_gender_bias"}, "validation": {"name": "validation", "num_bytes": 3363802, "num_examples": 41957, "dataset_name": "md_gender_bias"}, "test": {"name": "test", "num_bytes": 3830528, "num_examples": 49108, "dataset_name": "md_gender_bias"}}, "download_checksums": {"http://parl.ai/downloads/md_gender/gend_multiclass_10072020.tgz": {"num_bytes": 232629010, "checksum": "c2c03257c53497b9e453600201fc7245b55dec1d98965093b4657fdb54822e9d"}}, "download_size": 232629010, "post_processing_size": null, "dataset_size": 35160806, "size_in_bytes": 267789816}, "yelp_inferred": {"description": "Machine learning models are trained to find patterns in data.\nNLP models can inadvertently learn socially undesirable patterns when training on gender biased text.\nIn this work, we propose a general framework that decomposes gender bias in text along several pragmatic and semantic dimensions:\nbias from the gender of the person being spoken about, bias from the gender of the person being spoken to, and bias from the gender of the speaker.\nUsing this fine-grained framework, we automatically annotate eight large scale datasets with gender information.\nIn addition, we collect a novel, crowdsourced evaluation benchmark of utterance-level gender rewrites.\nDistinguishing between gender bias along multiple dimensions is important, as it enables us to train finer-grained gender bias classifiers.\nWe show our classifiers prove valuable for a variety of important applications, such as controlling 
for gender bias in generative models,\ndetecting gender bias in arbitrary text, and shed light on offensive language in terms of genderedness.\n", "citation": "@inproceedings{md_gender_bias,\n author = {Emily Dinan and\n Angela Fan and\n Ledell Wu and\n Jason Weston and\n Douwe Kiela and\n Adina Williams},\n editor = {Bonnie Webber and\n Trevor Cohn and\n Yulan He and\n Yang Liu},\n title = {Multi-Dimensional Gender Bias Classification},\n booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural\n Language Processing, {EMNLP} 2020, Online, November 16-20, 2020},\n pages = {314--331},\n publisher = {Association for Computational Linguistics},\n year = {2020},\n url = {https://www.aclweb.org/anthology/2020.emnlp-main.23/}\n}\n", "homepage": "https://parl.ai/projects/md_gender/", "license": "MIT License", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "binary_label": {"num_classes": 2, "names": ["ABOUT:female", "ABOUT:male"], "names_file": null, "id": null, "_type": "ClassLabel"}, "binary_score": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "md_gender_bias", "config_name": "yelp_inferred", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 260582945, "num_examples": 2577862, "dataset_name": "md_gender_bias"}, "validation": {"name": "validation", "num_bytes": 324349, "num_examples": 4492, "dataset_name": "md_gender_bias"}, "test": {"name": "test", "num_bytes": 53887700, "num_examples": 534460, "dataset_name": "md_gender_bias"}}, "download_checksums": {"http://parl.ai/downloads/md_gender/gend_multiclass_10072020.tgz": {"num_bytes": 232629010, "checksum": "c2c03257c53497b9e453600201fc7245b55dec1d98965093b4657fdb54822e9d"}}, "download_size": 232629010, "post_processing_size": null, "dataset_size": 314794994, "size_in_bytes": 547424004}}
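For reference, the configurations and splits enumerated in the deleted file (gendered_words, name_genders, new_data, funpedia, image_chat, wizard, convai2_inferred, light_inferred, opensubtitles_inferred, yelp_inferred) remain loadable through the `datasets` library. A minimal sketch, assuming the config and split names above are unchanged:

```python
from datasets import load_dataset

# Load one configuration described in the deleted metadata; split names for
# "name_genders" follow the yobYYYY pattern listed above.
ds = load_dataset("md_gender_bias", "name_genders", split="yob1880")

print(ds.num_rows)                     # 2000 examples according to the deleted metadata
print(ds.features["assigned_gender"])  # ClassLabel with names ["M", "F"]
print(ds[0])                           # {"name": ..., "assigned_gender": ..., "count": ...}
```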