Datasets:

Languages:
English
Multilinguality:
monolingual
Size Categories:
10K<n<100K
Language Creators:
found
Annotations Creators:
no-annotation
Source Datasets:
original
License:
system HF staff committed on
Commit
425f84b
1 Parent(s): 56002d5

Update files from the datasets library (from 1.10.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.10.0

blog_authorship_corpus.py CHANGED
@@ -41,33 +41,14 @@ _URL = "https://u.cs.biu.ac.il/~koppel/BlogCorpus.htm"
41
  _DATA_URL = "http://www.cs.biu.ac.il/~koppel/blogs/blogs.zip"
42
 
43
 
44
class BlogAuthorshipCorpusConfig(datasets.BuilderConfig):
    """BuilderConfig for BlogAuthorship."""

    def __init__(self, data_url, **kwargs):
        """BuilderConfig for BlogAuthorship

        Args:
            data_url: `string`, url to the dataset (word or raw level)
            **kwargs: keyword arguments forwarded to super.
        """
        # Pin the config version here; everything else is delegated to the base class.
        super(BlogAuthorshipCorpusConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
63
class BlogAuthorshipCorpus(datasets.GeneratorBasedBuilder):
    """Blog Authorship Corpus builder. TODO(BlogAuthorship): Short description of my dataset."""

    VERSION = datasets.Version("0.1.0")

    # Single word-level configuration pointing at the zipped corpus download.
    BUILDER_CONFIGS = [
        BlogAuthorshipCorpusConfig(
            name="blog-authorship-corpus",
            data_url=_DATA_URL,
            description="word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
    ]
@@ -98,71 +79,62 @@ class BlogAuthorshipCorpus(datasets.GeneratorBasedBuilder):
98
 
99
  def _split_generators(self, dl_manager):
100
  """Returns SplitGenerators."""
101
- if self.config.name == "blog-authorship-corpus":
102
- data = dl_manager.download_and_extract(self.config.data_url)
103
- data_dir = os.path.join(data, "blogs")
104
- files = sorted(glob.glob(os.path.join(data_dir, "*.xml")))
105
- train_files = []
106
- validation_files = []
107
-
108
- for i, file_path in enumerate(files):
109
- # 95% / 5% (train / val) split
110
- if i % 20 == 0:
111
- validation_files.append(file_path)
112
- else:
113
- train_files.append(file_path)
114
-
115
- return [
116
- datasets.SplitGenerator(
117
- name=datasets.Split.TRAIN,
118
- gen_kwargs={"files": train_files, "split": "train"},
119
- ),
120
- datasets.SplitGenerator(
121
- name=datasets.Split.VALIDATION,
122
- gen_kwargs={"files": validation_files, "split": "validation"},
123
- ),
124
- ]
125
- else:
126
- raise ValueError("{} does not exist".format(self.config.name))
127
 
128
  def _generate_examples(self, files, split):
129
  def parse_date(line):
130
  # parse line to date
131
  return line.strip().split("<date>")[-1].split("</date>")[0]
132
 
 
133
  for file_path in files:
134
- counter = 0
135
  file_name = os.path.basename(file_path)
136
  logger.info("generating examples from = %s", file_path)
137
  file_id, gender, age, job, horoscope = tuple(file_name.split(".")[:-1])
 
138
 
139
  # Note: import xml.etree.ElementTree as etree does not work. File cannot be parsed
140
  # use open instead
141
- with open(file_path, encoding="utf-8") as f:
142
- # some files are corrupted, so have to work with python`s try here
143
- try:
144
- date = ""
145
- for line in f:
146
- line = line.strip()
147
- if "<date>" in line:
148
- date = parse_date(line)
149
- elif line != "" and not line.startswith("<"):
150
- # need sub_id to be certain that no tf_records is identical
151
- sub_id = counter
152
- counter += 1
153
- if date == "":
154
- logger.warning("Date missing for {} in {}".format(line, file_name))
155
- assert date is not None, "Date is missing before {}".format(line)
156
- blog = {
157
- "text": line,
158
- "date": date,
159
- "gender": gender,
160
- "age": int(age),
161
- "job": job,
162
- "horoscope": horoscope,
163
- }
164
- yield "{}_{}_{}".format(file_id, sub_id, date), blog
165
- else:
166
- continue
167
- except UnicodeDecodeError as e:
168
- logger.warning("{} cannot be loaded. Error message: {}".format(file_path, e))
 
41
  _DATA_URL = "http://www.cs.biu.ac.il/~koppel/blogs/blogs.zip"
42
 
43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
class BlogAuthorshipCorpus(datasets.GeneratorBasedBuilder):
    """Blog Authorship Corpus builder. TODO(BlogAuthorship): Short description of my dataset."""

    VERSION = datasets.Version("1.0.0")

    # Single configuration; a plain BuilderConfig suffices since the download
    # URL is the module-level _DATA_URL constant.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="blog_authorship_corpus",
            version=VERSION,
            description="word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
    ]
 
79
 
80
  def _split_generators(self, dl_manager):
81
  """Returns SplitGenerators."""
82
+ data = dl_manager.download_and_extract(_DATA_URL)
83
+ data_dir = os.path.join(data, "blogs")
84
+ files = sorted(glob.glob(os.path.join(data_dir, "*.xml")))
85
+ train_files = []
86
+ validation_files = []
87
+
88
+ for i, file_path in enumerate(files):
89
+ # 95% / 5% (train / val) split
90
+ if i % 20 == 0:
91
+ validation_files.append(file_path)
92
+ else:
93
+ train_files.append(file_path)
94
+
95
+ return [
96
+ datasets.SplitGenerator(
97
+ name=datasets.Split.TRAIN,
98
+ gen_kwargs={"files": train_files, "split": "train"},
99
+ ),
100
+ datasets.SplitGenerator(
101
+ name=datasets.Split.VALIDATION,
102
+ gen_kwargs={"files": validation_files, "split": "validation"},
103
+ ),
104
+ ]
 
 
 
105
 
106
  def _generate_examples(self, files, split):
107
  def parse_date(line):
108
  # parse line to date
109
  return line.strip().split("<date>")[-1].split("</date>")[0]
110
 
111
+ key = 0
112
  for file_path in files:
 
113
  file_name = os.path.basename(file_path)
114
  logger.info("generating examples from = %s", file_path)
115
  file_id, gender, age, job, horoscope = tuple(file_name.split(".")[:-1])
116
+ # TODO: yield also file_id?
117
 
118
  # Note: import xml.etree.ElementTree as etree does not work. File cannot be parsed
119
  # use open instead
120
+ with open(file_path, encoding="latin_1") as f:
121
+ date = ""
122
+ for line in f:
123
+ line = line.strip()
124
+ if "<date>" in line:
125
+ date = parse_date(line)
126
+ elif line != "" and not line.startswith("<"):
127
+ if date == "":
128
+ logger.warning("Date missing for {} in {}".format(line, file_name))
129
+ assert date is not None, "Date is missing before {}".format(line)
130
+ yield key, {
131
+ "text": line,
132
+ "date": date,
133
+ "gender": gender,
134
+ "age": int(age),
135
+ "job": job,
136
+ "horoscope": horoscope,
137
+ }
138
+ key += 1
139
+ else:
140
+ continue
 
 
 
 
 
 
 
dataset_infos.json CHANGED
@@ -1 +1 @@
1
- {"blog-authorship-corpus": {"description": "The Blog Authorship Corpus consists of the collected posts of 19,320 bloggers gathered from blogger.com in August 2004. The corpus incorporates a total of 681,288 posts and over 140 million words - or approximately 35 posts and 7250 words per person.\n\nEach blog is presented as a separate file, the name of which indicates a blogger id# and the blogger\u2019s self-provided gender, age, industry and astrological sign. (All are labeled for gender and age but for many, industry and/or sign is marked as unknown.)\n\nAll bloggers included in the corpus fall into one of three age groups:\n\n\u00b7 8240 \"10s\" blogs (ages 13-17),\n\n\u00b7 8086 \"20s\" blogs(ages 23-27)\n\n\u00b7 2994 \"30s\" blogs (ages 33-47).\n\nFor each age group there are an equal number of male and female bloggers.\n\nEach blog in the corpus includes at least 200 occurrences of common English words. All formatting has been stripped with two exceptions. Individual posts within a single blogger are separated by the date of the following post and links within a post are denoted by the label urllink.\n\nThe corpus may be freely used for non-commercial research purposes\n", "citation": "@inproceedings{schler2006effects,\n title={Effects of age and gender on blogging.},\n author={Schler, Jonathan and Koppel, Moshe and Argamon, Shlomo and Pennebaker, James W},\n booktitle={AAAI spring symposium: Computational approaches to analyzing weblogs},\n volume={6},\n pages={199--205},\n year={2006}\n}\n", "homepage": "https://u.cs.biu.ac.il/~koppel/BlogCorpus.htm", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "gender": {"dtype": "string", "id": null, "_type": "Value"}, "age": {"dtype": "int32", "id": null, "_type": "Value"}, "horoscope": {"dtype": "string", "id": null, "_type": "Value"}, "job": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, 
"builder_name": "blog_authorship_corpus", "config_name": "blog-authorship-corpus", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 610252351, "num_examples": 532812, "dataset_name": "blog_authorship_corpus"}, "validation": {"name": "validation", "num_bytes": 37500394, "num_examples": 31277, "dataset_name": "blog_authorship_corpus"}}, "download_checksums": {"http://www.cs.biu.ac.il/~koppel/blogs/blogs.zip": {"num_bytes": 312949121, "checksum": "1dfa6996663515a4baf8c1b71713ce8fe9a314b13778701447e4663bbc64c983"}}, "download_size": 312949121, "dataset_size": 647752745, "size_in_bytes": 960701866}}
 
1
+ {"blog_authorship_corpus": {"description": "The Blog Authorship Corpus consists of the collected posts of 19,320 bloggers gathered from blogger.com in August 2004. The corpus incorporates a total of 681,288 posts and over 140 million words - or approximately 35 posts and 7250 words per person.\n\nEach blog is presented as a separate file, the name of which indicates a blogger id# and the blogger\u2019s self-provided gender, age, industry and astrological sign. (All are labeled for gender and age but for many, industry and/or sign is marked as unknown.)\n\nAll bloggers included in the corpus fall into one of three age groups:\n\n\u00b7 8240 \"10s\" blogs (ages 13-17),\n\n\u00b7 8086 \"20s\" blogs(ages 23-27)\n\n\u00b7 2994 \"30s\" blogs (ages 33-47).\n\nFor each age group there are an equal number of male and female bloggers.\n\nEach blog in the corpus includes at least 200 occurrences of common English words. All formatting has been stripped with two exceptions. Individual posts within a single blogger are separated by the date of the following post and links within a post are denoted by the label urllink.\n\nThe corpus may be freely used for non-commercial research purposes\n", "citation": "@inproceedings{schler2006effects,\n title={Effects of age and gender on blogging.},\n author={Schler, Jonathan and Koppel, Moshe and Argamon, Shlomo and Pennebaker, James W},\n booktitle={AAAI spring symposium: Computational approaches to analyzing weblogs},\n volume={6},\n pages={199--205},\n year={2006}\n}\n", "homepage": "https://u.cs.biu.ac.il/~koppel/BlogCorpus.htm", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "date": {"dtype": "string", "id": null, "_type": "Value"}, "gender": {"dtype": "string", "id": null, "_type": "Value"}, "age": {"dtype": "int32", "id": null, "_type": "Value"}, "horoscope": {"dtype": "string", "id": null, "_type": "Value"}, "job": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, 
"supervised_keys": null, "task_templates": null, "builder_name": "blog_authorship_corpus", "config_name": "blog_authorship_corpus", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 753833081, "num_examples": 689793, "dataset_name": "blog_authorship_corpus"}, "validation": {"name": "validation", "num_bytes": 41236028, "num_examples": 37919, "dataset_name": "blog_authorship_corpus"}}, "download_checksums": {"http://www.cs.biu.ac.il/~koppel/blogs/blogs.zip": {"num_bytes": 312949121, "checksum": "1dfa6996663515a4baf8c1b71713ce8fe9a314b13778701447e4663bbc64c983"}}, "download_size": 312949121, "post_processing_size": null, "dataset_size": 795069109, "size_in_bytes": 1108018230}}
dummy/{blog-authorship-corpus → blog_authorship_corpus}/1.0.0/dummy_data.zip RENAMED
File without changes