Commit a966ae1 by Zeb
Parent: 0e4956d

Add scripts to tag data and improve cleaning

Files changed (3):
  1. BabyLM.py +29 -6
  2. clean_data.py +53 -32
  3. tag_data.py +149 -0
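
The new tagging pipeline stores each corpus as pairs of rows: a cleaned sentence followed by the same tokens annotated as token__<label>__TAG, which the updated loader then reads back in pairs. A minimal sketch of that layout (the sentence and tags here are made up for illustration, not taken from the data):

    # Row 1: cleaned text; row 2: the same tokens with universal POS tags.
    raw_row    = "the dog barked\n"
    tagged_row = "the__<label>__DET dog__<label>__NOUN barked__<label>__VERB\n"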
BabyLM.py CHANGED
@@ -36,6 +36,16 @@ class BabyLM(datasets.GeneratorBasedBuilder):
             description="Full version of the dataset with 100M words",
             version="1.0.0",
         )
+        datasets.BuilderConfig(
+            name="strict_small_gold",
+            description="Small version of the dataset with 10M words and gold POS tags",
+            version="1.0.0",
+        ),
+        datasets.BuilderConfig(
+            name="strict_gold",
+            description="Full version of the dataset with 100M words and gold POS tags",
+            version="1.0.0",
+        )
     ]
 
     DEFAULT_CONFIG_NAME = "strict_small"
@@ -44,6 +54,7 @@ class BabyLM(datasets.GeneratorBasedBuilder):
         features = datasets.Features(
             {
                 "text": datasets.Value("string"),
+                "tagged_text": datasets.Value("string"),
                 "filename": datasets.Value("string"),
             }
         )
@@ -59,16 +70,20 @@ class BabyLM(datasets.GeneratorBasedBuilder):
         """
         Returns data for different splits
         """
-
+
         if self.config.name == "strict_small":
             train_data_dir = "10M"
         else:
             train_data_dir = "100M"
+        if 'gold' in self.config.name:
+            folder = 'tagged_gold'
+        else:
+            folder = 'tagged'
 
         urls_to_download = {
-            "train": [f"clean/{train_data_dir}/{fn}" for fn in filenames],
-            "dev": [f"clean/dev/{fn}" for fn in filenames],
-            "test": [f"clean/test/{fn}" for fn in filenames]
+            "train": [f"{folder}/{train_data_dir}/{fn}" for fn in filenames],
+            "dev": [f"{folder}/dev/{fn}" for fn in filenames],
+            "test": [f"{folder}/test/{fn}" for fn in filenames]
         }
 
         downloaded_files = dl_manager.download_and_extract(urls_to_download)
@@ -108,6 +123,14 @@ class BabyLM(datasets.GeneratorBasedBuilder):
         for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                 filename = filepath.split("/")[-1]
+                is_tags = False
+                text = ""
+                # Every other row contains POS tags
                 for row in f:
-                    yield global_idx, {"text": row, "filename": filename}
-                    global_idx += 1
+                    if is_tags:
+                        yield global_idx, {"text": text, "tagged_text": row, "filename": filename}
+                        global_idx += 1
+                        is_tags = False
+                    else:
+                        text = row
+                        is_tags = True
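
For reference, a minimal sketch of how the new configurations could be consumed once the tagged files are in place. The repository path below is a placeholder, not something defined in this commit:

    from datasets import load_dataset

    # "path/to/BabyLM" is a placeholder for wherever this dataset script is hosted.
    dataset = load_dataset("path/to/BabyLM", "strict_small_gold")

    example = dataset["train"][0]
    print(example["text"])         # cleaned sentence
    print(example["tagged_text"])  # same sentence as token__<label>__TAG pairs
    print(example["filename"])     # source corpus file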
clean_data.py CHANGED
@@ -5,17 +5,24 @@ import re
 from nltk import tokenize
 
 def clean_aochildes(lines):
-    """ For aochildes, we just remove the space between the punctuation mark and the final word """
+    """ For aochildes, we remove the space between the punctuation mark and the final word and join together every 5 lines """
     new_lines = []
-    for line in lines:
-        new_lines.append(line[:-3] + line[-2:])
+    joined = []
+    for i, line in enumerate(lines):
+        new_line = line[:-3] + line[-2:]
+        joined.append(new_line.strip())
+        if i % 5 == 0:
+            new_lines.append(" ".join(joined) + "\n")
+            joined = []
     return new_lines
 
 def clean_bnc_spoken(lines):
     """ For bnc_spoken, we lowercase """
     new_lines = []
     for line in lines:
-        new_lines.append(line.lower())
+        new_line = line.lower()
+        if new_line != '\n':
+            new_lines.append(new_line)
     return new_lines
 
 def clean_cbt(lines):
@@ -38,15 +45,16 @@ def clean_cbt(lines):
     return new_lines
 
 def clean_children_stories(lines):
-    """ For children_stories, we lowercase and split long lines into sentences """
+    """ For children_stories, we lowercase """
     new_lines = []
     for line in lines:
-        sentences = [s + '\n' for s in tokenize.sent_tokenize(line.lower().strip()) if s != '']
-        new_lines.extend(sentences)
+        new_line = line.lower().strip()
+        if new_line != '':
+            new_lines.append(new_line + "\n")
     return new_lines
 
 def clean_gutenberg(lines):
-    """ For gutenberg, we lowercase, remove italics, group lines into paragraphs and then split into sentences. We also remove any lines containing '*' or 'p.' """
+    """ For gutenberg, we lowercase, remove italics and group lines into paragraphs. We also remove any lines containing '*' or 'p.' """
     # Get paragraphs
     paragraphs = []
     paragraph = ""
@@ -54,31 +62,27 @@ def clean_gutenberg(lines):
         # Remove italics
         tmp_line = line.lower().strip().replace('_','')
         if tmp_line == "" and paragraph != "":
-            paragraphs.append(paragraph[:-1] + '\n')
+            if len(paragraph.split()) > 2 and not paragraph.split()[-1][-1].isnumeric(): # Remove paragraphs with less than 3 words and those that end in a number (probably part of a bibliography)
+                paragraphs.append(paragraph[:-1] + '\n')
             paragraph = ""
         else:
             paragraph += tmp_line + " "
 
     # Bad characters - gutenberg has a lot of figures, footnotes, chapter names etc that we want to remove
     bad_chars = ['*', 'p.', '=', '|', '[', ']', ' ', ' ', 'v.']
-
-    # Split into sentences using NLTK
-    new_lines = []
-    for paragraph in paragraphs:
-        sentences = [s + '\n' for s in tokenize.sent_tokenize(paragraph) if s != '' and s[0] != '(']
-        sentences = [s for s in sentences if not any([c in s for c in bad_chars])]
-        if len(sentences) > 0:
-            new_lines.extend(sentences)
+    new_lines = [p.strip()+'\n' for p in paragraphs if not any([c in p for c in bad_chars]) and p != '' and p != '\n' and p[0] != '(']
     return new_lines
 
 def clean_open_subtitles(lines):
-    """ For open_subtitles, we lowercase, remove subtitle dashes and fix the lowercase 'l' problem """
+    """ For open_subtitles, we lowercase, remove subtitle dashes and fix the lowercase 'l' problem. We also join every 5 lines. """
     punctuation = ['.', ',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', '"', "'", '“', '”', '—', '–', ' ', '\n']
     new_lines = []
+    joined = []
+    count = 0
     for line in lines:
         new_line = line.lower()
         # Skip music lines
-        if '♪' in new_line or '[' in new_line or ']' in new_line:
+        if '♪' in new_line or '[' in new_line or ']' in new_line or '‎' in new_line:
             continue
         if new_line[0:2] in ["- ", "– ", "— "]:
             new_line = new_line[2:]
@@ -100,14 +104,21 @@ def clean_open_subtitles(lines):
         new_line = new_line.replace(' lt', ' it')
         new_line = new_line.replace(' lt', ' it')
         new_line = new_line.replace(' lv', ' iv')
-        new_lines.append(new_line.strip() + '\n')
+        if new_line.strip() != '':
+            joined.append(new_line.strip())
+            count += 1
+            if count % 5 == 0:
+                new_lines.append(" ".join(joined) + '\n')
+                joined = []
     return new_lines
 
 def clean_qed(lines):
     """ For qed, we lowercase and normalise punctuation, remove words contained in parentheses,
-    remove lines that arejust character's names and fix the lowercase 'l' problem"""
+    remove lines that are just character's names and fix the lowercase 'l' problem. We also join every 5 lines. """
 
     new_lines = []
+    count = 0
+    joined = []
     for line in lines:
         # Before lowercasing, check if the words in the line are uppercase containing lowercase 'l' instead of 'I' and fix accordingly
         words = line.split()
@@ -150,11 +161,15 @@ def clean_qed(lines):
 
         new_line = new_line.strip()
         if new_line != "":
-            new_lines.append(new_line + '\n')
+            joined.append(new_line)
+            count += 1
+            if count % 5 == 0:
+                new_lines.append(" ".join(joined) + '\n')
+                joined = []
     return new_lines
 
 def clean_simple_wikipedia(lines):
-    """ For simple_wikipedia, we lowercase, remove empty lines and article names and split paragraphs into sentences."""
+    """ For simple_wikipedia, we lowercase, remove empty lines and article names."""
     new_lines = []
     next_line_is_article_name = False
     for line in lines:
@@ -164,20 +179,26 @@ def clean_simple_wikipedia(lines):
         if line.strip() == "":
             next_line_is_article_name = True
            continue
-        sentences = [s + '\n' for s in tokenize.sent_tokenize(line.lower()) if s != '']
-        new_lines.extend(sentences)
+        if len(line.split()) > 2:
+            new_lines.append(line.lower())
     return new_lines
 
 def clean_switchboard(lines):
-    """ For switchboard, we lowercase """
+    """ For switchboard, we lowercase and join every 5 lines. """
     new_lines = []
+    count = 0
+    joined = []
     for line in lines:
-        new_line = line.lower()
-        new_lines.append(new_line)
+        new_line = line.lower().strip()
+        joined.append(new_line)
+        count += 1
+        if count % 10 == 0:
+            new_lines.append(" ".join(joined) + '\n')
+            joined = []
     return new_lines
 
 def clean_wikipedia(lines):
-    """ For wikipedia, we lowercase, remove empty lines and article names and split paragraphs into sentences.
+    """ For wikipedia, we lowercase and remove empty lines and article names.
     We also remove lines that seem to be figure names or table entries. """
     new_lines = []
     for line in lines:
@@ -201,9 +222,7 @@ def clean_wikipedia(lines):
         if all_numeric or all_uppercase:
             continue
 
-        # Split into sentences using NLTK
-        sentences = [s + '\n' for s in tokenize.sent_tokenize(new_line.lower()) if s != '']
-        new_lines.extend(sentences)
+        new_lines.append(new_line.lower().strip() + '\n')
     return new_lines
 
 CLEAN_FUNCTIONS = {'aochildes' : clean_aochildes, 'bnc_spoken' : clean_bnc_spoken, 'cbt' : clean_cbt, 'children_stories' : clean_children_stories, 'gutenberg' : clean_gutenberg, 'open_subtitles' : clean_open_subtitles, 'qed' : clean_qed, 'simple_wikipedia' : clean_simple_wikipedia, 'switchboard' : clean_switchboard, 'wikipedia' : clean_wikipedia}
@@ -230,6 +249,8 @@ if __name__ == "__main__":
     # Clean the data
     if CLEAN_FUNCTIONS[corpus_name] is not None:
         lines = CLEAN_FUNCTIONS[corpus_name](lines)
+    # Replace multiple spaces with single space
+    lines = [re.sub(' +', ' ', line) for line in lines if line.strip() != '']
 
     # Write the new file
     new_file = file.replace('original', 'clean')
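
Several of the updated cleaners (aochildes, open_subtitles, qed, switchboard) now buffer consecutive lines and emit them as single rows instead of one sentence per row. A minimal standalone sketch of that grouping step, assuming already-cleaned, non-empty input lines; the functions in the diff keep their own running counters and group sizes (5 or 10 lines):

    def join_lines(lines, group_size=5):
        # Join consecutive cleaned lines into single rows of group_size lines each.
        new_lines = []
        joined = []
        for line in lines:
            joined.append(line.strip())
            if len(joined) == group_size:
                new_lines.append(" ".join(joined) + "\n")
                joined = []
        # Any trailing partial group is dropped, mirroring the behaviour in the diff above.
        return new_lines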
tag_data.py ADDED
@@ -0,0 +1,149 @@
+""" Script used to tag the data with POS tags. """
+
+import os
+import re
+from transformers import AutoTokenizer
+
+import nltk, sys
+
+UNSUPERVISED_POS_TAG_MAP = {
+    "and" : 'CONJ',
+    "|" : 'NOUN',
+    "states" : 'NOUN',
+    "school" : 'NOUN',
+    ".\"" : '.',
+    "-" : '.',
+    "five" : 'NUM',
+    "1" : 'NUM',
+    "they" : 'PRON',
+    "of" : 'ADP',
+    "are" : 'VERB',
+    "(" : '.',
+    "american" : 'ADJ',
+    "'s" : 'VERB',
+    "\"" : 'NOUN',
+    "the" : 'DET',
+    "a" : 'DET',
+    "after" : 'ADP',
+    "th" : 'NOUN',
+    "good" : 'ADJ',
+    "her" : 'PRON',
+    "night" : 'NOUN',
+    "to" : 'PRT',
+    "used" : 'VERB',
+    "," : '.',
+    "sir" : 'NOUN',
+    "tell" : 'VERB',
+    "lot" : 'NOUN',
+    "amp" : 'NOUN',
+    "doing" : 'VERB'
+}
+
+def tag_with_nltk(text, en_ptb_map):
+    """ Given a list of text, tag each word with its POS tag using NLTK """
+    new_lines = []
+    for line in text:
+        tokens = line.split()
+        tagged = nltk.pos_tag(tokens)
+        # Map the NLTK PTB tags to the universal tags
+        tagged = [(token, en_ptb_map[tag]) for (token, tag) in tagged]
+        new_lines.append(tagged)
+    return new_lines
+
+def write_to_file(tagged, output_file):
+    """ Given a list of tagged lines, write them to the given output file """
+    with open(output_file, 'w') as f:
+        for line in tagged:
+            for token, tag in line:
+                f.write(f'{token}__<label>__{tag} ')
+            f.write('\n')
+
+def tokenize_lines(text, tokenizer):
+    new_lines = []
+    for line in text:
+        tokens = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(line)
+        tokens = [t[0].replace("Ġ", "").replace('Ċ','\n') for t in tokens]
+        new_lines.append(' '.join(tokens))
+    return new_lines
+
+def get_tags_from_file(file):
+    with open(file, 'r') as f:
+        lines = f.read().splitlines()
+
+    gold_tagged_lines = []
+    pred_tagged_lines = []
+    gold_tagged = []
+    pred_tagged = []
+    total = 0
+    correct = 0
+    for line in lines:
+        if line == '':
+            gold_tagged_lines.append(gold_tagged)
+            pred_tagged_lines.append(pred_tagged)
+            gold_tagged = []
+            pred_tagged = []
+        else:
+            token, gold_tag, _, pred_tag = line.strip().split(' ')
+            gold_tagged.append((token, gold_tag))
+            # Use the manual map to map the predicted tags to the universal tags
+            pred_tagged.append((token, UNSUPERVISED_POS_TAG_MAP[pred_tag]))
+            total += 1
+            if gold_tag == UNSUPERVISED_POS_TAG_MAP[pred_tag]:
+                correct += 1
+    print(f' Unsupervised Tagging Accuracy: {correct/total}')
+
+    return gold_tagged_lines, pred_tagged_lines
+
+def write_tagged_lines(filename, text, tagged_lines):
+    with open(filename, 'w') as f:
+        for line, tagged in zip(text, tagged_lines):
+            f.write(line)
+            f.write(' '.join([f'{token}__<label>__{tag}' for token, tag in tagged]) + '\n')
+
+tokenizer = AutoTokenizer.from_pretrained("CamBabyTrainers/BabyBERTa-3-8192-tokenizer")
+
+FOLDERS = ['10M', '100M', 'dev', 'test']
+
+if __name__ == "__main__":
+
+    # Read all text files from directory "BabyLM"
+    all_files = []
+    for folder in FOLDERS:
+        for root, dirs, files in os.walk(f"clean/{folder}"):
+            for file in files:
+                if file.endswith(".txt"):
+                    all_files.append(os.path.join(root, file))
+
+    # Get map from PTB tags to universal tags
+    en_ptb_map = {}
+    with open('../pos_tagging/en-ptb.map', 'r') as f:
+        for line in f.readlines():
+            (key, val) = line.split()
+            en_ptb_map[key] = val
+
+    for file in all_files:
+        print(file)
+        with open(file, 'r') as f:
+            lines = f.readlines()
+
+        # 1. Tokenize the lines in the text, tag with universal tags and write to tmp file
+        tokenized = tokenize_lines(lines, tokenizer)
+        tagged = tag_with_nltk(tokenized, en_ptb_map)
+        write_to_file(tagged, 'tmp.txt')
+
+        # 2. Run the unsupervised tagger on the tmp file
+        os.system(f'./../anchor/hmm --output ../pos_tagging/10M_train_30_extended --data tmp.txt --pred tmp_tagged.txt')
+
+        # 3. Get the gold tags and predicted tags
+        gold_tagged_lines, pred_tagged_lines = get_tags_from_file('tmp_tagged.txt')
+
+        assert len(gold_tagged_lines) == len(pred_tagged_lines) == len(lines)
+
+        # 4. Write the tagged lines to the original file
+        new_file = file.replace('clean', 'tagged')
+        os.makedirs(os.path.dirname(new_file), exist_ok=True)
+        write_tagged_lines(new_file, lines, pred_tagged_lines)
+
+        new_file = file.replace('clean', 'tagged_gold')
+        os.makedirs(os.path.dirname(new_file), exist_ok=True)
+        write_tagged_lines(new_file, lines, gold_tagged_lines)
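
Downstream code such as the updated BabyLM.py generator receives each tagged row as a single string. A minimal sketch of turning such a row back into (token, tag) pairs, assuming the token__<label>__TAG format written by write_to_file and write_tagged_lines above; parse_tagged_row is a hypothetical helper, not part of this commit:

    def parse_tagged_row(row):
        # Split a "token__<label>__TAG token__<label>__TAG ..." row into (token, tag) pairs.
        pairs = []
        for item in row.split():
            token, tag = item.split("__<label>__")
            pairs.append((token, tag))
        return pairs

    # e.g. parse_tagged_row("the__<label>__DET dog__<label>__NOUN") == [("the", "DET"), ("dog", "NOUN")]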