""" Script used to tag the data with POS tags. """
import os
import re
from transformers import AutoTokenizer
import nltk, sys
TOKENIZER_NAME = 'cambridge-climb/CamBabyTokenizer-8192'
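
# Manual map from the unsupervised tagger's predicted labels to universal POS
# tags. The keys appear to be the representative (anchor) words that identify
# each of the tagger's hidden states.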
UNSUPERVISED_POS_TAG_MAP = {
    "and": 'CONJ',
    "|": 'NOUN',
    "states": 'NOUN',
    "school": 'NOUN',
    ".\"": '.',
    "-": '.',
    "five": 'NUM',
    "1": 'NUM',
    "they": 'PRON',
    "of": 'ADP',
    "are": 'VERB',
    "(": '.',
    "american": 'ADJ',
    "'s": 'VERB',
    "\"": 'NOUN',
    "the": 'DET',
    "a": 'DET',
    "after": 'ADP',
    "th": 'NOUN',
    "good": 'ADJ',
    "her": 'PRON',
    "night": 'NOUN',
    "to": 'PRT',
    "used": 'VERB',
    ",": '.',
    "sir": 'NOUN',
    "tell": 'VERB',
    "lot": 'NOUN',
    "amp": 'NOUN',
    "doing": 'VERB'
}

def tag_with_nltk(text, en_ptb_map):
    """ Given a list of text, tag each word with its POS tag using NLTK """
    new_lines = []
    for line in text:
        tokens = line.split()
        tagged = nltk.pos_tag(tokens)
        # Map the NLTK PTB tags to the universal tags
        tagged = [(token, en_ptb_map[tag]) for (token, tag) in tagged]
        new_lines.append(tagged)
    return new_lines
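
# Illustrative result of tag_with_nltk(["the cat sleeps"], en_ptb_map)
# (the exact tags depend on the NLTK model and the en-ptb.map file):
#   [[('the', 'DET'), ('cat', 'NOUN'), ('sleeps', 'VERB')]]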

def write_to_file(tagged, output_file):
    """ Given a list of tagged lines, write them to the given output file """
    with open(output_file, 'w') as f:
        for line in tagged:
            for token, tag in line:
                f.write(f'{token}__<label>__{tag} ')
            f.write('\n')
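
# Illustrative output line produced by write_to_file (one sentence per line,
# with a trailing space before the newline):
#   the__<label>__DET cat__<label>__NOUN sleeps__<label>__VERB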

def tokenize_lines(text, tokenizer):
    """ Pre-tokenize each line with the tokenizer and rejoin the pieces with spaces """
    new_lines = []
    for line in text:
        tokens = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(line)
        # Strip the byte-level markers: 'Ġ' encodes a leading space and 'Ċ' a newline
        tokens = [t[0].replace("Ġ", "").replace('Ċ', '\n') for t in tokens]
        new_lines.append(' '.join(tokens))
    return new_lines
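
# Expected format of the tagger's prediction file parsed by get_tags_from_file
# (inferred from the parsing below): one "token gold_tag _ pred_tag" line per
# token, fields separated by single spaces, with a blank line between sentences.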

def get_tags_from_file(file):
    """ Read gold and predicted tags from the tagger output file and report accuracy """
    with open(file, 'r') as f:
        lines = f.readlines()
    gold_tagged_lines = []
    pred_tagged_lines = []
    gold_tagged = []
    pred_tagged = []
    total = 0
    correct = 0
    for line in lines:
        line = line.strip()
        if line == '':
            gold_tagged_lines.append(gold_tagged)
            pred_tagged_lines.append(pred_tagged)
            gold_tagged = []
            pred_tagged = []
        else:
            token, gold_tag, _, pred_tag = line.split(' ')
            gold_tagged.append((token, gold_tag))
            # Use the manual map to map the predicted tags to the universal tags
            pred_tagged.append((token, UNSUPERVISED_POS_TAG_MAP[pred_tag]))
            total += 1
            if gold_tag == UNSUPERVISED_POS_TAG_MAP[pred_tag]:
                correct += 1
    print(f'Unsupervised Tagging Accuracy: {correct/total}')
    return gold_tagged_lines, pred_tagged_lines

def write_tagged_lines(filename, text, tagged_lines):
    """ Write each original line followed by its tagged version to the given file """
    with open(filename, 'w') as f:
        # Write the filename as the first line
        f.write(filename.split('/')[-1] + '\n')
        for line, tagged in zip(text, tagged_lines):
            f.write(line)
            f.write(' '.join([f'{token}__<label>__{tag}' for token, tag in tagged]) + '\n')
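
# Configuration: which BabyLM splits to process, which source directory to read
# from, and whether to run the unsupervised HMM tagger or only save the NLTK
# "gold" tags.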
FOLDERS = ['10M', '100M', 'dev', 'test']
SECTION = "original"
RUN_UNSUPERVISED_TAGGER = True

if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_NAME)

    # Collect all text files from the chosen section of the data
    all_files = []
    for folder in FOLDERS:
        for root, dirs, files in os.walk(f"{SECTION}/{folder}"):
            for file in files:
                if file.endswith(".txt"):
                    all_files.append(os.path.join(root, file))

    # Get map from PTB tags to universal tags
    en_ptb_map = {}
    with open('en-ptb.map', 'r') as f:
        for line in f.readlines():
            (key, val) = line.split()
            en_ptb_map[key] = val

    for file in all_files:
        print(file)
        with open(file, 'r') as f:
            lines = f.readlines()[1:]
        lines = [line.strip() + '\n' for line in lines if line.strip() != '']
        tagged_file = file.replace(f'{SECTION}', f'{SECTION}_tagged')
        gold_tagged_file = file.replace(f'{SECTION}', f'{SECTION}_tagged_gold')

        # 1. Tokenize the lines in the text and tag them with universal tags
        tokenized = tokenize_lines(lines, tokenizer)
        tagged = tag_with_nltk(tokenized, en_ptb_map)

        if not RUN_UNSUPERVISED_TAGGER:
            # Save only the gold tags
            gold_tagged_lines = tagged
            os.makedirs(os.path.dirname(gold_tagged_file), exist_ok=True)
            write_tagged_lines(gold_tagged_file, lines, tagged)
            continue

        # 2. Write the tagged lines to a tmp file and run the unsupervised tagger on it
        write_to_file(tagged, 'tmp.txt')
        os.system('./../anchor/hmm --output ../pos_tagging/10M_train_30_extended --data tmp.txt --pred tmp_tagged.txt')

        # 3. Get the gold tags and predicted tags
        gold_tagged_lines, pred_tagged_lines = get_tags_from_file('tmp_tagged.txt')
        os.remove('tmp.txt')
        os.remove('tmp_tagged.txt')
        assert len(gold_tagged_lines) == len(pred_tagged_lines) == len(lines)

        # 4. Write the predicted and gold tagged lines to the output files
        os.makedirs(os.path.dirname(tagged_file), exist_ok=True)
        write_tagged_lines(tagged_file, lines, pred_tagged_lines)
        os.makedirs(os.path.dirname(gold_tagged_file), exist_ok=True)
        write_tagged_lines(gold_tagged_file, lines, gold_tagged_lines)