""" Script used to tag the data with POS tags. """ import os import re from transformers import AutoTokenizer import nltk, sys TOKENIZER_NAME = 'cambridge-climb/CamBabyTokenizer-8192' UNSUPERVISED_POS_TAG_MAP = { "and" : 'CONJ', "|" : 'NOUN', "states" : 'NOUN', "school" : 'NOUN', ".\"" : '.', "-" : '.', "five" : 'NUM', "1" : 'NUM', "they" : 'PRON', "of" : 'ADP', "are" : 'VERB', "(" : '.', "american" : 'ADJ', "'s" : 'VERB', "\"" : 'NOUN', "the" : 'DET', "a" : 'DET', "after" : 'ADP', "th" : 'NOUN', "good" : 'ADJ', "her" : 'PRON', "night" : 'NOUN', "to" : 'PRT', "used" : 'VERB', "," : '.', "sir" : 'NOUN', "tell" : 'VERB', "lot" : 'NOUN', "amp" : 'NOUN', "doing" : 'VERB' } def tag_with_nltk(text, en_ptb_map): """ Given a list of text, tag each word with its POS tag using NLTK """ new_lines = [] for line in text: tokens = line.split() tagged = nltk.pos_tag(tokens) # Map the NLTK PTB tags to the universal tags tagged = [(token, en_ptb_map[tag]) for (token, tag) in tagged] new_lines.append(tagged) return new_lines def write_to_file(tagged, output_file): """ Given a list of tagged lines, write them to the given output file """ with open(output_file, 'w') as f: for line in tagged: for token, tag in line: f.write(f'{token}__