import os

from utils import parse, read_json_file, write_jsonl_file


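# Prefix sums of token lengths: prefix_lengths[i] is the number of characters
# in the first i tokens, so prefix_lengths[j] - prefix_lengths[i] is the
# character count of tokens[i:j].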
def get_prefix_lengths(lengths):
    prefix_lengths = [0]
    for length in lengths:
        prefix_lengths.append(prefix_lengths[-1] + length)

    return prefix_lengths


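# Reassemble the surface form of tokens[start:end], approximating the
# detokenization of the original transcript.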
def custom_join(tokens, start, end):
    joined_str = ""
    for i in range(start, end):
        joined_str += tokens[i]
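        # Append a separating space unless this is the last token, the next
        # token attaches directly ("-", ",", or a clitic such as "'s"), or
        # the current token is a hyphen.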
        if (
            i == end - 1
            or tokens[i + 1] in ["-", ","]
            or tokens[i + 1].startswith("'")
            or tokens[i] == "-"
        ):
            continue
        joined_str += " "

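    # If the span contains a quoted segment, strip the spaces that token
    # joining introduced just inside the first pair of double quotes.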
    if joined_str.count('"') > 1:
        start = joined_str.index('"')
        end = joined_str[start + 1 :].index('"')
        joined_str = (
            joined_str[: start + 1]
            + joined_str[start + 1 :][:end].strip()
            + joined_str[start + 1 :][end:]
        )

    return joined_str


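# Map token-level character annotations onto character offsets in the raw
# utterance. Tokens contain no spaces, so span boundaries are found by
# counting non-space characters in the utterance against the token prefix sums.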
def parse_characters(utterance, tokens, character_entities):
    utterance_char_idx = 0
    characters = []
    for sent_idx, sent in enumerate(character_entities):
        prefix_lengths = get_prefix_lengths(map(len, tokens[sent_idx]))
        for character in sent:
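            # An annotation is [token_start, token_end, name, ...]; everything
            # after the two token offsets names the entity.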
span = ", ".join(character[2:]) |
|
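            # Walk forward from the sentence start, consuming the non-space
            # characters of the tokens that precede the span, and stop at the
            # span's first non-space character.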
            scan_length = prefix_lengths[character[0]]
            start = utterance_char_idx
            while scan_length >= 0 and start < len(utterance):
                if scan_length == 0 and utterance[start] != " ":
                    break

                if utterance[start] != " ":
                    scan_length -= 1

                start += 1

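            # Consume exactly the span's own non-space characters to find its
            # end offset (exclusive).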
            scan_length = prefix_lengths[character[1]] - prefix_lengths[character[0]]
            end = start
            while scan_length > 0 and end < len(utterance):
                if utterance[end] != " ":
                    scan_length -= 1

                end += 1

            characters.append({"value": span, "start": start, "end": end})

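            # One span in the data cannot be reconstructed from its tokens;
            # skip the round-trip sanity check for it.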
            if utterance[start:end] == "Emily- noooo":
                continue

            assert utterance[start:end] == custom_join(
                tokens[sent_idx], character[0], character[1]
            )

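        # Advance the utterance cursor past this entire sentence before
        # processing the next one.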
        scan_length = prefix_lengths[-1]
        while scan_length >= 0 and utterance_char_idx < len(utterance):
            if scan_length == 0 and utterance[utterance_char_idx] != " ":
                break

            if utterance[utterance_char_idx] != " ":
                scan_length -= 1

            utterance_char_idx += 1

    return characters


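# Convert one split of the character-identification data into the JSONL dialog
# format: one record per scene, with per-utterance character spans and the
# scene's roles attached as knowledge.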
def preprocess(args, split):
    input_file = os.path.join(args.input_dir, f"character-identification-{split}.json")

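    # The raw files are named trn/dev/tst; rename the splits for the output.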
    if split == "trn":
        split = "train"
    elif split == "tst":
        split = "test"

    output_file = os.path.join(args.output_dir, f"{split}.jsonl")

    episodes = read_json_file(input_file)["episodes"]

    processed_data = []
    for episode in episodes:
        scenes = episode["scenes"]
        for scene in scenes:
            utterances = scene["utterances"]
            dialog = {
                "turn": "multi",
                "locale": "en",
                "dialog": [],
            }

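            # Special role labels used by the annotations alongside the named
            # speakers collected below.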
            roles = ["#GENERAL#", "#OTHER#", "#ALL#"]

            for example in utterances:
                utterance = example["transcript"]
                tokens = example["tokens"]
                character_entities = example["character_entities"]

                characters = parse_characters(utterance, tokens, character_entities)
                dialog["dialog"].append(
                    {
                        "roles": example["speakers"],
                        "utterance": utterance,
                        "characters": characters,
                    }
                )

                roles += example["speakers"]

dialog["knowledge"] = {"type": "lsit", "value": sorted(roles)} |
|
processed_data.append(dialog) |
|
|
|
write_jsonl_file(processed_data, output_file) |
|
|
|
|
|
if __name__ == "__main__":
    args = parse()
    preprocess(args, "trn")
    preprocess(args, "dev")
    preprocess(args, "tst")