import json
import re
import sys
from typing import List

from datasets import load_dataset, Value, Sequence

ENTITY = 'entity'
# Regex that matches a marked-up entity and captures its surface form.
ENTITY_PATTERN = r'<entity> (.*?) </entity>'


def markup_entity(utt: str, entities: List[str]) -> str:
    """Wrap each occurrence of the given entities in <entity> ... </entity> tags."""
    # Handle longer entities first so that entities contained in an
    # already-processed entity are skipped rather than nested.
    entities = sorted(set(entities), key=len, reverse=True)
    for i, entity in enumerate(entities):
        # Skip entities that are substrings of the tag word itself, which would
        # otherwise match inside the inserted <entity> markers.
        valid = entity not in ENTITY
        for prev in entities[:i]:
            if entity in prev:
                valid = False
        if valid:
            utt = re.sub(re.escape(entity), f'<entity> {entity} </entity>', utt)
    return utt
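
# Illustrative sanity check (utterance and entity list are made up):
#   markup_entity("i want to fly to new york", ["new york"])
#   -> 'i want to fly to <entity> new york </entity>'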


def print_dataset_info(dataset):
    """Print the dataset's features, splits, and sizes in dataset-card style YAML."""
    print("features:")
    for feature_name, feature in dataset.features.items():
        print(f"  - name: {feature_name}")
        if isinstance(feature, Value):
            print(f"    dtype: {feature.dtype}")
        elif isinstance(feature, Sequence):
            # Assumes the sequence wraps a plain Value feature.
            print(f"    sequence: {feature.feature.dtype}")

    print("splits:")
    for split_name, split_info in dataset.info.splits.items():
        print(f"  - name: {split_name}")
        print(f"    num_bytes: {split_info.num_bytes}")
        print(f"    num_examples: {split_info.num_examples}")

    print(f"download_size: {dataset.info.download_size}")
    print(f"dataset_size: {dataset.info.dataset_size}")
if __name__ == "__main__":
    # Expects a dataset name/path and a configuration name on the command line.
    dataset = load_dataset(sys.argv[1], sys.argv[2])
    print_dataset_info(dataset["train"])

    print("An example of 'test' looks as follows.")
    print("```")
    print(json.dumps(dataset["test"][0], indent=2, sort_keys=True))
    print("```")