Dataset-generation script (notebook export; web-page metadata and line-number gutter removed).
### Code to generate dataset
# %%
from datasets import load_dataset
dataset = load_dataset("conll2003")
# %%
# Inspect the DatasetDict (splits and their features).
dataset
# %%
# Raw token list of the first training example.
dataset['train'][0]['tokens']
# %%
# CoNLL-2003 IOB tag -> integer id mapping (matches the dataset's label ids).
ner_tags= {'O': 0, 'B-PER': 1, 'I-PER': 2, 'B-ORG': 3, 'I-ORG': 4, 'B-LOC': 5, 'I-LOC': 6, 'B-MISC': 7, 'I-MISC': 8}
# %%
# Swap keys and values using dictionary comprehension
# (integer id -> IOB tag string), used below to decode ner_tags.
swapped_dict = {v: k for k, v in ner_tags.items()}
# Print the swapped dictionary
print(swapped_dict)
# %%
# Decode the first training example's integer tag ids into IOB strings.
[swapped_dict[x] for x in dataset['train'][0]['ner_tags']]
# %%
# Full first training example (tokens, ner_tags, ...).
dataset['train'][0]
# %%
def label_tokens(entry):
    """Add an 'ner_labels' field: the IOB tag string for each id in 'ner_tags'.

    Relies on the module-level ``swapped_dict`` (id -> tag string) mapping.
    Mutates and returns *entry* so it can be used with ``Dataset.map``.
    """
    tag_ids = entry['ner_tags']
    entry['ner_labels'] = [swapped_dict[tag_id] for tag_id in tag_ids]
    return entry
# %%
# Attach string IOB labels to every split via the same mapper.
for split in ("train", "test", "validation"):
    dataset[split] = dataset[split].map(label_tokens)
# %%
def tokens_to_sentence(entry):
    """Add a 'sentence' field: the entry's tokens joined by single spaces.

    Mutates and returns *entry* so it can be used with ``Dataset.map``.
    """
    joined = " ".join(entry['tokens'])
    entry['sentence'] = joined
    return entry
# Build the space-joined sentence string for every split.
for split in ("train", "test", "validation"):
    dataset[split] = dataset[split].map(tokens_to_sentence)
# %%
def extract_entities(entry):
    """Group IOB-tagged tokens into entity surface strings.

    Reads ``entry['sentence']`` (space-joined tokens, produced by
    tokens_to_sentence) and ``entry['ner_labels']`` (IOB tag strings such as
    'B-PER', 'I-PER', 'O') in lockstep, and stores the grouped spans under
    ``entry['entities']`` as {'PER': [...], 'ORG': [...], 'LOC': [...],
    'MISC': [...]}.  Mutates and returns *entry* for use with ``Dataset.map``.

    Fix vs. the original: an 'I-' tag whose type differs from the currently
    open entity used to be appended to that entity (e.g. B-LOC I-ORG produced
    one LOC span "Paris EU"), and a stray 'I-' with no open entity silently
    dropped the word.  Both cases now start a new entity of the 'I-' tag's
    type — the standard lenient IOB interpretation.
    """
    entities = {'PER': [], 'ORG': [], 'LOC': [], 'MISC': []}
    current_type = None
    current_words = []

    def flush():
        # Close the entity currently being built, if any.
        nonlocal current_type, current_words
        if current_type is not None:
            entities[current_type].append(' '.join(current_words))
        current_type = None
        current_words = []

    for word, label in zip(entry['sentence'].split(), entry['ner_labels']):
        if label.startswith('B-'):
            # 'B-' always opens a fresh entity; close any open one first.
            flush()
            current_type = label.split('-')[1]
            current_words = [word]
        elif label.startswith('I-'):
            entity_type = label.split('-')[1]
            if current_type == entity_type:
                current_words.append(word)
            else:
                # Mismatched or stray 'I-': treat as the start of a new entity
                # rather than corrupting (or dropping) the open one.
                flush()
                current_type = entity_type
                current_words = [word]
        else:
            # 'O' ends any open entity.
            flush()
    flush()  # close an entity that runs to the end of the sentence
    entry['entities'] = entities
    return entry
# Extract grouped entity spans for every split.
for split in ("train", "test", "validation"):
    dataset[split] = dataset[split].map(extract_entities)
# %%
# Spot-check one processed example: the sentence plus its extracted entities.
dataset['train'][10]['sentence'], dataset['train'][10]['entities']
# %%
# NOTE(review): publishes the processed dataset to the Hugging Face Hub;
# presumably requires a prior `huggingface-cli login` — confirm credentials.
dataset.push_to_hub("areias/conll2003-generative")
# %%
from collections import Counter
def get_count(entries):
    """Print the total number of PER/ORG/LOC/MISC entity mentions in *entries*.

    Each item in *entries* must carry an 'entities' dict as produced by
    extract_entities.  Counts duplicate surface strings as separate mentions.
    """
    # One Counter per entity type, keyed by surface string.
    counters = {etype: Counter() for etype in ("PER", "ORG", "LOC", "MISC")}
    # Tally every mention across all entries.
    for item in entries:
        for etype, counter in counters.items():
            counter.update(item['entities'][etype])
    # Report the grand total for each entity type.
    print("Total PER entities:", sum(counters["PER"].values()))
    print("Total ORG entities:", sum(counters["ORG"].values()))
    print("Total LOC entities:", sum(counters["LOC"].values()))
    print("Total MISC entities:", sum(counters["MISC"].values()))
# %%
# Entity-mention totals per split.
get_count(dataset['train'])
# %%
get_count(dataset['test'])
# %%
get_count(dataset['validation'])
# %%
|