areias committed on
Commit bddb3dc
1 Parent(s): fef46a1

Upload preprocess_data.py


Code to generate new columns from original data

Files changed (1)
  1. preprocess_data.py +128 -0
preprocess_data.py ADDED
@@ -0,0 +1,128 @@
### Code to generate dataset

# %%
from datasets import load_dataset

dataset = load_dataset("conll2003")

# %%
dataset

# %%
dataset['train'][0]['tokens']

# %%
ner_tags = {'O': 0, 'B-PER': 1, 'I-PER': 2, 'B-ORG': 3, 'I-ORG': 4,
            'B-LOC': 5, 'I-LOC': 6, 'B-MISC': 7, 'I-MISC': 8}

# %%
# Swap keys and values using a dictionary comprehension
swapped_dict = {v: k for k, v in ner_tags.items()}

# Print the swapped dictionary
print(swapped_dict)

# %%
[swapped_dict[x] for x in dataset['train'][0]['ner_tags']]

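# %%
# Aside (a sketch, not part of the original pipeline): conll2003 stores its
# tag vocabulary as a ClassLabel feature, so the hand-written ner_tags dict
# above could also be derived from the dataset itself. `names` and `int2str`
# are standard `datasets` ClassLabel attributes; `tag_feature` is my own name.
tag_feature = dataset['train'].features['ner_tags'].feature
print(tag_feature.names)  # ['O', 'B-PER', 'I-PER', ...]
print(tag_feature.int2str(dataset['train'][0]['ner_tags']))
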
# %%
dataset['train'][0]

# %%
def label_tokens(entry):
    # Map each integer NER tag to its string label, e.g. 1 -> 'B-PER'
    entry['ner_labels'] = [swapped_dict[x] for x in entry['ner_tags']]
    return entry


# %%
dataset['train'] = dataset['train'].map(label_tokens)
dataset['test'] = dataset['test'].map(label_tokens)
dataset['validation'] = dataset['validation'].map(label_tokens)

# %%
def tokens_to_sentence(entry):
    # Join the pre-tokenized words into a single space-separated sentence
    entry['sentence'] = ' '.join(entry['tokens'])
    return entry

dataset['train'] = dataset['train'].map(tokens_to_sentence)
dataset['test'] = dataset['test'].map(tokens_to_sentence)
dataset['validation'] = dataset['validation'].map(tokens_to_sentence)

# %%
def extract_entities(entry):
    """Group BIO-tagged tokens into surface strings, per entity type."""
    entities = {'PER': [], 'ORG': [], 'LOC': [], 'MISC': []}
    current_entity = {"type": None, "words": []}
    for word, label in zip(entry['sentence'].split(), entry['ner_labels']):
        if label.startswith('B-'):
            entity_type = label.split('-')[1]
            if current_entity["type"] == entity_type:
                # Adjacent entity of the same type: flush the previous span
                # and start a new one
                entities[entity_type].append(' '.join(current_entity["words"]))
                current_entity["words"] = [word]
            else:
                if current_entity["type"] is not None:
                    entities[current_entity["type"]].append(' '.join(current_entity["words"]))
                current_entity = {"type": entity_type, "words": [word]}
        elif label.startswith('I-'):
            # Continuation of the current entity (ignored if none is open)
            if current_entity["type"] is not None:
                current_entity["words"].append(word)
        else:
            # 'O' tag: close any open entity
            if current_entity["type"] is not None:
                entities[current_entity["type"]].append(' '.join(current_entity["words"]))
                current_entity = {"type": None, "words": []}
    # Flush an entity that runs to the end of the sentence
    if current_entity["type"] is not None:
        entities[current_entity["type"]].append(' '.join(current_entity["words"]))

    entry['entities'] = entities
    return entry

# Extract entities
dataset['train'] = dataset['train'].map(extract_entities)
dataset['test'] = dataset['test'].map(extract_entities)
dataset['validation'] = dataset['validation'].map(extract_entities)

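# %%
# Quick sanity check of extract_entities on a hand-built toy entry (my own
# example, not from the dataset): one multi-word PER span followed by a LOC.
toy = {'sentence': 'John Smith visited Paris',
       'ner_labels': ['B-PER', 'I-PER', 'O', 'B-LOC']}
print(extract_entities(toy)['entities'])
# expected: {'PER': ['John Smith'], 'ORG': [], 'LOC': ['Paris'], 'MISC': []}
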
# %%
dataset['train'][10]['sentence'], dataset['train'][10]['entities']

# %%
dataset.push_to_hub("areias/conll2003-generative")

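# %%
# Optional round-trip check (assumes the push above succeeded and the repo
# is accessible from this machine):
reloaded = load_dataset("areias/conll2003-generative")
reloaded
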
# %%
from collections import Counter

def get_count(entries):
    # Initialize counters for each entity type
    per_counter = Counter()
    org_counter = Counter()
    loc_counter = Counter()
    misc_counter = Counter()

    # Count the occurrences of each type of entity
    for item in entries:
        per_counter.update(item['entities']['PER'])
        org_counter.update(item['entities']['ORG'])
        loc_counter.update(item['entities']['LOC'])
        misc_counter.update(item['entities']['MISC'])

    # Print the counts for each type of entity
    print("Total PER entities:", sum(per_counter.values()))
    print("Total ORG entities:", sum(org_counter.values()))
    print("Total LOC entities:", sum(loc_counter.values()))
    print("Total MISC entities:", sum(misc_counter.values()))


# %%
get_count(dataset['train'])

# %%
get_count(dataset['test'])

# %%
get_count(dataset['validation'])

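# %%
# get_count only prints totals, but a Counter also gives frequency rankings
# for free. A small sketch (not in the original script) peeking at the most
# common PER surface forms in the training split:
per_counter = Counter()
for item in dataset['train']:
    per_counter.update(item['entities']['PER'])
print(per_counter.most_common(10))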