tomaarsen HF staff committed on
Commit
d1b7df4
1 Parent(s): 89877ef

Add simple README

Browse files
Files changed (1) hide show
  1. README.md +104 -1
README.md CHANGED
@@ -34,4 +34,107 @@ dataset_info:
34
  ---
35
  # Dataset Card for "ner-orgs"
36
 
37
- [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  ---
35
  # Dataset Card for "ner-orgs"
36
 
37
+ This dataset is a concatenation of subsets of [Few-NERD](https://huggingface.co/datasets/DFKI-SLT/few-nerd), [CoNLL 2003](https://huggingface.co/datasets/conll2003) and [OntoNotes v5](https://huggingface.co/datasets/tner/ontonotes5), but only the "B-ORG" and "I-ORG" labels.
38
+
39
+ Exactly half of the samples per split contain organisations, while the other half do not contain any.
40
+
41
+ It was generated using the following script:
42
+
43
+ ```py
44
+ import random
45
+ from datasets import load_dataset, concatenate_datasets, Features, Sequence, ClassLabel, Value, DatasetDict
46
+
47
+
48
# Shared target schema for all three source corpora after relabeling:
# tokens plus IOB2 organisation tags only ("O", "B-ORG", "I-ORG").
# Casting every corpus to this schema lets them be concatenated safely.
FEATURES = Features(
    {
        "tokens": Sequence(feature=Value(dtype="string")),
        "ner_tags": Sequence(feature=ClassLabel(names=["O", "B-ORG", "I-ORG"])),
    }
)
54
+
55
+
56
def load_fewnerd():
    """Load Few-NERD (supervised) and reduce its labels to O/B-ORG/I-ORG."""

    def relabel(example):
        # Coarse tag 5 marks an organisation token; everything else becomes "O".
        binary = [1 if tag == 5 else 0 for tag in example["ner_tags"]]
        # Convert to IOB2: a tag that continues an organisation span is I-ORG (2),
        # the first token of a span stays B-ORG (1).
        iob2 = []
        previous = 0
        for tag in binary:
            iob2.append(2 if tag == 1 and previous == 1 else tag)
            previous = tag
        example["ner_tags"] = iob2
        return example

    raw = load_dataset("DFKI-SLT/few-nerd", "supervised")
    relabeled = raw.map(relabel, remove_columns=["id", "fine_ner_tags"])
    return relabeled.cast(FEATURES)
69
+
70
+
71
def load_conll():
    """Load CoNLL 2003 and keep only the organisation labels."""
    # CoNLL 2003 uses 3 for B-ORG and 4 for I-ORG; every other tag maps to O.
    relabel = {3: 1, 4: 2}.get

    def mapper(example):
        example["ner_tags"] = [relabel(tag, 0) for tag in example["ner_tags"]]
        return example

    raw = load_dataset("conll2003")
    remapped = raw.map(mapper, remove_columns=["id", "pos_tags", "chunk_tags"])
    return remapped.cast(FEATURES)
82
+
83
+
84
def load_ontonotes():
    """Load OntoNotes v5 (tner) and keep only the organisation labels."""

    def mapper(example):
        # tner/ontonotes5 uses 11 for B-ORG and 12 for I-ORG; the rest map to O.
        mapping = {11: 1, 12: 2}
        example["ner_tags"] = [mapping.get(tag, 0) for tag in example["ner_tags"]]
        return example

    raw = load_dataset("tner/ontonotes5").rename_column("tags", "ner_tags")
    return raw.map(mapper).cast(FEATURES)
96
+
97
+
98
def has_org(sample):
    """Return True if the sentence contains at least one organisation token.

    Tags are 0 ("O"), 1 ("B-ORG") or 2 ("I-ORG"), so any non-zero tag marks
    an organisation. ``any`` short-circuits on the first hit instead of
    summing the whole list, and states the intent directly.
    """
    return any(sample["ner_tags"])
100
+
101
+
102
def has_no_org(sample):
    """Return True when the sentence contains no organisation tokens at all."""
    # Tags are 0/1/2, so "no organisation" means every tag is the zero label.
    return not any(tag != 0 for tag in sample["ner_tags"])
104
+
105
+
106
def preprocess_raw_dataset(raw_dataset):
    """Balance a split so exactly half of its sentences contain an ORG span.

    Keeps every sentence with at least one organisation and downsamples the
    org-free sentences to the same count, then concatenates the two halves.
    """
    # Set the number of sentences without an org equal to the number of sentences with an org
    dataset_org = raw_dataset.filter(has_org)
    dataset_no_org = raw_dataset.filter(has_no_org)
    # NOTE(review): random.sample is unseeded, so the chosen org-free subset
    # (and thus the published dataset) is not reproducible across runs —
    # consider seeding before regenerating.
    dataset_no_org = dataset_no_org.select(random.sample(range(len(dataset_no_org)), k=len(dataset_org)))
    dataset = concatenate_datasets([dataset_org, dataset_no_org])
    return dataset
113
+
114
+
115
def main() -> None:
    """Build the balanced ORG-only dataset from all three corpora and push it to the Hub."""
    # Load the three relabeled source corpora (same order as before, since
    # downstream balancing consumes the global random state).
    sources = [load_fewnerd(), load_conll(), load_ontonotes()]

    # Concatenate the corpora split-by-split, then balance each split so half
    # of its sentences contain an organisation.
    dataset_dict = DatasetDict(
        {
            split: preprocess_raw_dataset(
                concatenate_datasets([source[split] for source in sources])
            )
            for split in ("train", "validation", "test")
        }
    )
    dataset_dict.push_to_hub("ner-orgs", private=True)
136
+
137
+
138
# Allow the script to be imported without side effects; only run when executed directly.
if __name__ == "__main__":
    main()
140
+ ```