joelniklaus committed
Commit 715e3e7
1 Parent(s): c22cb0e

changed notation scheme to IOB

Files changed (5)
  1. README.md +3 -1
  2. convert_to_hf_dataset.py +101 -0
  3. test.jsonl +2 -2
  4. train.jsonl +2 -2
  5. validation.jsonl +2 -2
README.md CHANGED
@@ -3,7 +3,7 @@ annotations_creators:
 - other
 language_creators:
 - found
-languages:
+language:
 - el
 license:
 - cc-by-nc-sa-4.0
@@ -93,6 +93,8 @@ The files contain the following data fields
 - `PUBLIC-DOCS`: Public Document Reference; any reference to documents or decisions that have been published by a public institution (organization) that are not considered a primary source of legislation (e.g., local decisions, announcements, memorandums, directives).
 - `O`: No entity annotation present
 
+The final tagset (in IOB notation) is the following: `['O', 'B-ORG', 'I-ORG', 'B-GPE', 'I-GPE', 'B-LEG-REFS', 'I-LEG-REFS', 'B-PUBLIC-DOCS', 'I-PUBLIC-DOCS', 'B-PERSON', 'I-PERSON', 'B-FACILITY', 'I-FACILITY', 'B-LOCATION-UNK', 'I-LOCATION-UNK', 'B-LOCATION-NAT', 'I-LOCATION-NAT']`
+
 ### Data Splits
 
 The dataset has three splits: *train*, *validation* and *test*.
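
To make the new scheme concrete, here is roughly what one deserialized record of the converted jsonl files looks like. The `date`, `gazette`, `words`, and `ner` fields are the ones produced by the conversion script below; the sentence and its tags are made up for illustration:

```python
# one line of train.jsonl, deserialized (values are illustrative, not taken from the corpus)
record = {
    "date": "19.08.2011",  # hypothetical publication date
    "gazette": "A",        # gazette type ("A" or "D")
    "words": ["Το", "Υπουργείο", "Οικονομικών", "ανακοίνωσε", "..."],
    "ner":   ["O",  "B-ORG",     "I-ORG",       "O",          "O"],
}
```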
convert_to_hf_dataset.py ADDED
@@ -0,0 +1,101 @@
+import os
+from glob import glob
+from pathlib import Path
+from typing import List
+
+import pandas as pd
+from spacy.lang.el import Greek
+
+pd.set_option('display.max_colwidth', None)
+pd.set_option('display.max_columns', None)
+
+base_path = Path("DATASETS/ENTITY RECOGNITION")
+tokenizer = Greek().tokenizer
+
+
+# A and D are different government gazettes:
+# A is the general one, publishing standard legislation; D covers legislation on urban planning and similar matters.
+
+def process_document(ann_file: str, text_file: Path, metadata: dict, tokenizer) -> List[dict]:
+    """Processes one document (.ann file and .txt file) and returns a list of annotated sentences"""
+    # read the brat-style .ann file into a df; each tab-separated line looks like
+    # "T1\tORG 1504 1552\tentity text"
+    ann_df = pd.read_csv(ann_file, sep="\t", header=None, names=["id", "entity_with_span", "entity_text"])
+    # keep empty sentences here so that the character offsets stay aligned with the .ann spans
+    sentences = text_file.read_text().split("\n")
+
+    # split "ORG 1504 1552" into individual columns
+    ann_df[["entity", "start", "end"]] = ann_df["entity_with_span"].str.split(" ", expand=True)
+    ann_df.start = ann_df.start.astype(int)
+    ann_df.end = ann_df.end.astype(int)
+
+    not_found_entities = 0
+    annotated_sentences = []
+    current_start_index = 0
+    for sentence in sentences:
+        doc_start_index = current_start_index
+        doc_end_index = current_start_index + len(sentence)
+        current_start_index = doc_end_index + 1  # + 1 for the newline consumed by split()
+        if not sentence:  # skip empty sentences, but only after advancing the offset
+            continue
+
+        ann_sent = {**metadata}
+        doc = tokenizer(sentence)
+
+        # annotations whose character span lies entirely within this sentence
+        relevant_annotations = ann_df[(ann_df.start >= doc_start_index) & (ann_df.end <= doc_end_index)]
+        for _, row in relevant_annotations.iterrows():
+            sent_start_index = row["start"] - doc_start_index
+            sent_end_index = row["end"] - doc_start_index
+            char_span = doc.char_span(sent_start_index, sent_end_index, label=row["entity"], alignment_mode="expand")
+            if char_span:
+                # default="unmodified" preserves the entities set in earlier iterations;
+                # the default ("outside") would reset them to O on every call
+                doc.set_ents([char_span], default="unmodified")
+            else:
+                not_found_entities += 1
+                print(f"Could not find entity `{row['entity_text']}` in sentence `{sentence}`")
+
+        ann_sent["words"] = [str(tok) for tok in doc]
+        ann_sent["ner"] = [tok.ent_iob_ + "-" + tok.ent_type_ if tok.ent_type_ else "O" for tok in doc]
+
+        annotated_sentences.append(ann_sent)
+
+    print(f"Did not find entities in {not_found_entities} cases")
+    return annotated_sentences
+
+
+def read_to_df(split):
+    """Reads the different documents of one split and collects their metadata"""
+    ann_files = glob(str(base_path / split / "ANN" / "*/*/*.ann"))
+    sentences = []
+    for ann_file in ann_files:
+        path = Path(ann_file)
+        year = path.parent.stem
+        file_name = path.stem
+        # file names are space-separated: "<?> <gazette> <gazette_number> <?> <date>"
+        _, gazette, gazette_number, _, date = file_name.split(' ')
+        text_file = base_path / split / "TXT" / f"{gazette}/{year}/{file_name}.txt"
+        metadata = {
+            "date": date,
+            "gazette": gazette,
+            # "gazette_number": gazette_number,
+        }
+        sentences.extend(process_document(ann_file, text_file, metadata, tokenizer))
+    return pd.DataFrame(sentences)
+
+
+train = read_to_df("TRAIN")
+validation = read_to_df("VALIDATION")
+test = read_to_df("TEST")
+
+df = pd.concat([train, validation, test])
+print(f"The final tagset (in IOB notation) is the following: `{list(df.ner.explode().unique())}`")
+
+
+def save_splits_to_jsonl(config_name):
+    """Saves the splits to jsonl files for huggingface"""
+    if config_name:
+        os.makedirs(config_name, exist_ok=True)
+    train.to_json(os.path.join(config_name, "train.jsonl"), lines=True, orient="records", force_ascii=False)
+    validation.to_json(os.path.join(config_name, "validation.jsonl"), lines=True, orient="records", force_ascii=False)
+    test.to_json(os.path.join(config_name, "test.jsonl"), lines=True, orient="records", force_ascii=False)
+
+
+save_splits_to_jsonl("")
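
As a quick sanity check, the emitted splits can be read back with the `datasets` library; a minimal sketch, assuming it runs from the directory where `save_splits_to_jsonl("")` wrote the files:

```python
from datasets import load_dataset

# load the three jsonl splits written by save_splits_to_jsonl("")
dataset = load_dataset(
    "json",
    data_files={"train": "train.jsonl", "validation": "validation.jsonl", "test": "test.jsonl"},
)
print(dataset["train"][0]["words"])
print(dataset["train"][0]["ner"])
```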
test.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:76d25a3c1a64ad408e3272aee091f98eddf178b360aa550476c7e8b985f71771
-size 2263011
+oid sha256:5ad0654caac860a9d6d3d7d7834f913afaafc6cc5506c379c50c3d86ef12d559
+size 2278020
train.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f308076197d3ddfaa21ad7f99275cd4b545d68531698aed434f42cdcb31e8fec
-size 9153743
+oid sha256:1e0a17fd23cf770e960e14d4972c3245bc9a8f65cf616a8db4a7daaf4fd0e519
+size 9226475
validation.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:06d029554fed1f2b49d17ff12a1d0084fc18845d79737c28ece8f243a100e030
-size 2428314
+oid sha256:d91aab2c40560e498d8948fd4133eda83f519ebb2575d59901b343b8f91dda15
+size 2445108