DipakBundheliya committed on
Commit
e0bf5a5
1 Parent(s): 4ec75e3

upload Shipping-label-NER file

Browse files
Files changed (1) hide show
  1. Shipping-label-NER.py +129 -0
Shipping-label-NER.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
# coding=utf-8
# Copyright 2024 HuggingFace Datasets Authors.
# Lint as: python3
"""The Shipping Label Dataset: converts CoNLL-formatted data to NER input format."""

import datasets

# Module-level logger, following the datasets library convention.
logger = datasets.logging.get_logger(__name__)

# No published citation exists for this dataset yet.
_CITATION = """
"""

_DESCRIPTION = """
The goal of this task is to provide a dataset for name entity recognition."""

# Base URL of the raw split files hosted on GitHub.
_URL = "https://raw.githubusercontent.com/SanghaviHarshPankajkumar/shipping_label_project/main/NER/data/"
_TRAINING_FILE = "train.txt"
_VAL_FILE = "val.txt"
_TEST_FILE = "test.txt"
22
+
23
+
24
class shipping_labels_Config(datasets.BuilderConfig):
    """BuilderConfig for the Shipping Label NER dataset."""

    def __init__(self, **kwargs):
        """Create a BuilderConfig for Shipping Label data.

        Args:
            **kwargs: keyword arguments forwarded to the parent BuilderConfig.
        """
        super().__init__(**kwargs)
35
class shiping_label_ner(datasets.GeneratorBasedBuilder):
    """Dataset builder for shipping-label NER.

    Each example carries an ``id``, a sequence of ``tokens`` and a parallel
    sequence of ``ner_tags`` drawn from a fixed BIO label set.
    """

    BUILDER_CONFIGS = [
        shipping_labels_Config(
            name="shipping_label_ner", version=datasets.Version("1.0.0"), description="Shipping Label Dataset for ner"
        ),
    ]

    def _info(self):
        """Return the DatasetInfo describing the feature schema and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # BIO tagging scheme: B-/I- prefixed entity types plus "O".
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-GCNUM",
                                "I-GCNUM",
                                "B-BGNUM",
                                "I-BGNUM",
                                "B-DATE",
                                "I-DATE",
                                "B-ORG",
                                "I-ORG",
                                "B-LOCATION",
                                "I-LOCATION",
                                "B-NAME",
                                "I-NAME",
                                "B-BARCODE",
                                "I-BARCODE",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three split files and return one SplitGenerator per split."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
            "val": f"{_URL}{_VAL_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs parsed from a CoNLL-style file.

        The file is expected to hold one ``token label`` pair per line
        (separated by a single space), with blank lines marking sentence
        boundaries.

        Args:
            filepath: path to the downloaded split file.

        Yields:
            Tuples of ``(sentence_index, {"id", "tokens", "ner_tags"})``.

        Raises:
            ValueError: if a non-blank line does not split into exactly a
                token and a label, or if tokens and labels fall out of sync.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            current_tokens = []
            current_labels = []
            sentence_counter = 0
            for row in f:
                row = row.rstrip()
                if row:
                    token, label = row.split(" ")
                    current_tokens.append(token)
                    current_labels.append(label)
                else:
                    # Blank line: flush the accumulated sentence.
                    if not current_tokens:
                        # Consecutive empty lines would otherwise emit empty sentences.
                        continue
                    # Explicit check instead of `assert`, which is stripped under -O.
                    if len(current_tokens) != len(current_labels):
                        raise ValueError("💔 between len of tokens & labels")
                    sentence = (
                        sentence_counter,
                        {
                            "id": str(sentence_counter),
                            "tokens": current_tokens,
                            "ner_tags": current_labels,
                        },
                    )
                    sentence_counter += 1
                    current_tokens = []
                    current_labels = []
                    yield sentence
            # Don't forget last sentence in dataset 🧐 (file may not end with a blank line).
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "tokens": current_tokens,
                    "ner_tags": current_labels,
                }