import datasets

logger = datasets.logging.get_logger(__name__)

_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/"
_TRAINING_FILE = "Indian_dataset_wnut_train.conll"
# _DEV_FILE = "indian_dataset.conll"
_TEST_FILE = "emerging.test.annotated"
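
# The .conll files above are assumed to follow the WNUT 17 layout that the
# parser below expects: one "token<TAB>label" pair per line, with sentences
# separated by blank lines. Illustrative (hypothetical) fragment:
#
#     Rahul\tB-person
#     Sharma\tI-person
#     works\tO
#     at\tO
#     Infosys\tB-corporation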

class indian_namesConfig(datasets.BuilderConfig):
    """BuilderConfig for the Indian names NER dataset (WNUT 17 format)."""

    def __init__(self, **kwargs):
        """BuilderConfig for the Indian names NER dataset.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)

class indian_names(datasets.GeneratorBasedBuilder):
    """Indian names NER dataset, using the WNUT 17 Emerging Entities tag set."""

    BUILDER_CONFIGS = [
        indian_namesConfig(
            name="indian_names", version=datasets.Version("1.0.0"), description="The WNUT 17 Emerging Entities Dataset"
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-corporation",
                                "I-corporation",
                                "B-creative-work",
                                "I-creative-work",
                                "B-group",
                                "I-group",
                                "B-location",
                                "I-location",
                                "B-person",
                                "I-person",
                                "B-product",
                                "I-product",          
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
        )
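
    # Note: with the ClassLabel feature above, the string tags yielded by
    # _generate_examples are encoded to integer ids in list order, e.g.
    # "O" -> 0, "B-person" -> 9, "I-person" -> 10.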

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            # "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            # datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
            
    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            current_tokens = []
            current_labels = []
            sentence_counter = 0
            for row in f:
                row = row.rstrip()
                if row:
                    if "\t" in row:
                        token, label = row.split("\t")
                        current_tokens.append(token)
                        current_labels.append(label)
                    else:
                        # Handle cases where the delimiter is missing
                        # You can choose to skip these rows or handle them differently
                        logger.warning(f"Delimiter missing in row: {row}")
                else:
                    # New sentence
                    if not current_tokens:
                        # Consecutive empty lines will cause empty sentences
                        continue
                    assert len(current_tokens) == len(current_labels), "Mismatch between number of tokens and labels"
                    sentence = (
                        sentence_counter,
                        {
                            "id": str(sentence_counter),
                            "tokens": current_tokens,
                            "ner_tags": current_labels,
                        },
                    )
                    sentence_counter += 1
                    current_tokens = []
                    current_labels = []
                    yield sentence
            # Don't forget the last sentence in the dataset 🧐
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "tokens": current_tokens,
                    "ner_tags": current_labels,
                }
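
# Minimal usage sketch (not part of the loading script). It assumes this file
# is saved locally as "indian_names.py" and that the remote .conll files above
# are reachable; the exact load_dataset call may vary with the installed
# version of the `datasets` library:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("indian_names.py", trust_remote_code=True)
#     print(ds["train"][0]["tokens"], ds["train"][0]["ner_tags"])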