import datasets


logger = datasets.logging.get_logger(__name__)


_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/wnut17train%20(1).conll"

class indian_namesConfig(datasets.BuilderConfig):
    """The WNUT 17 Emerging Entities Dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for WNUT 17.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(indian_namesConfig, self).__init__(**kwargs)


class indian_names(datasets.GeneratorBasedBuilder):
    """The WNUT 17 Emerging Entities Dataset."""

    BUILDER_CONFIGS = [
        indian_namesConfig(
            name="indian_names", version=datasets.Version("1.0.0"), description="The WNUT 17 Emerging Entities Dataset"
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-corporation",
                                "I-corporation",
                                "B-person",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": _URL,
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            current_tokens = []
            current_labels = []
            sentence_counter = 0
            for row in f:
                row = row.rstrip()
                if row:
                    # Only keep rows that actually contain a tab-separated token/label pair.
                    if "\t" in row:
                        token, label = row.split("\t")
                        current_tokens.append(token)
                        current_labels.append(label)
                else:
                    # An empty line marks the end of a sentence.
                    if not current_tokens:
                        # Consecutive empty lines would otherwise produce empty sentences.
                        continue
                    assert len(current_tokens) == len(current_labels), "Mismatch between number of tokens and labels"
                    sentence = (
                        sentence_counter,
                        {
                            "id": str(sentence_counter),
                            "tokens": current_tokens,
                            "ner_tags": current_labels,
                        },
                    )
                    sentence_counter += 1
                    current_tokens = []
                    current_labels = []
                    yield sentence
            # Don't forget last sentence in dataset 🧐
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "tokens": current_tokens,
                    "ner_tags": current_labels,
                }
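

# A minimal usage sketch, assuming this file is saved locally as `indian_names.py`
# and a `datasets` version that still supports local loading scripts; the path and
# printed example below are illustrative, not part of the dataset itself.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the TRAIN split produced by the builder defined above.
    ds = load_dataset("indian_names.py", split="train")
    print(ds[0])  # e.g. {"id": "0", "tokens": [...], "ner_tags": [...]}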