import datasets


logger = datasets.logging.get_logger(__name__)


# The raw file URL is used (not the GitHub "blob" page, which serves HTML),
# since the download is parsed as plain text in _generate_examples.
_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/test.txt"


class IndianNamesConfig(datasets.BuilderConfig):
    """BuilderConfig for the indian_names dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for indian_names.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class IndianNames(datasets.GeneratorBasedBuilder):
    """indian_names dataset."""

    BUILDER_CONFIGS = [
        IndianNamesConfig(name="indian_names", version=datasets.Version("1.0.0"), description="indian_names dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "B-PER",
                                "B-ORG",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
        )
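
    # Illustration only: a record as yielded by _generate_examples, with
    # hypothetical tokens and tags (assuming test.txt uses just the two
    # labels declared above):
    #   (0, {"id": "0", "tokens": ["Infosys", "Rahul"], "ner_tags": ["B-ORG", "B-PER"]})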

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract(_URL)
        data_files = {
            "train": downloaded_file,
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # CoNLL-style rows are space separated, with the token in
                    # the first column and the NER tag in the fourth.
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    ner_tags.append(splits[3].rstrip())
            # last example
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
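

# Minimal usage sketch, assuming this script is saved locally as
# indian_names.py; the path and split name below are illustrative.
if __name__ == "__main__":
    ds = datasets.load_dataset("./indian_names.py", split="train")
    print(ds[0])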