File size: 2,463 Bytes
1f1712b
13485a3
6a2bca2
13485a3
 
6a2bca2
13485a3
 
 
 
 
 
 
 
 
 
 
 
b80aa7c
13485a3
5eb1119
13485a3
 
1f1712b
13485a3
 
 
 
 
 
 
 
 
f64df1e
13485a3
 
 
 
 
 
 
 
7eaf0d5
13485a3
 
 
 
 
 
f64df1e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
import datasets
import pandas as pd
import os


class MyDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for a DGA-detection CSV hosted on the Hugging Face Hub.

    Every example has two string fields: ``domain`` (the domain name) and
    ``label`` (its class). The train/test/validation splits are produced by
    deterministically slicing the CSV rows at a 70% / 20% / ~10% ratio
    (validation receives the remainder after integer truncation).

    NOTE(review): the CSV is assumed to contain ``domain`` and ``label``
    columns — confirm against the hosted file.
    """

    # Remote gzip-compressed CSV. pandas decompresses it transparently
    # because compression is inferred from the ``.gz`` extension.
    _CSV_URL = "https://huggingface.co/datasets/harpomaxx/dga-detection/raw/main/argencon.csv.gz"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata: feature schema, supervised keys, homepage."""
        return datasets.DatasetInfo(
            description="DESCRIPTION",
            features=datasets.Features(
                {"domain": datasets.Value("string"), "label": datasets.Value("string")}
            ),
            supervised_keys=("domain", "label"),
            homepage="_HOMEPAGE",
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Declare the three splits, each pointing at the same remote CSV.

        ``dl_manager`` is intentionally unused: pandas streams the gzipped
        CSV straight from the URL inside ``_generate_examples``.
        (Fixed: the parameter was previously mis-annotated as
        ``DownloadConfig``; the library passes a ``DownloadManager``.)
        """
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": self._CSV_URL,
                    "split": split,
                },
            )
            for split in ["train", "test", "validation"]
        ]

    def _generate_examples_old(
        self,
        filepath: str,
        split: str,
    ):
        """Legacy generator, no longer wired in — kept for reference only.

        Expects the CSV to carry an explicit ``split`` column and filters on
        it; the current loader (``_generate_examples``) slices by row ratio
        instead.
        """
        dataset = pd.read_csv(filepath)
        # Keep only the rows tagged with the requested split.
        dataset = dataset[dataset["split"] == split]
        for index, row in dataset.iterrows():
            yield index, {
                "domain": row["domain"],
                "label": row["label"],
            }

    def _generate_examples(
        self,
        filepath: str,
        split: str,
    ):
        """Yield ``(key, example)`` pairs for the requested split.

        The CSV is read in full on every call, then sliced by position:
        first 70% -> train, next 20% -> test, remainder -> validation.
        Keys are the original DataFrame row indices, so they stay unique
        across splits.

        Raises:
            ValueError: if ``split`` is not one of the three known names
                (previously an unknown split silently yielded the WHOLE
                dataset — a data-duplication bug).
        """
        dataset = pd.read_csv(filepath)

        total_rows = len(dataset)

        # Split ratios; validation implicitly gets the final ~10%.
        train_ratio = 0.7
        test_ratio = 0.2

        # Boundary row indices for each contiguous slice.
        train_end = int(train_ratio * total_rows)
        test_end = train_end + int(test_ratio * total_rows)

        if split == "train":
            dataset = dataset.iloc[:train_end]
        elif split == "test":
            dataset = dataset.iloc[train_end:test_end]
        elif split == "validation":
            dataset = dataset.iloc[test_end:]
        else:
            raise ValueError(f"Unknown split: {split!r}")

        for index, row in dataset.iterrows():
            yield index, {
                "domain": row["domain"],
                "label": row["label"],
            }