import datasets
import pandas as pd

_DESCRIPTION = """\
A dataset containing both DGA and normal domain names. The normal domain names were taken from the Alexa top one million domains. An additional 3,161 normal 
domains were included in the dataset, provided by the Bambenek Consulting feed. This later group is particularly interesting since it consists of suspicious domain 
names that were not generated by DGA. Therefore, the total amount of domains normal in the dataset is 1,003,161. DGA domains were obtained from the repositories 
of DGA domains of Andrey Abakumov and John Bambenek. The total amount of DGA domains is 1,915,335, and they correspond to 51 different malware families. DGA domains 
were generated by 51 different malware families. About the 55% of of the DGA portion of dataset is composed of samples from the Banjori, Post, Timba, Cryptolocker, 
Ramdo and Conficker malware.
"""
_HOMEPAGE = "https://https://huggingface.co/datasets/harpomaxx/dga-detection"

class MyDataset(datasets.GeneratorBasedBuilder):
    def _info(self):
        # Provide metadata for the dataset
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"domain": datasets.Value("string"), 
                 "label": datasets.Value("string"),
                 "class": datasets.Value("int32")
                }
            ),
            supervised_keys=("domain", "class"),
            homepage="_HOMEPAGE",
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        # Download (and cache) the compressed CSV via the download manager
        csv_path = dl_manager.download(
            "https://huggingface.co/datasets/harpomaxx/dga-detection/resolve/main/argencon.csv.gz"
        )

        # Create SplitGenerators for each dataset split (train, test, validation)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": csv_path,
                    "split": split,
                },
            )
            for split in ["train", "test", "validation"]
        ]

    """""
    The data variable in the _generate_examples() method is a temporary variable that holds the portion of the dataset based on the current split. 
    The datasets.SplitGenerator in the _split_generators() method is responsible for creating the three different keys ('train', 'test', 'validation').When you load your 
    dataset using load_dataset(), the Hugging Face Datasets library will automatically call the _split_generators() method to create the three different dataset splits. 
    Then, it will call the _generate_examples() method for each split separately, passing the corresponding split name as the split argument. 
    This is how the different keys are created. To clarify, the _generate_examples() method processes one split at a time, and the Datasets library combines the results 
    to create a final dataset with keys for 'train', 'test', and 'validation'.
    """
    def _generate_examples(
        self,
        filepath: str,
        split: str,
    ):
        # Read the gzip-compressed CSV dataset
        dataset = pd.read_csv(filepath, compression="gzip")

        # Derive the binary 'class' column from the 'label' column:
        # 0 for normal domains (label contains 'normal'), 1 for DGA domains
        dataset['class'] = dataset['label'].apply(lambda x: 0 if 'normal' in x else 1)

        # Get the total number of rows
        total_rows = len(dataset)

        # Define the split ratios: 70% train, 20% test;
        # the remaining 10% is validation
        train_ratio = 0.7
        test_ratio = 0.2

        # Calculate the boundary indices for each contiguous split
        # (rows are taken in file order; no shuffling is applied)
        train_end = int(train_ratio * total_rows)
        test_end = train_end + int(test_ratio * total_rows)

        # Select the rows that belong to the requested split
        if split == "train":
            dataset = dataset.iloc[:train_end]
        elif split == "test":
            dataset = dataset.iloc[train_end:test_end]
        elif split == "validation":
            dataset = dataset.iloc[test_end:]

        # Generate examples
        for index, row in dataset.iterrows():
            yield index, {
                "domain": row["domain"],
                "label": row["label"],
                "class": row["class"],
            }
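
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the builder itself): a minimal
# example of the `load_dataset` flow described in the docstring above, which
# calls _split_generators() once and then _generate_examples() once per split.
# The repository id comes from _HOMEPAGE; recent versions of the `datasets`
# library may also require `trust_remote_code=True` to run a loading script.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    ds = datasets.load_dataset("harpomaxx/dga-detection")
    print(ds)              # DatasetDict with 'train', 'test', 'validation' keys
    print(ds["train"][0])  # e.g. {'domain': ..., 'label': ..., 'class': 0}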