__author__ = 'dk'
### Build the APP network-flow behavior graph dataset
import random
import time
import gzip
import os
import sys
import dgl
import pickle
import logger_wrappers
import torch as th
import numpy as np
from construct_graph import build_graphs_from_flowcontainer_json,mtu,mtime, concurrent_time_threshold
import tqdm
from tqdm import tqdm
from tqdm import trange
import argparse
import json

root_dir  =  "./dataset/"  # root directory holding all raw datasets
random.seed(100)  # fixed seed so the train/valid/test split is reproducible
time_period= 60  # time window passed to build_graphs_from_flowcontainer_json (presumably seconds — confirm)

import pandas
pandas.set_option('display.max_columns',200) # max number of columns pandas will display
pandas.set_option('display.max_rows',10) # max number of rows pandas will display
pandas.set_option('display.width',2000) # display width, prevents premature line wrapping

from zat.json_log_to_dataframe import JSONLogToDataFrame

def random_walk_sampling(graph, start_node, max_nodes):
    """
    Random-walk sampling: grow a node set starting from ``start_node`` by
    repeatedly hopping to a random successor, then return the induced subgraph.

    :param graph: dgl.DGLGraph object
    :param start_node: node id the walk starts from
    :param max_nodes: upper bound on both the number of sampled nodes and the
                      number of walk steps attempted
    :return: dgl subgraph induced by the sampled nodes (nodes relabeled from 0)
    """
    subgraph_nodes = {start_node}  # nodes sampled so far
    count = 0  # walk steps taken; bounds the loop even when the walk stalls
    while len(subgraph_nodes) < max_nodes and count < max_nodes:
        # Restart the walk from a random already-sampled node each step.
        current_node = np.random.choice(list(subgraph_nodes))
        neighbors = graph.successors(current_node).numpy()

        if len(neighbors) == 0:  # dead end: count the step and retry elsewhere
            count += 1
            continue

        next_node = np.random.choice(neighbors)
        # FIX: the original did not increment ``count`` when ``next_node`` was
        # already sampled, so the loop could spin forever once every reachable
        # node was in the subgraph. Count every step, including revisits.
        count += 1
        if next_node in subgraph_nodes:
            continue

        subgraph_nodes.add(next_node)

    # Build the induced subgraph. ``relabel_nodes=True`` re-indexes nodes from
    # 0; all edges among the sampled nodes are kept automatically, so the edge
    # list the original collected (and never used) is unnecessary.
    return graph.subgraph(list(subgraph_nodes), relabel_nodes=True)


class ZeekFlowmeterJSONDataset:
    """Dataset builder over Zeek flowmeter JSON logs.

    Loads a labeled flowmeter log into a pandas DataFrame and annotates each
    flow with human-readable device names resolved from static address tables
    (CIC-IoT-2022 MAC list / CIC-IDS-2017 IP list). The batching and export
    methods below are still stubs.
    """
    # Device name -> coarse device category for the CIC-IoT-2022 capture.
    cic_iot_2022_category = {'Amazon Alexa Echo Dot 1':'Audio',
                            'Amazon Alexa Echo Dot 2':'Audio',
                            'Amazon Alexa Echo Spot':'Audio',
                            'Amazon Alexa Echo Studio':'Audio',
                            'Google Nest Mini':'Audio',
                            'Sonos One Speaker':'Audio',
                            'AMCREST WiFi Camera':'Camera',
                            'Arlo Base Station':'Camera',
                            'Arlo Q Camera':'Camera',
                            'Borun/Sichuan-AI Camera':'Camera',
                            'DCS8000LHA1 D-Link Mini Camera':'Camera',
                            'HeimVision Smart WiFi Camera':'Camera',
                            'Home Eye Camera':'Camera',
                            'Luohe Cam Dog':'Camera',
                            'Nest Indoor Camera':'Camera',
                            'Netatmo Camera':'Camera',
                            'SIMCAM 1S (AMPAKTec)':'Camera',
                            'Amazon Plug':'Home automation',
                            'Atomi Coffee Maker':'Home automation',
                            'Eufy HomeBase 2':'Home automation',
                            'Globe Lamp ESP_B1680C':'Home automation',
                            'Gosund ESP_039AAF Socket':'Home automation',
                            'Gosund ESP_032979 Plug':'Home automation',
                            'Gosund ESP_10098F Socket':'Home automation',
                            'Gosund ESP_0C3994 Plug':'Home automation',
                            'Gosund ESP_1ACEE1  Socket':'Home automation',
                            'Gosund ESP_147FF9 Plug':'Home automation',
                            'Gosund ESP_10ACD8 Plug':'Home automation',
                            'HeimVision SmartLife Radio/Lamp':'Home automation',
                            'Philips Hue Bridge':'Home automation',
                            'Ring Base Station AC:1236':'Home automation',
                            'iRobot Roomba':'Home automation',
                            'Smart Board':'Home automation',
                            'Teckin Plug 1':'Home automation',
                            'Teckin Plug 2':'Home automation',
                            'Yutron Plug 1':'Home automation',
                            'Yutron Plug 2':'Home automation',
                            'D-Link DCHS-161 Water Sensor':'Other',
                            'LG Smart TV':'Other',
                            'Netatmo Weather Station':'Other'}
    # All device names present in the CIC-IoT-2022 capture.
    cic_iot_2022_devices = ['Amazon Alexa Echo Dot 1',
                            'Amazon Alexa Echo Dot 2',
                            'Amazon Alexa Echo Spot',
                            'Amazon Alexa Echo Studio',
                            'Google Nest Mini',
                            'Sonos One Speaker',
                            'AMCREST WiFi Camera',
                            'Arlo Base Station',
                            'Arlo Q Camera',
                            'Borun/Sichuan-AI Camera',
                            'DCS8000LHA1 D-Link Mini Camera',
                            'HeimVision Smart WiFi Camera',
                            'Home Eye Camera',
                            'Luohe Cam Dog',
                            'Nest Indoor Camera',
                            'Netatmo Camera',
                            'SIMCAM 1S (AMPAKTec)',
                            'Amazon Plug',
                            'Atomi Coffee Maker',
                            'Eufy HomeBase 2',
                            'Globe Lamp ESP_B1680C',
                            'Gosund ESP_039AAF Socket',
                            'Gosund ESP_032979 Plug',
                            'Gosund ESP_10098F Socket',
                            'Gosund ESP_0C3994 Plug',
                            'Gosund ESP_1ACEE1  Socket',
                            'Gosund ESP_147FF9 Plug',
                            'Gosund ESP_10ACD8 Plug',
                            'HeimVision SmartLife Radio/Lamp',
                            'Philips Hue Bridge',
                            'Ring Base Station AC:1236',
                            'iRobot Roomba',
                            'Smart Board',
                            'Teckin Plug 1',
                            'Teckin Plug 2',
                            'Yutron Plug 1',
                            'Yutron Plug 2',
                            'D-Link DCHS-161 Water Sensor',
                            'LG Smart TV',
                            'Netatmo Weather Station']
    # MAC address -> device name for the CIC-IoT-2022 capture.
    cic_iot_2022_MAC_list={"1c:fe:2b:98:16:dd":"Amazon Alexa Echo Dot 1",
                            "a0:d0:dc:c4:08:ff":"Amazon Alexa Echo Dot 2",
                            "1c:12:b0:9b:0c:ec":"Amazon Alexa Echo Spot",
                            "08:7c:39:ce:6e:2a":"Amazon Alexa Echo Studio",
                            "cc:f4:11:9c:d0:00":"Google Nest Mini",
                            "48:a6:b8:f9:1b:88":"Sonos One Speaker",
                            "9c:8e:cd:1d:ab:9f":"AMCREST WiFi Camera",
                            "3c:37:86:6f:b9:51":"Arlo Base Station",
                            "40:5d:82:35:14:c8":"Arlo Q Camera",
                            "c0:e7:bf:0a:79:d1":"Borun/Sichuan-AI Camera",
                            "b0:c5:54:59:2e:99":"DCS8000LHA1 D-Link Mini Camera",
                            "44:01:bb:ec:10:4a":"HeimVision Smart WiFi Camera",
                            "34:75:63:73:f3:36":"Home Eye Camera",
                            "7c:a7:b0:cd:18:32":"Luohe Cam Dog",
                            "44:bb:3b:00:39:07":"Nest Indoor Camera",
                            "70:ee:50:68:0e:32":"Netatmo Camera",
                            "10:2c:6b:1b:43:be":"SIMCAM 1S (AMPAKTec)",
                            "b8:5f:98:d0:76:e6":"Amazon Plug",
                            "68:57:2d:56:ac:47":"Atomi Coffee Maker",
                            "8c:85:80:6c:b6:47":"Eufy HomeBase 2",
                            "50:02:91:b1:68:0c":"Globe Lamp ESP_B1680C",
                            "b8:f0:09:03:9a:af":"Gosund ESP_039AAF Socket",
                            "b8:f0:09:03:29:79":"Gosund ESP_032979 Plug",
                            "50:02:91:10:09:8f":"Gosund ESP_10098F Socket",
                            "c4:dd:57:0c:39:94":"Gosund ESP_0C3994 Plug",
                            "50:02:91:1a:ce:e1":"Gosund ESP_1ACEE1  Socket",
                            "24:a1:60:14:7f:f9":"Gosund ESP_147FF9 Plug",
                            "50:02:91:10:ac:d8":"Gosund ESP_10ACD8 Plug",
                            "d4:a6:51:30:64:b7":"HeimVision SmartLife Radio/Lamp",
                            "00:17:88:60:d6:4f":"Philips Hue Bridge",
                            "b0:09:da:3e:82:6c":"Ring Base Station AC:1236",
                            "50:14:79:37:80:18":"iRobot Roomba",
                            "00:02:75:f6:e3:cb":"Smart Board",
                            "d4:a6:51:76:06:64":"Teckin Plug 1",
                            "d4:a6:51:78:97:4e":"Teckin Plug 2",
                            "d4:a6:51:20:91:d1":"Yutron Plug 1",
                            "d4:a6:51:21:6c:29":"Yutron Plug 2",
                            "f0:b4:d2:f9:60:95":"D-Link DCHS-161 Water Sensor",
                            "ac:f1:08:4e:00:82":"LG Smart TV",
                            "70:ee:50:6b:a8:1a":"Netatmo Weather Station"}
    # IP address -> host description for the CIC-IDS-2017 testbed.
    cic_iot_2017_ip_list={
        "192.168.10.50":"Web server 16 Public",
        "205.174.165.68":"Web server 16 Public",
        "192.168.10.51":"Ubuntu server 12 Public",
        "205.174.165.66":"Ubuntu server 12 Public",
        "192.168.10.19":"Ubuntu 14.4, 32B",
        "192.168.10.17":"Ubuntu 14.4, 64B",
        "192.168.10.16":"Ubuntu 16.4, 32B",
        "192.168.10.12":"Ubuntu 16.4, 64B",
        "192.168.10.9":"Win 7 Pro, 64B",
        "192.168.10.5":"Win 8.1, 64B",
        "192.168.10.8":"Win Vista, 64B",
        "192.168.10.14":"Win 10, pro 32B",
        "192.168.10.15":"Win 10, 64B",
        "192.168.10.25":"MAC"
    }

    def walk_to_find_labeled_dir(self, dataset_path):
        """Return every directory under ``dataset_path`` containing a flowmeter.log file."""
        return [root for root, dirs, files in os.walk(dataset_path)
                if "flowmeter.log" in files]

    def __init__(self,graph_json_directory=root_dir+"CIC-IoT-2022/",mode='clear',dumpData=False,usedumpData=False,dumpFilename="dataset_builder.pkl.gzip",cross_version=False,test_split_rate=0.1):
        """Load the first labeled flowmeter log found under ``graph_json_directory``
        and annotate it with device names resolved from the IP table.

        Only ``graph_json_directory`` is used so far; the remaining parameters
        mirror FlowContainerJSONDataset for API compatibility.
        """
        log_to_df = JSONLogToDataFrame()
        labeled_dirs = self.walk_to_find_labeled_dir(graph_json_directory)
        print("labeled_dirs:", labeled_dirs)
        # NOTE(review): only the first labeled directory is consumed, and the
        # labeled file name differs from the one searched above — confirm that
        # "flowmeter_with_label.log" always sits next to "flowmeter.log".
        log_file = os.path.join(labeled_dirs[0], "flowmeter_with_label.log")
        print("file:", log_file)
        zeek_df = log_to_df.create_dataframe(log_file)

        # TODO: also look up cic_iot_2022_MAC_list to convert 'orig_l2_addr' and
        # 'resp_l2_addr' into 'orig_device_category'/'orig_device_name' and
        # 'resp_device_category'/'resp_device_name'. Client/server sides are kept
        # separate for now; merge later if the data shows it is unnecessary.
        #
        # FIX: the original per-row loop assigned via chained indexing
        # (zeek_df['orig_device_name'][-1] = ...), which writes label -1 instead
        # of the row being iterated and never fills the columns as intended.
        # A vectorized .map() performs the same per-row IP -> name lookup, with
        # 'NaN' (string, as before) for unknown addresses.
        zeek_df['orig_device_name'] = zeek_df['id.orig_h'].map(self.cic_iot_2017_ip_list).fillna('NaN')
        zeek_df['resp_device_name'] = zeek_df['id.resp_h'].map(self.cic_iot_2017_ip_list).fillna('NaN')
        zeek_df['device_major_state'] = ''

        print(zeek_df.head(5))

    def dumpData(self, dumpFileName=None):
        # Stub: caching not implemented yet.
        print("hello world!")

    def __next_batch(self, name, batch_size):
        # Stub: batching not implemented yet.
        print("hello world!")

    def next_train_batch(self, batch_size):
        """Return the next training batch (stub)."""
        return self.__next_batch('train', batch_size)

    def next_valid_batch(self, batch_size):
        """Return the next validation batch (stub)."""
        return self.__next_batch('valid', batch_size)

    def next_test_batch(self, batch_size):
        """Return the next test batch (stub)."""
        return self.__next_batch('test', batch_size)

    def export_wf_dataset(self, path_dir, feature_name='pkt_length'):
        # Stub: export the data in the form the wf-attacks models consume.
        print("hello world!")

    def export_fgnet_dataset(self, path_dir, feature_name='pkt_length'):
        # Stub: export the data in fgnet format.
        print("hello world!")


class FlowContainerJSONDataset:
    """APP network-flow behavior graph dataset built from flowcontainer JSON files.

    Every JSON file under ``graph_json_directory`` is converted into one DGL
    graph by ``build_graphs_from_flowcontainer_json``; the name of the file's
    grandparent directory (the app/package directory) is the class label.
    Samples are split into train/valid/test index lists, and the parsed
    dataset can be cached to — and restored from — a gzip'ed pickle file.
    """
    # IP address -> host description for the CIC-IDS-2017 testbed.
    cic_iot_2017_ip_list = {
        "192.168.10.50": "Victim network-Web server 16 Public",
        "205.174.165.68": "Victim network-Web server 16 Public",
        "192.168.10.51": "Victim network-Ubuntu server 12 Public",
        "205.174.165.66": "Victim network-Ubuntu server 12 Public",
        "192.168.10.19": "Victim network-Ubuntu 14.4, 32B",
        "192.168.10.17": "Victim network-Ubuntu 14.4, 64B",
        "192.168.10.16": "Victim network-Ubuntu 16.4, 32B",
        "192.168.10.12": "Victim network-Ubuntu 16.4, 64B",
        "192.168.10.9": "Victim network-Win 7 Pro, 64B",
        "192.168.10.5": "Victim network-Win 8.1, 64B",
        "192.168.10.8": "Victim network-Win Vista, 64B",
        "192.168.10.14": "Victim network-Win 10, pro 32B",
        "192.168.10.15": "Victim network-Win 10, 64B",
        "192.168.10.25": "Victim network-MAC",
        "205.174.165.80": "Firewall",
        "172.16.0.1": "Firewall",
        "192.168.10.3": "DNS+ DC Server",
        "205.174.165.73": "Attackers network-Kali",
        "205.174.165.69": "Attackers network-Win 1",
        "205.174.165.70": "Attackers network-Win 2",
        "205.174.165.71": "Attackers network-Win 3"
    }

    def __init__(self,graph_json_directory=root_dir+"Fgnet-2022-D1/",mode='clear',dumpData=False,usedumpData=False,dumpFilename="dataset_builder.pkl.gzip",cross_version=False,test_split_rate=0.1):
        '''
        :param graph_json_directory:  directory holding the graph JSON data
        :param mode:                  use clean data (`clear`), noisy data (`noise`), or both (`all`)
        :param dumpData:              loading is slow; when True the parsed dataset is dumped to a cache file
        :param test_split_rate:       fraction of all samples held out, default 10%; half become the test set, half the validation set
        :param dumpFilename:          cache file to dump to / restore from
        :param cross_version:         True: cross-version dataset (accepted for API compatibility; unused here)
        :return:
        '''
        self.dumpFileName = dumpFilename
        if usedumpData and os.path.exists(dumpFilename):
            # Fast path: restore everything from the gzip'ed pickle cache.
            fp = gzip.GzipFile(dumpFilename, "rb")
            data = pickle.load(fp)
            fp.close()
            self.labelName = data['labelName']
            self.labelNameSet = data['labelNameSet']
            self.graphs = data['graphs']
            self.labelId = data['labelId']
            self.train_index = data['train_index']
            self.test_index = data['test_index']
            self.valid_index = data['valid_index']
            info = 'Load dump data from {0}'.format(dumpFilename)
            logger_wrappers.warning(info)
        else:
            if not os.path.isdir(graph_json_directory):
                info = '{0} is not a directory'.format(graph_json_directory)
                logger_wrappers.error(info)
                # FIX: raise a specific exception instead of bare BaseException.
                raise NotADirectoryError(info)
            assert mode in ['clear', 'noise', 'all']
            self.labelName = []
            self.labelNameSet = {}
            self.labelId = []
            self.graphs = []

            # First pass: collect every label (parent package directory name)
            # and sort, so label ids are stable across runs and platforms.
            _labelNameSet = []
            for _root, _dirs, _files in os.walk(graph_json_directory):
                if _root == graph_json_directory or len(_files) == 0:
                    continue
                _root = _root.replace("\\", "/")
                versionName = _root.split("/")[-1]
                packageName = _root.split("/")[-2]  # app data layout
                # packageName = _root.split('/')[-1]      # blockchain data layout
                labelName = packageName
                _labelNameSet.append(labelName)
            _labelNameSet.sort()

            for i in range(len(_labelNameSet)):
                self.labelNameSet.setdefault(_labelNameSet[i], len(self.labelNameSet))

            # Second pass: build one graph per JSON file and record its label.
            for _root, _dirs, _files in os.walk(graph_json_directory):
                if _root == graph_json_directory or len(_files) == 0:
                    continue
                _root = _root.replace("\\", "/")
                versionName = _root.split("/")[-1]
                packageName = _root.split("/")[-2]  # app data layout
                # packageName = _root.split('/')[-1]      # blockchain data layout
                labelName = packageName
                print(labelName)
                for index in trange(len(_files)):
                    file = _files[index]
                    if file == ".DS_Store":  # skip macOS metadata files
                        continue
                    json_fname = (_root + "\\" + file).replace("\\", "/")

                    # The following filtered app pcaps by mode; kept for reference:
                    # if mode != 'all' and mode not in file:
                    #    continue

                    gs = build_graphs_from_flowcontainer_json(json_fname, time_period=time_period)
                    if len(gs) < 1 or gs[0] is None:
                        continue
                    assert len(gs) == 1
                    self.graphs += gs
                    self.labelName += [labelName] * len(gs)
                    self.labelId += [self.labelNameSet[labelName]] * len(gs)
                    assert self.labelId[-1] in range(len(self.labelNameSet))

            assert len(self.graphs) == len(self.labelId)
            assert len(self.graphs) == len(self.labelName)

            # max(1, ...) guards the degenerate empty-dataset case against a
            # ZeroDivisionError in the summary message.
            info = "Build {0} graph over {1} classes, {2} graph per class. {3} flow.".format(
                len(self.graphs), len(self.labelNameSet),
                len(self.graphs) // max(1, len(self.labelNameSet)), self.flowCounter)
            logger_wrappers.info(info)
            print(info)

            # Split into train/valid/test — roughly 90:5:5 for the default
            # test_split_rate of 0.1.
            self.train_index = []
            self.valid_index = []
            self.test_index = []

            # Shuffle, then assign each sample by an independent random draw.
            index = list(range(len(self.graphs)))
            random.shuffle(index)
            test_split = int(test_split_rate * 100) // 2
            for i in index:
                r = random.randint(0, 100)
                if r in range(0, test_split):
                    self.test_index.append(i)
                elif r in range(test_split, 2 * test_split):
                    self.valid_index.append(i)
                else:
                    self.train_index.append(i)

            if dumpData:
                self.dumpData()
        self.class_aliasname = {}       # label id -> human-readable alias
        labelNameSet = list(self.labelNameSet)
        labelNameSet.sort()             # sorted so alias ids match the label ids
        for i in range(len(labelNameSet)):
            self.class_aliasname.setdefault(i, labelNameSet[i])
        print('Train:{0},Test:{1},Valid:{2}'.format(len(self.train_index), len(self.test_index), len(self.valid_index)))

        # Cursors for sequential batch iteration over each split.
        self.train_watch = 0
        self.test_watch = 0
        self.valid_watch = 0
        self.epoch_over = False  # incremented (False == 0) each time the train cursor wraps

    def dumpData(self, dumpFileName=None):
        """Serialize the parsed dataset to a gzip'ed pickle cache file.

        :param dumpFileName: target file; defaults to the name given at construction
        """
        if dumpFileName is None:
            dumpFileName = self.dumpFileName
        # ``with`` guarantees the file is closed even if pickling fails.
        with gzip.GzipFile(dumpFileName, "wb") as fp:
            pickle.dump({
                    'graphs': self.graphs,
                    'flowCounter': self.flowCounter,
                    'labelName': self.labelName,
                    'labelNameSet': self.labelNameSet,
                    'labelId': self.labelId,
                    'train_index': self.train_index,
                    'valid_index': self.valid_index,
                    'test_index': self.test_index
                }, file=fp, protocol=-1)

    def __next_batch(self, name, batch_size):
        """Collect the next ``batch_size`` samples of the given split.

        :param name: 'train', 'valid', or anything else for the test split
        :return: (dgl.batch of graphs, torch tensor of label ids)
        """
        graphs = []
        labels = []
        for i in range(batch_size):
            if name == 'train':
                graphs.append(self.graphs[self.train_index[self.train_watch]])
                labels.append(self.labelId[self.train_index[self.train_watch]])

                if (self.train_watch + 1) == len(self.train_index):
                    self.epoch_over += 1  # completed one full pass over the training split

                self.train_watch = (self.train_watch + 1) % len(self.train_index)
            elif name == 'valid':
                graphs.append(self.graphs[self.valid_index[self.valid_watch]])
                labels.append(self.labelId[self.valid_index[self.valid_watch]])
                self.valid_watch = (self.valid_watch + 1) % len(self.valid_index)
            else:
                graphs.append(self.graphs[self.test_index[self.test_watch]])
                labels.append(self.labelId[self.test_index[self.test_watch]])
                self.test_watch = (self.test_watch + 1) % len(self.test_index)
        return dgl.batch(graphs), th.tensor(labels)

    def next_train_batch(self, batch_size):
        """Return the next training batch as (batched graph, label tensor)."""
        return self.__next_batch('train', batch_size)

    def next_valid_batch(self, batch_size):
        """Return the next validation batch as (batched graph, label tensor)."""
        return self.__next_batch('valid', batch_size)

    def next_test_batch(self, batch_size):
        """Return the next test batch as (batched graph, label tensor)."""
        return self.__next_batch('test', batch_size)

    def export_wf_dataset(self, path_dir, feature_name='pkt_length'):
        """Export the dataset in the form the wf-attacks models consume.

        Writes X_/y_ train/valid/test pickles (gzip'ed) under ``path_dir``.
        Node features are rescaled by ``mtu`` and reshaped to (-1, 1000, 1);
        this assumes each flow's feature row flattens into chunks of 1000 —
        TODO confirm against build_graphs_from_flowcontainer_json.
        """
        os.makedirs(path_dir, exist_ok=True)
        assert feature_name in ['pkt_length', 'arv_time']
        X_train = []
        y_train = []
        X_valid = []
        y_valid = []
        X_test = []
        y_test = []
        # One label per node (flow) of each graph.
        for i in self.train_index:
            X_train.append(self.graphs[i].ndata[feature_name] * mtu)
            y_train += [self.labelId[i]] * len(self.graphs[i].nodes())
        for i in self.test_index:
            X_test.append(self.graphs[i].ndata[feature_name] * mtu)
            y_test += [self.labelId[i]] * len(self.graphs[i].nodes())
        for i in self.valid_index:
            X_valid.append(self.graphs[i].ndata[feature_name] * mtu)
            y_valid += [self.labelId[i]] * len(self.graphs[i].nodes())

        # Merge the per-graph feature matrices.
        X_train = np.concatenate(X_train)
        X_test = np.concatenate(X_test)
        X_valid = np.concatenate(X_valid)

        X_train = np.reshape(X_train, (-1, 1000, 1))
        X_test = np.reshape(X_test, (-1, 1000, 1))
        X_valid = np.reshape(X_valid, (-1, 1000, 1))

        with gzip.GzipFile(path_dir+"/"+"X_train_"+feature_name+".pkl", "wb") as fp:
            pickle.dump(X_train, fp, -1)
        with gzip.GzipFile(path_dir+"/"+"X_valid_"+feature_name+".pkl", "wb") as fp:
            pickle.dump(X_valid, fp, -1)
        with gzip.GzipFile(path_dir+"/"+"X_test_"+feature_name+".pkl", "wb") as fp:
            pickle.dump(X_test, fp, -1)

        with gzip.GzipFile(path_dir+"/"+"y_train_"+feature_name+".pkl", "wb") as fp:
            pickle.dump(y_train, fp, -1)
        with gzip.GzipFile(path_dir+"/"+"y_valid_"+feature_name+".pkl", "wb") as fp:
            pickle.dump(y_valid, fp, -1)
        with gzip.GzipFile(path_dir+"/"+"y_test_"+feature_name+".pkl", "wb") as fp:
            pickle.dump(y_test, fp, -1)

        print('export {0} flows'.format(X_train.shape[0]))
        assert X_train.shape[0] == len(y_train)
        assert X_valid.shape[0] == len(y_valid)
        assert X_test.shape[0] == len(y_test)

    def export_fgnet_dataset(self, path_dir, feature_name='pkt_length'):
        """Export the dataset in fgnet format: one .num file per label, one
        tab-separated, ';'-delimited feature line per flow."""
        print('export to fgnet format')
        os.makedirs(path_dir, exist_ok=True)
        assert feature_name in ['pkt_length', 'arv_time']
        flowCounter = 0
        for i in range(len(self.labelName)):
            # One output file per label (package name); appended across graphs.
            package_name = self.labelName[i]
            # Rescale the node features by mtu back to raw packet lengths.
            feature_matrix = mtu * self.graphs[i].ndata[feature_name]
            with open(path_dir + package_name + ".num", "a") as fp:
                for j in range(feature_matrix.shape[0]):
                    feature = ";"+"\t".join([str(int(feature_matrix[j][0][i__])) for i__ in range(feature_matrix.shape[2])])+"\t;\n"
                    fp.writelines(feature)
                    flowCounter += 1
        print('export {0} flows'.format(flowCounter))

    @property
    def flowCounter(self):
        """Total number of flows (graph nodes) across all graphs."""
        flowcounter = 0
        for i in range(len(self.labelName)):
            flowcounter += self.graphs[i].ndata['pkt_length'].shape[0]
        return flowcounter


if __name__ == '__main__':

    # Build the dataset named on the command line and cache it to
    # ./data/fgnet_<dataset>/dataset_builder.pkl.gzip.
    parser = argparse.ArgumentParser(description='fgnet model')
    parser.add_argument('--dataset', '-d', type=str, help='dataset name', required=True, dest='dataset')
    args = parser.parse_args()
    dataset = args.dataset
    print("dataset:", dataset)

    graph_json_directory = os.path.join("./", "dataset", dataset)
    if not os.path.exists(graph_json_directory):
        # FIX: the original executed `raise 1`, which itself raises
        # "TypeError: exceptions must derive from BaseException" and hid
        # the real problem; raise a meaningful exception instead.
        raise FileNotFoundError(f"data_path {graph_json_directory} does not exist!")
    print("graph_json_directory:", graph_json_directory)

    data_path = './data/fgnet' + '_' + dataset
    dumpFilename = data_path + "/dataset_builder.pkl.gzip"
    os.makedirs(data_path, exist_ok=True)
    print("dumpFilename:", dumpFilename)

    # Parse the JSON graphs once and dump the cache; the object itself is
    # not needed afterwards.
    dataset = FlowContainerJSONDataset(mode='clear',
                      dumpData=True, usedumpData=False,
                      dumpFilename=dumpFilename,
                      cross_version=False,
                      test_split_rate=0.1,
                      graph_json_directory=graph_json_directory)
    del dataset
