import pandas as pd
import os, json, time, glob, random, argparse
from tqdm import tqdm
from typing import Tuple

class Filter:
    """Post-process per-domain csv output of a flow extractor.

    Gathers every flow entry into a single csv dataset, writes a
    label (index) - domain dictionary (json), and produces shuffled
    copies of both via ``_shuffle``.
    """

    def handler(self, 
        csv_dir:str, output_dir:str, extractor:str, keep_original_files:bool,
        keep_all_features:bool=True):
        """Handle csv files.

        Original csv files output by extractor are organized as following structure:

        csv_dir
            ├── domain_1
            ├── domain_2
            ...
            └── domain_n

        Please pay attention that "csv_dir" refers to "time" (directory name) in extractor.py.

        This method applies post-processing to the csv files and reorganizes the data,
        gathering all the flow entries within one csv file.

        Besides, a label (index) - domain dictionary (json format) will be generated.

        Args:
            csv_dir: Directory to find output of extractor.py.
            output_dir: Directory to save dataset and label - domain dictionary.
            extractor: "nfstream" or "cicflowmeter".
            keep_original_files: Whether to keep the unshuffled files.
            keep_all_features: If False, drop constant features (std == 0).

        Raises:
            TypeError: If ``extractor`` is not a supported extractor name.
        """
        # pick the per-file post-processing routine for the chosen extractor;
        # named flow_filter so the builtin `filter` is not shadowed
        if extractor == "nfstream":
            flow_filter = self._nfstream_filter
        elif extractor == "cicflowmeter":
            flow_filter = self._cicflowmeter_filter
        else:
            raise TypeError("Please use correct extractor (cicflowmeter or nfstream)")

        label2domain = {}
        t = time.strftime("%Y%m%d%H%M%S", time.localtime())
        dataset_name = "dataset-{}-{}.csv".format(extractor, t)
        dictionary_name = "index2domain-{}-{}.json".format(extractor, t)
        frames = []

        # Only real sub-directories are domains. A stray file (e.g. .DS_Store)
        # must not consume a label that would then have no rows behind it,
        # which would break _shuffle's fixed rows-per-label assumption.
        domains = [d for d in os.listdir(csv_dir)
                   if os.path.isdir(os.path.join(csv_dir, d))]

        for label, domain in enumerate(tqdm(domains, ncols=50)):
            csv_domain_dir = os.path.join(csv_dir, domain)

            for csv_file in glob.glob(os.path.join(csv_domain_dir, "*.csv")):
                df_temp = flow_filter(csv_file)
                df_temp["label"] = label
                frames.append(df_temp)

            label2domain[label] = domain

        df = pd.concat(frames)

        os.makedirs(output_dir, exist_ok=True)

        # remove useless features (std == 0)
        if not keep_all_features:
            df = df.loc[:, df.std() > 0]

        # save
        dataset_path = os.path.join(output_dir, dataset_name)
        label2domain_path = os.path.join(output_dir, dictionary_name)

        df.to_csv(dataset_path, index=False)

        with open(label2domain_path, "w") as f_out:
            f_out.write(json.dumps(label2domain, indent=4))

        # shuffle
        self._shuffle(
            dataset_path,
            label2domain_path,
            keep_original_files
        )

    def _label_to_idx_range_ends(self, label:int, num:int) -> Tuple[int, int]:
        """Map a label to its row-index range in the dataset DataFrame.

        Args:
            label: Label, int number.
            num: The number of traffic traces of each domain.

        Returns:
            Tuple of first index (inclusive) and last index (exclusive).
        """
        return label * num, (label + 1) * num

    def _shuffle(self, dataset_path:str, label2domain_path:str, \
        keep_original_files:bool=False, num:int=100) \
        -> Tuple[str, str]:
        """Scramble the dataset label order.

        This method is reproducible (fixed random seed).

        NOTE(review): assumes the dataset holds exactly ``num`` consecutive
        rows per label, stored in label order — confirm against handler output.

        Args:
            dataset_path: Path to dataset.
            label2domain_path: Path to label2domain file.
            keep_original_files: Whether to keep original files.
            num: The number of traffic traces of each domain.

        Returns:
            tuple, consisting of:
                - shuffled_dataset_path: New dataset path.
                - shuffled_label2domain_path: New idx2domain file path.
        """
        random.seed(1) # to make result reproducible

        with open(label2domain_path, "r") as f_label2domain:
            label2domain = json.load(f_label2domain)

        shuffled_label_list = list(range(len(label2domain)))
        random.shuffle(shuffled_label_list)

        df = pd.read_csv(dataset_path)
        shuffled_label2domain = {}
        dl = []

        # reassign labels following the shuffled order and collect row slices
        for new_label, label in enumerate(shuffled_label_list):
            start, end = self._label_to_idx_range_ends(label, num)
            df.iloc[start:end, -1] = new_label # label column is last
            dl.append(df.iloc[start:end])
            # json object keys are always strings, hence str(label)
            shuffled_label2domain[new_label] = label2domain[str(label)]

        shuffled_df = pd.concat(dl)

        # derive "-shuffled" file names next to the originals
        base, ext = dataset_path.rsplit(".", 1)
        shuffled_dataset_path = base + "-shuffled." + ext
        base, ext = label2domain_path.rsplit(".", 1)
        shuffled_label2domain_path = base + "-shuffled." + ext

        shuffled_df.to_csv(shuffled_dataset_path, index=False)

        with open(shuffled_label2domain_path, "w") as f_out:
            f_out.write(json.dumps(shuffled_label2domain, indent=4))

        # delete files
        if not keep_original_files:
            os.remove(dataset_path)
            os.remove(label2domain_path)

        return shuffled_dataset_path, shuffled_label2domain_path

    def _cicflowmeter_filter(self, csv_file:str) -> pd.DataFrame:
        """The goals of this filter are:
        1. Remove irrelevant flows
        2. Remove irrelevant features, like `src_ip`, `dst_ip`, etc

        A DoH traffic trace captured by tcpdump usually consists of several (2 or 3) flows.
        By observation, the flow with the longest `flow_duration` is the target flow.

        Args:
            csv_file: A csv file generated by cicflowmeter.

        Returns:
            Post-processed DataFrame.
        """
        df = pd.read_csv(csv_file)
        # remove irrelevant flows: keep only the longest-duration flow(s)
        df = df[df["flow_duration"]==df["flow_duration"].max()]

        # remove irrelevant features [src_ip, dst_ip, src_port, dst_port, protocol, timestamp]
        df = df.drop(["src_ip", "dst_ip", "src_port", "dst_port", "protocol", "timestamp"], axis=1)    
        
        return df

    def _nfstream_filter(self, csv_file:str) -> pd.DataFrame:
        """The goals of this filter are:
        1. Remove irrelevant flows
        2. Remove irrelevant features, like `src_ip`, `dst_ip`, etc

        Args:
            csv_file: A csv file generated by nfstream.

        Returns:
            Post-processed DataFrame.
        """
        df = pd.read_csv(csv_file)

        # remove irrelevant flows: keep only the longest-duration flow(s)
        df = df[df["bidirectional_duration_ms"]==df["bidirectional_duration_ms"].max()]

        # remove identifier/timing/metadata features that carry no flow statistics
        df = df.drop(
            ["id",
            "expiration_id",
            "src_ip",
            "src_mac",
            "src_oui",
            "src_port",
            "dst_ip",
            "dst_mac",
            "dst_oui",
            "dst_port",
            "bidirectional_first_seen_ms",
            "bidirectional_last_seen_ms",
            "src2dst_first_seen_ms",
            "src2dst_last_seen_ms",
            "dst2src_first_seen_ms",
            "dst2src_last_seen_ms",
            "application_name",
            "application_category_name",
            "application_is_guessed",
            "requested_server_name",
            "client_fingerprint",
            "server_fingerprint",
            "user_agent",
            "content_type",
            ], axis=1)

        return df

# def test_shuffle():
#     f = Filter()
#     f._shuffle(
#         dataset_path = "data/dataset/dataset-20220228113900.csv",
#         label2domain_path = "data/dataset/index2domain-20220228113900.json",
#         keep_original_files = True
#     )

def main():
    """Command-line entry point: parse arguments and run Filter.handler.

    example:
        python.py -i data/output_of_nfstream/20220302210851
    """
    arg_parser = argparse.ArgumentParser(description="Integrate dataset and remove irrelevant features.")

    arg_parser.add_argument(
        "--input_dir", "-i",
        type = str,
        required = True,
        help = "directory generated by extractor \
            (Refer to 'time' directory, \
            e.g. data/output_of_cicflowmeter/20220227235702)")
    arg_parser.add_argument(
        "--output_dir", "-o",
        type = str,
        default = "data/dataset",
        help = "directory to save dataset (default 'data/dataset')")
    arg_parser.add_argument(
        "--keep_original_files", "-k",
        action = "store_true",
        help = "keep original files and shuffled files")
    arg_parser.add_argument(
        "--keep_all_features", "-ka",
        action = "store_true",
        help = "Not to remove useless features")
    arg_parser.add_argument(
        "--extractor", "-e",
        type = int,
        default = 1,
        choices = [1, 2],
        help = "supported extractors: 1. nfstream 2.cicflowmeter  (default 1)")

    options = arg_parser.parse_args()

    # map the numeric CLI choice to the extractor name handler expects
    extractor_names = {
        1: "nfstream",
        2: "cicflowmeter"
    }

    Filter().handler(
        csv_dir = options.input_dir,
        output_dir = options.output_dir,
        keep_original_files = options.keep_original_files,
        extractor = extractor_names[options.extractor],
        keep_all_features = options.keep_all_features
    )

if __name__ == "__main__":
    main() 

    # test_shuffle()

    # f = Filter()

    # f.handle(
    #     csv_dir="data/output_of_cicflowmeter/20220227235702_subset",
    #     output_dir="data/dataset"
    # )

    # f.handle(
    #     csv_dir="data/output_of_cicflowmeter/20220227235702",
    #     output_dir="data/dataset"
    # )
