import logging, json, os, glob, time, random, argparse
from typing import Tuple
from tqdm import tqdm
import numpy as np
import pandas as pd

class StatisticalFeatureExtractor:
    """Extract flow-level statistical features from per-flow JSON records.

    The JSON records are produced by upstream tools ("analyzer", "mixer" or
    "injector" -- see ``sequence_type``). Features are collected into a
    pandas DataFrame and saved as a CSV dataset plus a label->domain
    dictionary.
    """

    # Packets trimmed from an "analyzer" flow: the TLS handshake (front) and
    # the TCP tear-down (back) carry little information.
    HANDSHAKE_PACKETS = 10
    TEARDOWN_PACKETS = 5

    def __init__(self, config=None):
        """
        Args:
            config: Optional configuration dict. Recognized keys (all
                optional): "log_file", "output_dir", "sequence_type".
        """
        # A fresh dict per call avoids the mutable-default-argument pitfall
        # of the original ``config={}`` signature.
        config = config or {}
        self.log_file = config.get("log_file", "SFextractor.log")
        self.output_dir = config.get("output_dir", "data/dataset")
        self.sequence_type = config.get("sequence_type", "analyzer")

        logging.basicConfig(
            filename=self.log_file,
            format="%(asctime)s - %(levelname)s - %(message)s ",
            level=logging.INFO
        )

    def get_target_flow(self, filepath: str, condition: str = "largest",
                        return_flow_id: bool = False):
        """Select the single most relevant flow from a flows record file.

        The record file is a JSON file (generated by Analyzer) mapping flow
        IDs to flow dicts. Only the flow meeting ``condition`` is kept:

        - "largest": the flow with the largest total size (sum of ps_list).
        - "longest": the flow with the longest duration (timestamp span).

        For "analyzer" sequences, the TLS handshake (first 10 packets) and
        TCP tear-down (last 5 packets) carry little information, so the
        corresponding sequence elements are trimmed away.

        Args:
            filepath: Path to the flows record JSON file.
            condition: "largest" or "longest".
            return_flow_id: Whether to also return the flow ID.

        Returns:
            The selected flow dict, or ``{flow_id: flow}`` when
            ``return_flow_id`` is True.

        Raises:
            ValueError: If ``condition`` is unknown or the file has no flows.
        """
        with open(filepath, "r") as f_in:
            flows = json.load(f_in)

        if condition not in ("largest", "longest"):
            raise ValueError("condition should be 'largest' or 'longest'")
        if not flows:
            # Previously this surfaced as an opaque KeyError: None.
            raise ValueError("no flows found in {}".format(filepath))

        # ``>=`` keeps the LAST best flow on ties, matching the original
        # selection behavior.
        target_flow_id, best_metric = None, -1
        for flow_id, flow in flows.items():
            if condition == "largest":
                metric = np.sum(flow["ps_list"])
            else:  # "longest"
                metric = flow["ts_list"][-1] - flow["ts_list"][0]
            if metric >= best_metric:
                target_flow_id, best_metric = flow_id, metric

        target_flow = flows[target_flow_id]

        if self.sequence_type == "analyzer":
            keys = ("ps_list", "piat_list", "direction_list", "ts_list")
            n = len(target_flow["ps_list"])
            # At least one packet must survive trimming 10 front + 5 back.
            long_enough = n > self.HANDSHAKE_PACKETS + self.TEARDOWN_PACKETS
            consistent = all(len(target_flow[k]) == n for k in keys)
            if long_enough and consistent:
                for k in keys:
                    target_flow[k] = \
                        target_flow[k][self.HANDSHAKE_PACKETS:-self.TEARDOWN_PACKETS]
            else:
                # Best-effort: keep the untrimmed flow but record the problem.
                # (The original used assert + a broad except for this check;
                # asserts disappear under ``python -O``.)
                logging.error(
                    "This file is too short to be handled: {}".format(filepath))

        if return_flow_id:
            return {target_flow_id: target_flow}
        return target_flow

    def _get_subseq_by_direction_list(self, seq: list, direction_list: list,
                                      direction) -> list:
        """Keep the elements of ``seq`` sent in the given direction.

        Args:
            seq: Original sequence.
            direction_list: Per-element direction flags (0 or 1).
            direction: "src2dst"(0) or "dst2src"(1), as str or int.

        Returns:
            The matching subsequence. Falls back to ``[0]`` when nothing
            matches, so downstream min/max/duration never see an empty list.
        """
        if direction == "src2dst":
            direction = 0
        elif direction == "dst2src":
            direction = 1
        subseq = [e for e, d in zip(seq, direction_list) if d == direction]
        return subseq if subseq else [0]

    @staticmethod
    def _directional_piat_ms(ts_list: list) -> np.ndarray:
        """Inter-arrival times (ms) within one direction.

        A leading 0 pads the result to len(ts_list); a single timestamp
        yields ``[0.]``. Equivalent to the original shifted-subtraction
        (``[0]+ts`` vs ``ts+[0]``) construction, written with np.diff.
        """
        ts = np.asarray(ts_list, dtype=np.float64)
        return np.concatenate(([0.0], np.diff(ts))) * 1000

    def time_related_features(self, flow: dict) -> dict:
        """Compute duration and packet-inter-arrival-time (piat) features.

        Features (all in ms): duration plus min/mean/stddev/max piat, for
        the bidirectional flow and for each direction separately.

        Args:
            flow: Flow dict holding timestamp/offset, piat and direction
                lists for one flow.

        Returns:
            Dict of feature name -> value (key order defines the dataset
            column order).

        Raises:
            ValueError: If ``self.sequence_type`` is not recognized.
        """
        if self.sequence_type in ("analyzer", "injector"):
            ts_list = flow["ts_list"]
        elif self.sequence_type == "mixer":
            # Mixer records relative offsets instead of absolute timestamps.
            ts_list = flow["offset_list"]
        else:
            # Previously an unknown type fell through to an IndexError on [].
            raise ValueError(
                "unknown sequence_type: {}".format(self.sequence_type))
        piat_list, direction_list = flow["piat_list"], flow["direction_list"]

        src2dst_ts = self._get_subseq_by_direction_list(ts_list, direction_list, "src2dst")
        dst2src_ts = self._get_subseq_by_direction_list(ts_list, direction_list, "dst2src")

        src2dst_piat = self._directional_piat_ms(src2dst_ts)
        dst2src_piat = self._directional_piat_ms(dst2src_ts)

        return {
            "bidirectional_duration_ms":    (ts_list[-1] - ts_list[0]) * 1000,
            "src2dst_duration_ms":          (src2dst_ts[-1] - src2dst_ts[0]) * 1000,
            "dst2src_duration_ms":          (dst2src_ts[-1] - dst2src_ts[0]) * 1000,
            # NOTE(review): piat_list is used as-is, so it is presumably
            # already in ms -- confirm against the upstream generator.
            "bidirectional_min_piat_ms":    np.min(piat_list),
            "bidirectional_mean_piat_ms":   np.mean(piat_list),
            "bidirectional_stddev_piat_ms": np.std(piat_list),
            "bidirectional_max_piat_ms":    np.max(piat_list),
            "src2dst_min_piat_ms":          np.min(src2dst_piat),
            "src2dst_mean_piat_ms":         np.mean(src2dst_piat),
            "src2dst_stddev_piat_ms":       np.std(src2dst_piat),
            "src2dst_max_piat_ms":          np.max(src2dst_piat),
            "dst2src_min_piat_ms":          np.min(dst2src_piat),
            "dst2src_mean_piat_ms":         np.mean(dst2src_piat),
            "dst2src_stddev_piat_ms":       np.std(dst2src_piat),
            "dst2src_max_piat_ms":          np.max(dst2src_piat),
        }

    def packet_size_related_features(self, flow: dict) -> dict:
        """Compute packet-count, byte-count and packet-size (ps) features.

        Features: packets/bytes plus min/mean/stddev/max packet size, for
        the bidirectional flow and for each direction separately.

        Args:
            flow: Flow dict holding "ps_list" and "direction_list".

        Returns:
            Dict of feature name -> value (key order defines the dataset
            column order).
        """
        ps_list, direction_list = flow["ps_list"], flow["direction_list"]
        # NOTE: a direction with no packets falls back to [0], so its packet
        # count reads 1 with 0 bytes -- kept for backward compatibility.
        src2dst_ps = self._get_subseq_by_direction_list(ps_list, direction_list, "src2dst")
        dst2src_ps = self._get_subseq_by_direction_list(ps_list, direction_list, "dst2src")

        return {
            "bidirectional_packets":    len(ps_list),
            "bidirectional_bytes":      np.sum(ps_list),
            "src2dst_packets":          len(src2dst_ps),
            "src2dst_bytes":            np.sum(src2dst_ps),
            "dst2src_packets":          len(dst2src_ps),
            "dst2src_bytes":            np.sum(dst2src_ps),
            "bidirectional_min_ps":     np.min(ps_list),
            "bidirectional_mean_ps":    np.mean(ps_list),
            "bidirectional_stddev_ps":  np.std(ps_list),
            "bidirectional_max_ps":     np.max(ps_list),
            "src2dst_min_ps":           np.min(src2dst_ps),
            "src2dst_mean_ps":          np.mean(src2dst_ps),
            "src2dst_stddev_ps":        np.std(src2dst_ps),
            "src2dst_max_ps":           np.max(src2dst_ps),
            "dst2src_min_ps":           np.min(dst2src_ps),
            "dst2src_mean_ps":          np.mean(dst2src_ps),
            "dst2src_stddev_ps":        np.std(dst2src_ps),
            "dst2src_max_ps":           np.max(dst2src_ps),
        }

    def get_statistical_features(self, filepath: str) -> pd.DataFrame:
        """Extract all statistical features of the target flow in one file.

        Args:
            filepath: Path to a flows record JSON file.

        Returns:
            Single-row DataFrame with one column per feature (time-related
            columns first, then packet-size-related, as before).
        """
        flow = self.get_target_flow(filepath)
        features = {}
        features.update(self.time_related_features(flow))
        features.update(self.packet_size_related_features(flow))
        return pd.DataFrame(features, index=[0])

    def _label_to_idx_range_ends(self, label: int, num: int) -> Tuple[int, int]:
        """Map a label to its half-open row range in the dataset DataFrame.

        Args:
            label: Integer class label.
            num: Number of traffic traces per domain.

        Returns:
            Tuple (first_index, one_past_last_index).
        """
        return label * num, (label + 1) * num

    def _shuffle(self, dataset_path: str, label2domain_path: str,
                 keep_original_files: bool = False, num: int = 100) \
            -> Tuple[str, str]:
        """Shuffle the per-domain order of the dataset, reproducibly.

        Args:
            dataset_path: Path to the CSV dataset.
            label2domain_path: Path to the label->domain JSON file.
            keep_original_files: Whether to keep the unshuffled files.
            num: Number of traffic traces per domain.

        Returns:
            Tuple (shuffled_dataset_path, shuffled_label2domain_path).
        """
        random.seed(1)  # fixed seed keeps the result reproducible

        with open(label2domain_path, "r") as f_label2domain:
            label2domain = json.load(f_label2domain)

        shuffled_label_list = list(range(len(label2domain)))
        random.shuffle(shuffled_label_list)

        df = pd.read_csv(dataset_path)

        # Re-label each domain's block of rows and collect them in the new
        # (shuffled) order.
        shuffled_label2domain = {}
        chunks = []
        for new_label, label in enumerate(shuffled_label_list):
            first, last = self._label_to_idx_range_ends(label, num)
            df.iloc[first:last, -1] = new_label  # last column is the label
            chunks.append(df.iloc[first:last])
            # JSON object keys are strings, hence str(label).
            shuffled_label2domain[new_label] = label2domain[str(label)]

        shuffled_df = pd.concat(chunks)

        root, ext = dataset_path.rsplit(".", 1)
        shuffled_dataset_path = "{}-shuffled.{}".format(root, ext)
        root, ext = label2domain_path.rsplit(".", 1)
        shuffled_label2domain_path = "{}-shuffled.{}".format(root, ext)

        shuffled_df.to_csv(shuffled_dataset_path, index=False)
        with open(shuffled_label2domain_path, "w") as f_out:
            f_out.write(json.dumps(shuffled_label2domain, indent=4))

        if not keep_original_files:
            os.remove(dataset_path)
            os.remove(label2domain_path)

        return shuffled_dataset_path, shuffled_label2domain_path

    def inspect_batch(self, input_dir: str, shuffle: bool = True):
        """Extract features for every flow JSON file under ``input_dir``.

        Expects one sub-directory per domain; every ``*.json`` file in a
        sub-directory is labeled with that domain's integer label. Results
        are saved as a CSV dataset plus an index->domain JSON dictionary.

        Q:  What's the difference between "analyzer" and "mixer"?
        A:  JSON files generated by analyzer are pretty "raw", meaning the
            analyzer does not edit the original information. The mixer, in
            contrast, removes irrelevant flows (keeping only the flow with
            the largest size) and also removes packets related to the TLS
            handshake (first 10 packets) and TCP tear-down (last packets).

        Args:
            input_dir: Directory with one sub-directory per domain.
            shuffle: Whether to shuffle the resulting dataset. Default True.
        """
        logging.info("SFExtractor start working. Sequence type: {}. Directory: {}".format(
            self.sequence_type, input_dir))

        timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
        if self.sequence_type == "injector":
            # Injector output keeps the source directory name in the file names.
            input_dir_name = input_dir.rsplit("/", 1)[-1]
            dataset_name = "dataset-{}-{}.csv".format(timestamp, input_dir_name)
            dictionary_name = "index2domain-{}-{}.json".format(timestamp, input_dir_name)
        else:
            dataset_name = "dataset-{}-{}-{}.csv".format("SFExtractor", timestamp, self.sequence_type)
            dictionary_name = "index2domain-{}-{}-{}.json".format("SFExtractor", timestamp, self.sequence_type)

        label2domain = {}
        frames = []
        for label, domain in enumerate(tqdm(os.listdir(input_dir), ncols=50)):
            json_files = glob.glob(os.path.join(input_dir, domain, "*.json"))
            for json_file in json_files:
                df_temp = self.get_statistical_features(json_file)
                df_temp["label"] = label
                frames.append(df_temp)
            label2domain[label] = domain

        df = pd.concat(frames)

        # exist_ok avoids the check-then-create race of the original
        # os.path.exists + os.makedirs pair.
        os.makedirs(self.output_dir, exist_ok=True)

        dataset_path = os.path.join(self.output_dir, dataset_name)
        label2domain_path = os.path.join(self.output_dir, dictionary_name)

        df.to_csv(dataset_path, index=False)
        with open(label2domain_path, "w") as f_out:
            f_out.write(json.dumps(label2domain, indent=4))

        if shuffle:
            self._shuffle(
                dataset_path=dataset_path,
                label2domain_path=label2domain_path,
            )

        logging.info("Done. Dataset was saved at {}".format(self.output_dir))

    def inspect(self, filepath: str):
        """Print the statistical features extracted from a single file."""
        df = self.get_statistical_features(filepath)
        print(df)
    
def main():
    """CLI entry point: parse arguments and run batch feature extraction."""
    parser = argparse.ArgumentParser(description="Statistical Feature Extractor.")

    parser.add_argument(
        "--type", "-t",
        type=str,
        default="analyzer",
        help="sequence type, support 'analyzer', 'mixer' or 'injector'"
    )

    parser.add_argument(
        "--input_dir", "-i",
        type=str,
        # A missing input_dir previously crashed deep inside inspect_batch;
        # fail fast with a clear argparse error instead.
        required=True,
        help="directory containing one sub-directory of flow JSON files per domain"
    )

    parser.add_argument(
        "--output_dir", "-o",
        type=str,
        default="data/dataset",
        help="directory to save dataset (default 'data/dataset')"
    )

    args = parser.parse_args()

    config = {
        "output_dir": args.output_dir,
        "sequence_type": args.type,
    }

    extractor = StatisticalFeatureExtractor(config)
    extractor.inspect_batch(
        input_dir=args.input_dir,
    )


if __name__ == "__main__":
    main()

# python SFExtractor.py -t analyzer -i data/output_of_analyzer/20220319215128
# python SFExtractor.py -t mixer -i data/output_of_mixer/20220329095714
# python SFExtractor.py -t injector -i data/output_of_injector/20220324151743