import argparse
from typing import Tuple
import json, os, time, random

import numpy as np
import pandas as pd
from tqdm import tqdm

from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

class EvalCloseWorld:
    """Closed-world website fingerprinting evaluation.

    Trains several classical classifiers on a feature CSV whose last column
    is the domain label, and reports per-model classification metrics.
    """

    def __init__(self, config: dict = None):
        """
        Args:
            config: Optional settings. Recognized keys:
                "dataset_path", "domain_num", "seed", "validation_size".
        """
        config = config or {}  # avoid a shared mutable default argument
        self.dataset_path = config.get("dataset_path")
        self.domain_num = config.get("domain_num", 1000)
        self.seed = config.get("seed", 7)
        self.validation_size = config.get("validation_size", 0.2) # 80% training set, 20% test set.

        # Model zoo; SVM is kept commented out (presumably too slow) — same as before.
        self.models = {
            "NB": GaussianNB(var_smoothing=1e-8),
            "DT": DecisionTreeClassifier(criterion="entropy", max_depth=20, random_state=self.seed),
            "RF": RandomForestClassifier(n_estimators=50, criterion="gini", max_depth=20, random_state=self.seed, n_jobs=-1),
            "KNN": Pipeline([("Scaler", StandardScaler()), ("KNN", KNeighborsClassifier(n_neighbors=1))]),
            # "SVM": Pipeline([("Scaler", StandardScaler()), ("SVM", SVC(C=5000, kernel="rbf", gamma=0.05))]),
        }

    def _load_data(self, num_of_files_per_domain: int = 100) \
        -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Load the dataset CSV and split it into stratified train/test sets.

        Args:
            num_of_files_per_domain: The number of pcap files (CSV rows) per
                domain folder. Default 100.

        Returns:
            X_train, X_test, y_train, y_test
        """
        num_of_domains = self.domain_num
        if num_of_domains == 0:
            # 0 means "no limit": read the whole file.
            df = pd.read_csv(self.dataset_path)
        else:
            df = pd.read_csv(self.dataset_path, nrows=num_of_domains * num_of_files_per_domain)

        dataset = df.values
        # Last column is the label; everything before it is the feature vector.
        X, y = dataset[:, :-1], dataset[:, -1]

        return train_test_split(
            X, y, test_size=self.validation_size, random_state=self.seed, stratify=y)

    def run(self):
        """Fit every configured model and collect its classification report.

        Returns:
            dict with the dataset path and one entry per model containing
            accuracy, macro-averaged metrics, fit+predict duration (seconds),
            and the full sklearn report.
        """
        X_train, X_test, y_train, y_test = self._load_data()

        report = {
            "dataset": self.dataset_path,
            "report_list": []
        }

        for model_name, clf in self.models.items():
            print(model_name)
            t0 = time.time()
            clf.fit(X=X_train, y=y_train)
            y_pred = clf.predict(X_test)
            t1 = time.time()

            res = classification_report(y_test, y_pred, output_dict=True)
            duration = t1 - t0
            report["report_list"].append({
                "model": str(clf),
                "accuracy": res["accuracy"],
                "macro_avg": res["macro avg"],
                "duration": duration,  # fixed key typo ("dutration")
                "report": res
            })

            print("Duration: {:.2f} s".format(duration))
            print("-" * 60)

        return report

class EvalOpenWorld:
    """Open-world website fingerprinting evaluation.

    Target domains keep their own labels; every other domain is collapsed
    into a single "non-target" class (self.non_target_label).
    """

    def __init__(self, config:dict={}):
        # NOTE(review): mutable default argument is shared across calls;
        # consider config=None. run() also re-invokes __init__ per round.
        self.dataset_path = \
            config.get("dataset_path")
        self.seed = config.get("seed", 7)
        # Fraction of the test set drawn from target domains.
        self.target_proportion = config.get("target_proportion", 0.05)
        self.target_domain_num = config.get("target_domain_num", 10)
        self.test_set_size = config.get("test_set_size", 10000)
        # Single class id assigned to every non-target trace.
        self.non_target_label = 1000
        self.num_of_trace_each_domain = 100

        random.seed(self.seed)

        # Shuffled domain ids; the prefix of this list selects target domains.
        self.random_order_domains = self._get_domains()

    def _get_domains(self, total_num:int=1000):
        """Return a shuffled list of domain IDs in [0, total_num).

        Args:
            total_num: Total number of domains in the dataset. Default 1000.
        """
        domain_id_list = [i for i in range(total_num)]
        random.shuffle(domain_id_list)
        return domain_id_list

    def _modify_label(self, df:pd.DataFrame, new_label:int):
        """Return a one-column DataFrame of df's length filled with new_label.

        Note: the result has a fresh RangeIndex, not df's original index.
        """
        temp = np.ones(df.shape[0]) * new_label
        df = pd.DataFrame(temp)
        return df

    def _load_data(self, target_only:bool=False):
        """Load data.

        This function is for open-world multi-domain recognition; it returns
        X_train, X_test, y_train, y_test built from target / non-target splits.

        Suppose the dataset consists of 1000 domains and each domain has 100
        traces; then the size of the dataset is 100000.

        1% of total domains will be selected as target domains,
        10% of total domains will be selected as training domains,
        and every domain may be selected as a test domain.

        If test_set_size is set to 10000 and target_proportion is 0.05,
        then 500 traces from all target domains are included in the test set;
        the other 9500 traces are selected from training domains and
        non-training domains.

        Args:
            target_only: For LOF estimator. If True, return only the target
                domains' (sorted) training data together with the full test set.

        Returns:
            X_train, X_test, y_train, y_test (numpy arrays)
        """
        test_set_size = self.test_set_size
        target_proportion = self.target_proportion
        domain_list = self.random_order_domains
        target_domain_num = self.target_domain_num
        target_domain_id_list = domain_list[:target_domain_num]

        # assert(target_proportion <= 0.05)
        # Requested target test traces (test_set_size * target_proportion)
        # must not exceed 50 per target domain.
        assert(target_proportion <= target_domain_num * 50 / test_set_size)


        # Training set: 100 domains, 50 traces each, 5000 traces in total.
        df = pd.read_csv(self.dataset_path)

        # From the target_domain_num * 100 traces of the target domains,
        # pick test_set_size * target_proportion traces for testing:
        # (X_target_test, y_target_test)
        df_target = df[df["label"].isin(target_domain_id_list)]
        X, y = df_target.iloc[:,:-1], df_target.iloc[:,-1],
        X_target_left, X_target_test, y_target_left, y_target_test = train_test_split(
            X, y,
            test_size=int(test_set_size * target_proportion),
            random_state=self.seed,
            stratify=y,
        )

        # From the remaining target traces
        # (target_domain_num * 100 - test_set_size * target_proportion),
        # pick 50 * target_domain_num for training (X_target_train, y_target_train).
        # if target_proportion == 0.05:
        if target_proportion == 50 * target_domain_num / test_set_size:
            # Nothing to discard: exactly the training quota remains.
            X_target_train, y_target_train = X_target_left, y_target_left
        else:
            X_target_train, X_, y_target_train, y_ = train_test_split(
                X_target_left, y_target_left,
                train_size=50 * target_domain_num,
                random_state=self.seed,
                stratify=y_target_left,
            )

        # From the 9 * target_domain_num non-target training domains,
        # pick 9 * target_domain_num * 50 traces for training:
        # (X_non_target_train, y_non_target_train)
        df_non_targrt =  df[df["label"].isin(domain_list[target_domain_num:100])]
        X, y = df_non_targrt.iloc[:,:-1], df_non_targrt.iloc[:,-1]
        test_size = 0.5
        X_non_target_train, X_non_target_test_candidate,  y_non_target_train, y_non_target_test_candidate = train_test_split(
            X, y,
            # test_size=test_size,
            train_size=9 * target_domain_num * 50,
            random_state=self.seed,
            stratify=y,
        )

        # From the (1000 - target_domain_num) * 100 traces of non-target domains,
        # pick test_set_size * (1 - target_proportion) traces for testing.
        # Share taken from the non-target *training* domains:
        # ((9 * target_domain_num) / (1000 - target_domain_num)) of that budget.

        # NOTE(review): df_non_targrt below is recomputed with a hard-coded
        # slice [10:100] but never used afterwards — X/y come from the
        # candidate split above. Looks like dead code; confirm before removal.
        df_non_targrt =  df[df["label"].isin(domain_list[10:100])]
        X, y = X_non_target_test_candidate, y_non_target_test_candidate
        # test_size = ((1/11) * test_set_size * (1 - target_proportion)) / 4500
        test_size = (((9 * target_domain_num) / (1000 - target_domain_num)) \
            * test_set_size * (1 - target_proportion)) / (9 * target_domain_num * 50)

        X_, X_non_target_test1,  y_, y_non_target_test1 = train_test_split(
            X, y,
            test_size=test_size,
            random_state=self.seed,
            stratify=y,
        )

        # Share taken from the non-training domains:
        # ((1000 - 10*target_domain_num) / (1000 - target_domain_num)) of the
        # non-target test budget, expressed as a fraction of their traces.
        df_non_train = df[df["label"].isin(domain_list[10 * target_domain_num:])]
        X, y = df_non_train.iloc[:,:-1], df_non_train.iloc[:,-1]
        # test_size = (10/11) * test_set_size * (1 - target_proportion) / (900 * 100)
        test_size = (((1000 - 10 * target_domain_num) / (1000 - target_domain_num)) * \
            test_set_size * (1 - target_proportion)) / ((1000 - 10 * target_domain_num) * 100)
        X_, X_non_target_test2,  y_, y_non_target_test2 = train_test_split(
            X, y,
            test_size=test_size,
            random_state=self.seed,
            stratify=y,
        )

        # Collapse every non-target label into the single open-world class.
        y_non_target_train = self._modify_label(y_non_target_train, self.non_target_label)
        y_non_target_test1 = self._modify_label(y_non_target_test1, self.non_target_label)
        y_non_target_test2 = self._modify_label(y_non_target_test2, self.non_target_label)
        X_train = pd.concat((X_target_train, X_non_target_train))
        y_train = pd.concat((y_target_train, y_non_target_train))
        # Truncate to test_set_size in case rounding produced a few extra rows.
        X_test = pd.concat((X_target_test, X_non_target_test1, X_non_target_test2))[:test_set_size]
        # X_test = pd.concat((X_target_test, X_non_target_test1, X_non_target_test2))
        y_test = pd.concat((y_target_test, y_non_target_test1, y_non_target_test2))[:test_set_size]
        # y_test = pd.concat((y_target_test, y_non_target_test1, y_non_target_test2))

        if target_only:
            # LOF-style evaluation: train on target traces only, sorted by label.
            target_train = pd.concat((X_target_train, y_target_train), axis=1)
            target_train.sort_values(by="label", inplace=True)
            X_target_train = target_train.iloc[:,:-1]
            y_target_train = target_train.iloc[:,-1]

            return X_target_train.values, X_test.values, y_target_train.values.ravel(), y_test.values.ravel()

        return X_train.values, X_test.values, y_train.values.ravel(), y_test.values.ravel()

    def run_RF(self):
        """Train and evaluate a random forest on one open-world split.

        Returns:
            sklearn classification_report as a dict keyed by class label.
        """
        X_train, X_test, y_train, y_test = self._load_data()
        rf = RandomForestClassifier(
            n_estimators=50, criterion="gini", max_depth=20, random_state=self.seed, n_jobs=-1
        )
        rf.fit(X_train, y_train)
        y_pred = rf.predict(X_test)
        report = classification_report(y_test, y_pred, output_dict=True)

        return report

    def run(self):
        """Repeat the RF open-world experiment over 20 seeds and average metrics.

        Returns:
            dict with per-round reports plus precision/recall averaged over
            the target classes (keys listed in `irrelevant` are skipped).
        """
        round_time = 20
        seed_list = [_ for _ in range(round_time)]
        # Report keys that are not per-target-class entries ("1000.0" is the
        # collapsed non-target class, stringified from its float label).
        irrelevant = ["1000.0", "accuracy", "macro avg", "weighted avg"]
        RF_report_list = []

        for seed in tqdm(seed_list, ncols=50):
            config = {
                "dataset_path": self.dataset_path,
                "seed": seed,
            }
            # Re-initialize self so each round reshuffles domains with a new seed.
            self.__init__(config)

            RF_report = self.run_RF()
            RF_report_list.append(RF_report)

        avg_precision, avg_recall = 0.0, 0.0

        def read_report(data:dict):
            """Average precision/recall over the target-class entries of one report."""
            precision_, recall_, s = 0.0, 0.0, 0
            for key, val in data.items():
                if key not in irrelevant:
                    precision_ += val["precision"]
                    recall_ += val["recall"]
                    s += 1

            return precision_/s, recall_/s

        precision_, recall_ = 0.0, 0.0
        for r in RF_report_list:
            # NOTE(review): `r` is rebound to the recall value here. Harmless,
            # because the loop variable is re-read each iteration, but confusing.
            p, r = read_report(r)
            precision_ += p
            recall_ += r

        avg_precision = precision_/round_time
        avg_recall = recall_/round_time

        report = {
            "model": "RF",
            "target_proportion": self.target_proportion,
            "avg_precision": avg_precision,
            "avg_recall": avg_recall,
            "RF_report_list": RF_report_list,
        }

        return report

class Evaluator:
    """Run closed-world and open-world evaluations and persist JSON reports."""

    def __init__(self, config: dict = None):
        """
        Args:
            config: Optional settings. Recognized keys: "output_dir",
                "domain_num", "seed", "validation_size", "ND_dataset_path".
        """
        config = config or {}  # avoid a shared mutable default argument
        self.output_dir = config.get("output_dir", "data/output_of_evaluator")
        self.domain_num = config.get("domain_num", 1000)
        self.seed = config.get("seed", 7)
        self.validation_size = config.get("validation_size", 0.2) # 80% training set, 20% test set.
        self.ND_dataset_path = config.get(
            "ND_dataset_path",
            "data/dataset/important/dataset-SFExtractor-20220402200526-analyzer-shuffled.csv")

    def _save_report(self, report: dict, name: str) -> str:
        """Write `report` as pretty-printed JSON into output_dir.

        The filename is "<name>-<timestamp>.json".

        Returns:
            Path of the written report file.
        """
        t = time.strftime("%Y%m%d%H%M%S", time.localtime())
        report_filename = "{}-{}.json".format(name, t)
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(self.output_dir, exist_ok=True)
        report_path = os.path.join(self.output_dir, report_filename)
        with open(report_path, "w") as f_out:
            json.dump(report, f_out, indent=4)

        return report_path

    def eval(self, dataset_type: str, dataset_path: str):
        """Run both evaluations on one dataset and save the combined report.

        Args:
            dataset_type: One of "ND", "TO" or "API" (used in the report name).
            dataset_path: Path to the feature CSV to evaluate.
        """
        config = {
            "dataset_path": dataset_path,
            "domain_num": self.domain_num,
        }
        ecw = EvalCloseWorld(config)
        cw_report = ecw.run()

        ow = EvalOpenWorld(config)
        ow_report = ow.run()

        report = {
            "dataset_path": dataset_path,
            "close_world": cw_report,
            "open_world": ow_report,
        }
        self._save_report(report, name="{}-{}".format(dataset_type, self.domain_num))

    def eval_API(self):
        """Evaluate the defense ability of Adaptive Packet Insertion.

        Not implemented yet.
        """
        pass

    def _get_test_set_domain_id(self):
        """Return the labels of the closed-world test split of the ND dataset."""
        config = {
            "dataset_path": self.ND_dataset_path,
            "output_dir": self.output_dir,
            "domain_num": self.domain_num,
        }
        ecw = EvalCloseWorld(config)
        X_train, X_test, y_train, y_test = ecw._load_data()
        return y_test

class BWEvaluator:
    """Evaluate bandwidth consumption.

    Compares total packet count and byte size of a defended dataset's test
    set against the no-defense (ND) baseline dataset.
    """

    def __init__(self, config: dict = None):
        """
        Args:
            config: Optional settings. Recognized keys: "output_dir",
                "domain_num", "seed", "validation_size", "ND_dataset_path".
        """
        config = config or {}  # avoid a shared mutable default argument
        self.output_dir = config.get("output_dir", "data/output_of_evaluator/bw")
        self.domain_num = config.get("domain_num", 1000)
        self.seed = config.get("seed", 7)
        self.validation_size = config.get("validation_size", 0.2) # 80% training set, 20% test set.
        self.ND_dataset_path = config.get(
            "ND_dataset_path",
            "data/dataset/important/dataset-SFExtractor-20220402200526-analyzer-shuffled.csv")

    def _get_total_packet_number_and_bytes(self, dataset_path: str, num_of_domains: int = 0):
        """Get total packet number and bytes of the dataset's test split.

        Uses the same seed/split parameters as the classifier evaluations so
        the measured test set matches the one being classified.

        Args:
            dataset_path: csv file.
            num_of_domains: Limit on domains to read; 0 reads the whole file.

        Returns:
            (total bidirectional packets, total bidirectional bytes)
        """
        if num_of_domains == 0:
            df = pd.read_csv(dataset_path)
        else:
            df = pd.read_csv(dataset_path, nrows=num_of_domains * 100)

        X, y = df.iloc[:, :-1], df.iloc[:, -1]

        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=self.validation_size, random_state=self.seed, stratify=y)

        packet_num = X_test["bidirectional_packets"]
        size = X_test["bidirectional_bytes"]
        return np.sum(packet_num), np.sum(size)

    def eval_bandwidth_consumption(self, dataset_path: str):
        """Evaluate bandwidth consumption of a certain anti-analysis approach.

        This evaluation is based on the test set. Saves a JSON report and
        prints the size/packet overhead ratios relative to the ND baseline.

        Args:
            dataset_path: csv file.
        """
        nd_pn, nd_size = self._get_total_packet_number_and_bytes(self.ND_dataset_path)
        pn, size = self._get_total_packet_number_and_bytes(dataset_path)

        report = {
            "ND_dataset": {
                "path": self.ND_dataset_path,
                "packet_num": str(nd_pn),
                "size": str(nd_size),
            },
            "evaluated dataset": {
                "path": dataset_path,
                "packet_num": str(pn),
                "size": str(size),
            },
            "pn/nd_pn": pn / nd_pn,
            "size/nd_size": size / nd_size,
        }
        config = {
            "output_dir": self.output_dir
        }
        # Reuse Evaluator's report writer so all reports share one format.
        _save_report = Evaluator(config)._save_report
        report_path = _save_report(report, name="bw")
        print("Report was saved at '{}'".format(report_path))  # fixed typo "save"
        print()
        print("R_size:   {}".format(round(size / nd_size, 4)))
        print("R_packet: {}".format(round(pn / nd_pn, 4)))


def main():
    """Command-line entry point.

    With --bandwidth_compare, runs the bandwidth evaluation against that
    dataset; otherwise runs the full close/open-world evaluation on
    --dataset_path, saving reports under --output_dir.
    """
    parser = argparse.ArgumentParser(description="Evaluate.")
    parser.add_argument(
        "--type", "-t",
        type=str,
        help="dataset type, support \
            'ND'(No Defense), 'TO'(Traffic Obfuscation) or 'API'(Adaptive Packet Insertion)"
    )

    parser.add_argument(
        "--dataset_path", "-d",
        type=str,
        help="dataset path"
    )

    parser.add_argument(
        "--bandwidth_compare", "-b",
        type=str,
        help="target dataset path"
    )

    parser.add_argument(
        "--output_dir", "-o",
        type=str,
        default="data/output_of_evaluator",
    )

    args = parser.parse_args()

    if args.bandwidth_compare is not None:  # identity check, not `!= None`
        b = BWEvaluator()
        b.eval_bandwidth_consumption(args.bandwidth_compare)
    else:
        config = {
            "output_dir": args.output_dir
        }
        e = Evaluator(config)
        e.eval(
            dataset_type=args.type,
            dataset_path=args.dataset_path
        )

if __name__ == "__main__":
    main()