from typing import Tuple
import json, os, time

import numpy as np
import pandas as pd

from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split, KFold
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

class CloseWorld:
    """Closed-world website-fingerprinting experiments on DoH traffic traces.

    Wraps a set of pre-tuned scikit-learn classifiers and provides helpers to
    train/evaluate them, grid-search hyper-parameters, compute confusion
    matrices and run K-fold cross validation on a CSV feature dataset whose
    last column is the class label.
    """

    class Config:
        """Experiment configuration with project defaults."""

        def __init__(self):
            # Path to the (shuffled) feature CSV; last column is the label.
            self.dataset_path = "/home/chuen/workspace/doh-traffic-trace-analyzer/data/dataset/important/dataset-SFExtractor-20220402200526-analyzer-shuffled.csv"
            self.output_dir = "data/close_world"  # where reports / CV results are written
            self.domain_num = 100                 # number of monitored domains (classes)
            self.num_of_folds = 5                 # K for K-fold cross validation
            self.seed = 7                         # RNG seed for reproducibility
            self.validation_size = 0.2            # 80% training set, 20% test set.
            self.scoring = "f1_macro"             # metric used for tuning / CV

    def __init__(self, config=None):
        """
        Args:
            config: Optional dict overriding any Config attribute
                (dataset_path, output_dir, domain_num, num_of_folds, seed,
                validation_size, scoring). Unknown keys are ignored.
        """
        # BUG FIX: `config={}` was a mutable default argument.
        config = {} if config is None else config

        self.cfg = self.Config()

        # BUG FIX: overrides were previously stored only on `self.<attr>`,
        # while every method reads `self.cfg.<attr>`, so a user-supplied
        # config dict was silently ignored.  Apply the overrides to self.cfg
        # and keep the mirror attributes for backward compatibility.
        for key in ("dataset_path", "output_dir", "domain_num", "num_of_folds",
                    "seed", "validation_size", "scoring"):
            value = config.get(key, getattr(self.cfg, key))
            setattr(self.cfg, key, value)
            setattr(self, key, value)

        # Classifiers with hyper-parameters fixed by earlier grid searches
        # (see tune()).  Distance-based models are wrapped in a scaling
        # pipeline so standardization is fit on training folds only.
        self.tuned_candidates = {
            "NB": {
                "model": GaussianNB(var_smoothing=1e-8),
            },
            "DT": {
                "model": DecisionTreeClassifier(criterion="entropy", max_depth=20, random_state=self.cfg.seed),
            },
            "RF": {
                "model": RandomForestClassifier(n_estimators=50, criterion="gini", max_depth=20, random_state=self.cfg.seed, n_jobs=-1),
            },
            "KNN": {
                "model": Pipeline([("Scaler", StandardScaler()), ("KNN", KNeighborsClassifier(n_neighbors=1))]),
            },
            # SVM is currently disabled; re-enable here to include it in
            # tuned()/cross_val()/confusion_matrix().
            # "SVM": {
            #     "model":  Pipeline([("Scaler", StandardScaler()), ("SVM", SVC(C=5000, kernel="rbf", gamma=0.05))]),
            # },
        }

    def _save_report(self, report: dict, model_name: str, user_cfg: Config = None) -> str:
        """Save a classification report as JSON.

        Report file name: report-model_name-domain_num-time.json

        Args:
            report: Report, in dict format.
            model_name: Model name (used in the file name).
            user_cfg: Optional Config overriding self.cfg.

        Returns:
            Report path.
        """
        cfg = user_cfg if user_cfg is not None else self.cfg

        t = time.strftime("%Y%m%d%H%M%S", time.localtime())
        report_name = "report-{}-{}-{}.json".format(model_name, cfg.domain_num, t)
        report_path = os.path.join(cfg.output_dir, report_name)

        # exist_ok avoids the exists()/makedirs() race of the original.
        os.makedirs(cfg.output_dir, exist_ok=True)

        with open(report_path, "w") as f_out:
            json.dump(report, f_out, indent=4)

        return report_path

    def _load_data(self, dataset_path: str, num_of_domains: int = 1000, num_of_files_per_domain: int = 100) \
            -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Load the dataset and split it into stratified train/test sets.

        Args:
            dataset_path: Path to dataset CSV (last column is the label).
            num_of_domains: Number of domains to load. 0 means all. Default 1000.
            num_of_files_per_domain: Number of samples (pcap files) per domain.
                Default 100.

        Returns:
            X_train, X_test, y_train, y_test
        """
        seed = self.cfg.seed
        validation_size = self.cfg.validation_size

        # The CSV is pre-shuffled per domain block, so reading the first
        # num_of_domains * num_of_files_per_domain rows selects that many domains.
        if num_of_domains == 0:
            df = pd.read_csv(dataset_path)
        else:
            df = pd.read_csv(dataset_path, nrows=num_of_domains * num_of_files_per_domain)

        # Drop constant columns: they carry no information and break scaling.
        df = df.loc[:, df.std() > 0]
        dataset = df.values
        X = dataset[:, :-1]
        y = dataset[:, -1]

        return train_test_split(
            X, y, test_size=validation_size, random_state=seed, stratify=y)

    def run(self, args: dict, name: str, output_file: bool = True, user_cfg: Config = None) -> str:
        """Train one model (optionally grid-searching) and report test metrics.

        Args:
            args: A dict consisting of
                - model: Supervised learning model of scikit-learn.
                - param_grid: Hyper-parameter grid; when non-empty a
                  GridSearchCV is run, where keys are parameter names and
                  values are candidate values.
            name: Composite estimator name (used in the report file name).
            output_file: Whether to save the result as a file.
            user_cfg: User-defined config overriding self.cfg.

        Returns:
            Path of the report file, or "" when output_file is False.
        """
        cfg = user_cfg if user_cfg is not None else self.cfg

        model = args.get("model")
        param_grid = args.get("param_grid", {})
        tune = bool(param_grid)  # empty grid -> plain fit, no search

        report = {
            "model": str(model),
            "duration": 0.0,
            "accuracy": 0.0,
            "macro_avg": {},
            "best_params": None,
            "grid_search": {},
            "report": None,
        }

        X_train, X_test, y_train, y_test = self._load_data(cfg.dataset_path, cfg.domain_num)
        t0 = time.time()

        if tune:
            clf = GridSearchCV(estimator=model, param_grid=param_grid, scoring=cfg.scoring, n_jobs=-1)
            grid_result = clf.fit(X=X_train, y=y_train)

            # best parameters
            report["best_params"] = grid_result.best_params_

            # per-candidate grid scores
            means = clf.cv_results_["mean_test_score"]
            stds = clf.cv_results_["std_test_score"]
            for idx, (mean, std, params) in enumerate(zip(means, stds, clf.cv_results_["params"])):
                report["grid_search"][str(idx)] = {
                    "mean": mean,
                    "std": std,
                    "params": params
                }
        else:
            clf = model
            clf.fit(X=X_train, y=y_train)

        # detailed classification report on the held-out test set
        y_pred = clf.predict(X_test)
        t1 = time.time()

        if output_file:
            report_ = classification_report(y_test, y_pred, output_dict=True)
            report["accuracy"] = report_["accuracy"]
            report["macro_avg"] = report_["macro avg"]
            report["report"] = report_
            duration = t1 - t0
            report["duration"] = duration

            model_name = str(model)
            print(model_name)
            print("accuracy: ", report["report"]["accuracy"])
            print("macro avg: ", report["report"]["macro avg"])
            print("Duration: {:.2f} s".format(round(duration, 2)))

            return self._save_report(report, name, user_cfg=cfg)
        else:
            print(classification_report(y_test, y_pred))
            return ""

    def tuned(self):
        """Run every tuned candidate over 100..1000 domains (step 100)."""
        cfg = self.Config()
        cfg.output_dir = "data/close_world/tuned3"

        for key, candidate in self.tuned_candidates.items():
            for domain_num in range(100, 1100, 100):
                cfg.domain_num = domain_num
                print("-" * 100)
                path = self.run(candidate, key, user_cfg=cfg)
                print("Report was save at ", path)

    def tune(self):
        """Grid-search hyper-parameters for the candidates enabled below."""
        # BUG FIX: was `cfg = Config()`, which raises NameError because
        # Config is only defined in the class namespace.
        cfg = self.Config()

        # Uncomment a candidate to include it in the search.
        candidates = {
            # "KNN": {
            #     "model": Pipeline([("Scaler", StandardScaler()), ("KNN", KNeighborsClassifier())]),
            #     "param_grid": {
            #         "KNN__n_neighbors": range(1, 7, 2),
            #     }
            # },

            # "NB": {
            #     "model": GaussianNB(),
            #     "param_grid": {
            #         "var_smoothing": [1e-9, 1e-8, 1e-7]
            #     }
            # },

            # "SVM": {
            #     "model":  Pipeline([("Scaler", StandardScaler()), ("SVM", SVC(C=5000, kernel="rbf", gamma=0.05))]),
            #     "param_grid": {
            #         "SVM__kernel": ["rbf"],
            #         "SVM__C": [5000],
            #         "SVM__gamma": [0.05],
            #     }
            # },

            # "DT": {
            #     "model": DecisionTreeClassifier(random_state=cfg.seed),
            #     "param_grid": {
            #         "criterion": ["gini", "entropy"],
            #         "max_depth": [20, 30, None],
            #     },
            # },

            # "RF": {
            #     "model": RandomForestClassifier(random_state=cfg.seed, n_jobs=-1),
            #     "param_grid": {
            #         "n_estimators": [200],
            #         "criterion": ["gini"],
            #         "max_depth": [20, 30, None],
            #     }
            # },
        }

        for key, candidate in candidates.items():
            print("-" * 100)
            path = self.run(candidate, key, user_cfg=cfg)
            print("Report was save at ", path)

    def confusion_matrix(self, model_name: str = "RF", num_of_domains: int = 100):
        """Generate a confusion matrix for one tuned candidate.

        Args:
            model_name: Key into self.tuned_candidates.
                BUG FIX: the default used to be "SVM", which raises KeyError
                because the SVM candidate is commented out; "RF" is an
                available model.
            num_of_domains: Number of domains to load.

        Returns:
            Matrix (numpy array from sklearn.metrics.confusion_matrix).
        """
        X_train, X_test, y_train, y_test = self._load_data(self.cfg.dataset_path, num_of_domains)
        clf = self.tuned_candidates[model_name]["model"]

        clf.fit(X=X_train, y=y_train)
        y_true, y_pred = y_test, clf.predict(X_test)

        return confusion_matrix(y_true, y_pred)

    def cross_val(self, num_of_domains: int = 100, num_of_files_per_domain: int = 100,
                  num_of_folds: int = 10, score: str = "f1_macro") -> str:
        """Cross-validate every tuned candidate and save the fold scores.

        The name format of cv_result_file: cv-num_of_fold-score-time.csv
        An example line in cv_result_file: KNN,0.98856638,0.9790027,0.98894347,0.97473751,0.97649344

        Args:
            num_of_domains: Number of domains to load. 0 means all.
            num_of_files_per_domain: Samples per domain in the CSV.
            num_of_folds: K for K-fold cross validation.
            score: Scoring metric name passed to cross_val_score.

        Returns:
            Path to cross validation result file.
        """
        t = time.strftime("%Y%m%d%H%M%S", time.localtime())
        cv_result_file = "cv-{}-{}-{}.csv".format(num_of_folds, score, t)
        cv_result_filepath = os.path.join(self.cfg.output_dir, cv_result_file)
        results = []

        if num_of_domains == 0:
            df = pd.read_csv(self.cfg.dataset_path)
        else:
            df = pd.read_csv(self.cfg.dataset_path, nrows=num_of_domains * num_of_files_per_domain)

        # Drop constant columns (no information; breaks scaling).
        df = df.loc[:, df.std() > 0]
        dataset = df.values
        X = dataset[:, :-1]
        y = dataset[:, -1]

        for key in self.tuned_candidates:
            t0 = time.time()
            kfold = KFold(n_splits=num_of_folds, shuffle=True, random_state=self.cfg.seed)
            # BUG FIX: the `score` parameter was ignored and self.cfg.scoring
            # was used instead, contradicting the metric recorded in the
            # result file name.
            cv_results = cross_val_score(self.tuned_candidates[key]["model"], X, y, cv=kfold, scoring=score)
            results.append(cv_results)
            t1 = time.time()
            print("-" * 100)
            print("Duration: {:.2f}".format(t1 - t0))
            print("{}: {:.2f} ({:.2f})".format(self.tuned_candidates[key]["model"], round(cv_results.mean(), 2), round(cv_results.std(), 2)))
            print("-" * 100)
            print(cv_results)
            print()

        # BUG FIX: ensure the output directory exists (only _save_report
        # created it before, so cross_val on a fresh checkout crashed).
        os.makedirs(self.cfg.output_dir, exist_ok=True)

        with open(cv_result_filepath, "w") as f_out:
            for key, result in zip(self.tuned_candidates, results):
                # BUG FIX: the original emitted a trailing comma per line,
                # contradicting the documented example format.
                f_out.write(key + "," + ",".join(str(e) for e in result) + "\n")

        return cv_result_filepath

def main():
    """Entry point: run the closed-world cross-validation experiment."""
    config = {
        "dataset_path": "/home/chuen/workspace/doh-traffic-trace-analyzer/data/dataset/important/dataset-SFExtractor-20220402200526-analyzer-shuffled.csv",
        "output_dir": "data/close_world",
        "domain_num": 100,
        "num_of_folds": 5,
        "seed": 7,
        "validation_size": 0.2,  # 80% training set, 20% test set.
        "scoring": "f1_macro",
    }

    # BUG FIX: the config dict was built but never handed to CloseWorld,
    # so it had no effect on the experiment.
    cw = CloseWorld(config)
    # cw.tuned()
    # mat = cw.confusion_matrix()
    # print(mat)
    cw.cross_val(num_of_domains=500)


if __name__ == "__main__":
    main()