import argparse
from typing import Tuple
import json, os, time

import numpy as np
import pandas as pd

from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

class CloseWorld:
    """Closed-world website-fingerprinting evaluation.

    Loads a feature CSV whose last column is the class label, splits it into
    train/test sets, fits one of several classical classifiers, and prints a
    classification report.
    """

    def __init__(self, config=None):
        """
        Args:
            config: Optional dict of settings; missing keys fall back to the
                defaults below.
        """
        # NOTE: a mutable default argument ({}) would be shared across every
        # call to __init__; use None and create a fresh dict instead.
        if config is None:
            config = {}
        self.dataset_path = config.get("dataset_path")
        self.output_dir = config.get("output_dir", "data/close_world")
        self.domain_num = config.get("domain_num", 100)
        self.num_of_folds = config.get("num_of_folds", 5)
        self.seed = config.get("seed", 7)
        self.validation_size = config.get("validation_size", 0.2) # 80% training set, 20% test set.
        self.scoring = config.get("scoring", "f1_macro")

        # Candidate classifiers. Scale-sensitive models (KNN, SVM) are wrapped
        # in a StandardScaler pipeline; tree/NB models are used directly.
        self.models = {
            "NB": GaussianNB(var_smoothing=1e-8),
            "DT": DecisionTreeClassifier(criterion="entropy", max_depth=20, random_state=self.seed),
            "RF": RandomForestClassifier(n_estimators=100, criterion="gini", max_depth=20, random_state=self.seed, n_jobs=-1),
            "KNN": Pipeline([("Scaler", StandardScaler()), ("KNN", KNeighborsClassifier(n_neighbors=1))]),
            "SVM": Pipeline([("Scaler", StandardScaler()), ("SVM", SVC(C=5000, kernel="rbf", gamma=0.05))]),
        }

    def _load_data(self, dataset_path:str, num_of_domains:int=1000, num_of_files_per_domain:int=100) \
        -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Load the feature CSV and produce a stratified train/test split.

        Assumes the CSV rows are grouped so that the first
        ``num_of_domains * num_of_files_per_domain`` rows cover exactly
        ``num_of_domains`` domains — TODO confirm against the dataset writer.

        Args:
            dataset_path: Path to the dataset CSV; the last column is the label.
            num_of_domains: Number of domains to load. 0 means all. Default 1000.
            num_of_files_per_domain: Number of pcap-derived rows per domain
                folder. Default 100.

        Returns:
            X_train, X_test, y_train, y_test
        """
        if num_of_domains == 0:
            df = pd.read_csv(dataset_path)
        else:
            # Only read the rows needed for the requested number of domains.
            df = pd.read_csv(dataset_path, nrows=num_of_domains*num_of_files_per_domain)

        dataset = df.values
        # Features are every column but the last; the last column is the label.
        X, y = dataset[:, :-1], dataset[:, -1]

        # Stratify so each class keeps the same train/test proportion.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=self.validation_size, random_state=self.seed, stratify=y)

        return X_train, X_test, y_train, y_test

    def run(self, model_name:str, dataset_path:str):
        """Train the named model on the dataset and print a classification report.

        Args:
            model_name: Key into ``self.models`` ("NB", "DT", "RF", "KNN", "SVM").
                Raises KeyError for unknown names.
            dataset_path: Path to the dataset CSV.

        Returns:
            The classification report string (also printed to stdout).
        """
        clf = self.models[model_name]
        X_train, X_test, y_train, y_test = self._load_data(dataset_path)
        clf.fit(X=X_train, y=y_train)

        y_pred = clf.predict(X_test)
        report = classification_report(y_test, y_pred)
        print(report)
        return report


def main():
    """Parse command-line arguments and run one closed-world evaluation."""
    parser = argparse.ArgumentParser(description="Evaluate.")

    parser.add_argument(
        "--dataset", "-d",
        type=str,
        required=True,
        help="Path to the feature CSV (last column is the label).",
    )
    # Generalization: the classifier is now selectable from the CLI. The
    # default is the previously hard-coded "KNN", so existing invocations
    # behave exactly as before.
    parser.add_argument(
        "--model", "-m",
        type=str,
        default="KNN",
        choices=["NB", "DT", "RF", "KNN", "SVM"],
        help="Classifier to evaluate (default: KNN).",
    )

    args = parser.parse_args()
    c = CloseWorld()
    c.run(args.model, args.dataset)

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()

# Example invocations:
# python closeworld.py -d data/dataset/dataset-SFExtractor-20220320191830-shuffled.csv
# python closeworld.py -d data/dataset/dataset-SFExtractor-20220323160334-mixer-shuffled.csv
# python closeworld.py -d data/dataset/dataset-SFExtractor-20220324153450-injector-shuffled.csv