import random, os, time, json, glob, argparse
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from sklearn.metrics import classification_report
from sklearn.neighbors import LocalOutlierFactor
from random import choice

class OpenWorld:
    """Open-world traffic-trace recognition over multiple target domains.

    The dataset is assumed to hold 1000 domains with 100 traces each (see
    ``_load_data``).  A small set of "target" (monitored) domains keeps its
    labels; every other domain is collapsed into one non-target class.
    Two models are evaluated: a multi-class Random Forest and an ensemble
    of one LOF novelty detector per target domain.
    """

    def __init__(self, config:dict=None):
        """Read experiment settings from ``config`` (all keys optional).

        Args:
            config: dict with optional keys ``dataset_path``,
                ``output_dir``, ``seed``, ``target_proportion``,
                ``target_domain_num`` and ``test_set_size``.
        """
        # ``None`` default instead of ``{}`` avoids the shared mutable
        # default-argument pitfall; ``config`` is only read here.
        config = config if config is not None else {}
        self.dataset_path = config.get("dataset_path")
        self.output_dir = config.get("output_dir", "data/open_world")
        self.seed = config.get("seed", 7)
        self.target_proportion = config.get("target_proportion", 0.01)
        self.target_domain_num = config.get("target_domain_num", 10)
        self.test_set_size = config.get("test_set_size", 10000)
        # Label assigned to every trace that is not from a target domain.
        self.non_target_label = 1000
        self.num_of_trace_each_domain = 100

        random.seed(self.seed)

        self.random_order_domains = self._get_domains()

    def _get_domains(self, total_num:int=1000):
        """Return domain IDs ``0..total_num-1`` shuffled with the instance seed."""
        domain_id_list = list(range(total_num))
        random.shuffle(domain_id_list)
        return domain_id_list

    def _load_data(self, target_only:bool=False):
        """Load the CSV dataset and build the open-world train/test split.

        Suppose the dataset consists of 1000 domains with 100 traces each,
        so 100000 traces total.  The first ``target_domain_num`` domains of
        the shuffled order are the targets, the domains up to index 100 are
        the known (training) non-target domains, and every domain may
        contribute to the test set.

        If ``test_set_size`` is 10000 and ``target_proportion`` is 0.05,
        then 500 target traces go into the test set and the other 9500 test
        traces come from known non-target and unseen domains.

        Args:
            target_only: If True, return only the target traces (sorted by
                label) as the training set — used by the LOF estimators.

        Returns:
            X_train, X_test, y_train, y_test as NumPy arrays.
        """
        test_set_size = self.test_set_size
        target_proportion = self.target_proportion
        domain_list = self.random_order_domains
        target_domain_num = self.target_domain_num
        target_domain_id_list = domain_list[:target_domain_num]

        # Each target domain must keep at least 50 traces for training,
        # which bounds how many target traces the test set may take.
        assert(target_proportion <= target_domain_num * 50 / test_set_size)

        df = pd.read_csv(self.dataset_path)

        # From the target_domain_num * 100 target traces, reserve
        # test_set_size * target_proportion of them for the test set
        # -> (X_target_test, y_target_test).
        df_target = df[df["label"].isin(target_domain_id_list)]
        X, y = df_target.iloc[:, :-1], df_target.iloc[:, -1]
        X_target_left, X_target_test, y_target_left, y_target_test = train_test_split(
            X, y,
            test_size=int(test_set_size * target_proportion),
            random_state=self.seed,
            stratify=y,
        )

        # From the remaining target traces keep 50 per target domain for
        # training -> (X_target_train, y_target_train).
        if target_proportion == 50 * target_domain_num / test_set_size:
            # The leftover is exactly the training share already.
            X_target_train, y_target_train = X_target_left, y_target_left
        else:
            X_target_train, X_, y_target_train, y_ = train_test_split(
                X_target_left, y_target_left,
                train_size=50 * target_domain_num,
                random_state=self.seed,
                stratify=y_target_left,
            )

        # From the known non-target domains (indices target_domain_num..99
        # of the shuffled order) take 50 traces per domain for training
        # -> (X_non_target_train, y_non_target_train).
        df_non_target = df[df["label"].isin(domain_list[target_domain_num:100])]
        X, y = df_non_target.iloc[:, :-1], df_non_target.iloc[:, -1]
        X_non_target_train, X_non_target_test_candidate, y_non_target_train, y_non_target_test_candidate = train_test_split(
            X, y,
            train_size=9 * target_domain_num * 50,
            random_state=self.seed,
            stratify=y,
        )

        # The non-target part of the test set is drawn proportionally: the
        # known non-target domains contribute their share of the
        # test_set_size * (1 - target_proportion) non-target test traces.
        X, y = X_non_target_test_candidate, y_non_target_test_candidate
        test_size = (((9 * target_domain_num) / (1000 - target_domain_num)) \
            * test_set_size * (1 - target_proportion)) / (9 * target_domain_num * 50)
        X_, X_non_target_test1, y_, y_non_target_test1 = train_test_split(
            X, y,
            test_size=test_size,
            random_state=self.seed,
            stratify=y,
        )

        # Domains never used for training contribute the remaining share.
        df_non_train = df[df["label"].isin(domain_list[10 * target_domain_num:])]
        X, y = df_non_train.iloc[:, :-1], df_non_train.iloc[:, -1]
        test_size = (((1000 - 10 * target_domain_num) / (1000 - target_domain_num)) * \
            test_set_size * (1 - target_proportion)) / ((1000 - 10 * target_domain_num) * 100)
        X_, X_non_target_test2, y_, y_non_target_test2 = train_test_split(
            X, y,
            test_size=test_size,
            random_state=self.seed,
            stratify=y,
        )

        # Collapse every non-target trace into the single non-target class.
        y_non_target_train = self._modify_label(y_non_target_train, self.non_target_label)
        y_non_target_test1 = self._modify_label(y_non_target_test1, self.non_target_label)
        y_non_target_test2 = self._modify_label(y_non_target_test2, self.non_target_label)
        X_train = pd.concat((X_target_train, X_non_target_train))
        y_train = pd.concat((y_target_train, y_non_target_train))
        # Rounding in the split sizes can slightly overshoot test_set_size,
        # so trim the concatenated test set back to the requested size.
        X_test = pd.concat((X_target_test, X_non_target_test1, X_non_target_test2))[:test_set_size]
        y_test = pd.concat((y_target_test, y_non_target_test1, y_non_target_test2))[:test_set_size]

        if target_only:
            # LOF needs the target training traces grouped by label so that
            # _train_set_split yields one contiguous chunk per domain.
            target_train = pd.concat((X_target_train, y_target_train), axis=1)
            target_train.sort_values(by="label", inplace=True)
            X_target_train = target_train.iloc[:, :-1]
            y_target_train = target_train.iloc[:, -1]

            return X_target_train.values, X_test.values, y_target_train.values.ravel(), y_test.values.ravel()

        return X_train.values, X_test.values, y_train.values.ravel(), y_test.values.ravel()

    def show_result(self, report:dict, non_target_label:int=1000):
        """Print precision/recall averaged over the target classes only.

        Args:
            report: dict from ``classification_report(..., output_dict=True)``.
            non_target_label: label whose entry is excluded together with
                the summary rows.  Labels reach the report as floats, hence
                the ``"<label>.0"`` key form.
        """
        irrelevant_key = ["accuracy", "macro avg", "weighted avg", "{}.0".format(non_target_label)]
        result = [report[key] for key in report if key not in irrelevant_key]

        if not result:
            # Only summary rows / the non-target class present: avoid a
            # division by zero.
            print("no target classes found in report")
            return

        target_avg_precision = sum(r["precision"] for r in result) / len(result)
        target_avg_recall = sum(r["recall"] for r in result) / len(result)

        print("target_avg_precision: ", target_avg_precision)
        print("target_avg_recall: ", target_avg_recall)

    def _modify_label(self, labels, new_label:int):
        """Return a ``label``-named float Series of ``new_label``, one per row.

        Args:
            labels: Series (or DataFrame) whose length sets the output size.
            new_label: label value to assign to every entry.

        Returning a Series named "label" (instead of a DataFrame with a
        ``0`` column) keeps ``pd.concat`` in ``_load_data`` aligned with the
        ``label``-named target Series, producing one flat label vector.
        """
        values = np.full(labels.shape[0], float(new_label))
        return pd.Series(values, name="label")

    def _train_set_split(self, X, y, subset_size:int=50):
        """Split (X, y) into consecutive chunks of ``subset_size`` rows.

        Assumes rows are sorted by label so each chunk holds one domain.

        Returns:
            (X_list, y_list), one entry per full chunk; a trailing partial
            chunk is dropped.
        """
        subset_num = X.shape[0] // subset_size
        X_list = [X[i * subset_size:(i + 1) * subset_size, :] for i in range(subset_num)]
        y_list = [y[i * subset_size:(i + 1) * subset_size] for i in range(subset_num)]
        return X_list, y_list

    def _modify_label_by_target(self, y, target:int):
        """Return 1 where a label equals ``target`` and -1 elsewhere."""
        return [1 if label == target else -1 for label in y]

    def _assemble_results(self, results, target_list):
        """Merge per-class LOF verdicts into one multi-class prediction.

        Each row of ``results`` holds one estimator's +1/-1 verdict per
        test trace.  When several estimators claim a trace, one of their
        labels is picked at random; when none do, it becomes non-target.
        """
        # Reseed from the clock so tie-breaking is independent of the
        # experiment seed; the experiment seed is restored afterwards.
        random.seed(time.time())
        assembled_result = []
        for col in range(len(results[0])):
            candidate_label = [
                label for row, label in enumerate(target_list)
                if results[row][col] == 1
            ]
            if candidate_label:
                label_ = choice(candidate_label)
            else:
                label_ = self.non_target_label
            assembled_result.append(label_)
        random.seed(self.seed)
        return assembled_result

    def run_LOF(self, subset_size=50, print_on_screen:bool=True):
        """Fit one LOF novelty detector per target domain and evaluate.

        Each estimator answers 1 (its target) / -1 (non-target) per trace;
        the per-estimator answers are assembled into one prediction.

        Args:
            subset_size: traces per domain chunk in the training set.
            print_on_screen: also print the report and target averages.

        Returns:
            classification_report dict.
        """
        X_train, X_test, y_train, y_test = self._load_data(target_only=True)
        # Forward subset_size (it was previously accepted but ignored).
        X_list, y_list = self._train_set_split(X_train, y_train, subset_size)

        estimator_list, results, target_list = [], [], []

        for i in range(self.target_domain_num):
            estimator_list.append(LocalOutlierFactor(novelty=True))
            # Chunks are label-sorted, so the first label identifies the chunk.
            target_list.append(y_list[i][0])

        for i in range(self.target_domain_num):
            estimator_list[i].fit(X_list[i])
            results.append(estimator_list[i].predict(X_test))

        y_pred = self._assemble_results(results, target_list)
        report = classification_report(y_test, y_pred, output_dict=True)
        if print_on_screen:
            print(classification_report(y_test, y_pred))
            self.show_result(report)

        return report

    def run_RF(self, print_on_screen:bool=True):
        """Train/evaluate a multi-class Random Forest on the open-world split.

        Returns:
            classification_report dict.
        """
        X_train, X_test, y_train, y_test = self._load_data()
        rf = RandomForestClassifier(n_estimators=100, criterion="gini", max_depth=20, random_state=self.seed, n_jobs=-1)
        rf.fit(X_train, y_train)
        y_pred = rf.predict(X_test)
        report = classification_report(y_test, y_pred, output_dict=True)
        if print_on_screen:
            print(classification_report(y_test, y_pred))
            self.show_result(report)
        return report

    def run(self):
        """Sweep target_proportion x seed and save one report per model/proportion.

        Returns:
            The directory the JSON reports were written to.
        """
        target_proportion_list = [p / 1000 for p in range(10, 52, 2)]
        seed_list = list(range(20))
        t = time.strftime("%Y%m%d%H%M%S", time.localtime())
        output_dir = "data/open_world/{}".format(t)

        for target_proportion in target_proportion_list:
            print("target_proportion: ", target_proportion)
            RF_report_list, LOF_report_list = [], []
            for seed in tqdm(seed_list, ncols=50):
                config = {
                    "dataset_path": self.dataset_path,
                    "target_proportion": target_proportion,
                    "seed": seed,
                    "output_dir": output_dir
                }
                # Re-initialize in place so the new seed/proportion take
                # effect (including a reshuffled domain order).
                self.__init__(config)

                RF_report_list.append(self.run_RF(print_on_screen=False))
                LOF_report_list.append(self.run_LOF(print_on_screen=False))

            self._save_report({
                "model": "RF",
                "target_proportion": target_proportion,
                "results": RF_report_list,
            })
            self._save_report({
                "model": "LOF",
                "target_proportion": target_proportion,
                "results": LOF_report_list,
            })

        return output_dir

    def _save_report(self, report:dict):
        """Write ``report`` as ``<model>-<target_proportion>.json`` in output_dir."""
        report_filename = "{}-{}.json".format(report["model"], report["target_proportion"])
        os.makedirs(self.output_dir, exist_ok=True)
        with open(os.path.join(self.output_dir, report_filename), "w") as f_out:
            f_out.write(json.dumps(report, indent=4))

    def caculate(self, directory:str, save:bool=True):
        """Aggregate saved JSON reports into precision/recall tables.

        NOTE(review): the method keeps its historical misspelled name
        ("caculate") because external callers use it.

        Args:
            directory: directory holding ``LOF*.json`` / ``RF*.json`` reports.
            save: also write precision.csv / recall.csv to ``directory``.

        Returns:
            (df_precision, df_recall), indexed by target_proportion with
            one column per model.
        """
        LOF_res_files = glob.glob(os.path.join(directory, "LOF*.json"))
        RF_res_files = glob.glob(os.path.join(directory, "RF*.json"))

        irrelevant = ["1000.0", "accuracy", "macro avg", "weighted avg"]

        precision_report = {"LOF": {}, "RF": {}}
        recall_report = {"LOF": {}, "RF": {}}

        def read(filepath:str):
            """Return (mean precision, mean recall, target_proportion) of one file."""
            with open(filepath, "r") as f_in:
                data = json.load(f_in)

            target_proportion = data["target_proportion"]

            # Average over target classes within each per-seed report, then
            # over all seeds.  (The lists were previously reset inside the
            # loop, so only the last seed's report was ever used.)
            avg_precision_list, avg_recall_list = [], []
            for result in data["results"]:
                s = 0
                precision_, recall_ = 0.0, 0.0
                for key in result:
                    if key not in irrelevant:
                        precision_ += result[key]["precision"]
                        recall_ += result[key]["recall"]
                        s += 1
                avg_precision_list.append(precision_ / s)
                avg_recall_list.append(recall_ / s)
            precison = np.mean(avg_precision_list)
            recall = np.mean(avg_recall_list)
            return precison, recall, target_proportion

        for file in LOF_res_files:
            precison, recall, target_proportion = read(file)
            precision_report["LOF"][target_proportion] = precison
            recall_report["LOF"][target_proportion] = recall

        for file in RF_res_files:
            precison, recall, target_proportion = read(file)
            precision_report["RF"][target_proportion] = precison
            recall_report["RF"][target_proportion] = recall

        df_precision = pd.DataFrame.from_dict(precision_report).sort_index()
        df_recall = pd.DataFrame.from_dict(recall_report).sort_index()

        if save:
            df_precision.to_csv(os.path.join(directory, "precision.csv"))
            df_recall.to_csv(os.path.join(directory, "recall.csv"))

        return df_precision, df_recall

def main():
    """Entry point: run the full open-world sweep, then aggregate reports."""
    dataset = "/home/chuen/workspace/doh-traffic-trace-analyzer/data/dataset/important/dataset-SFExtractor-20220402200526-analyzer-shuffled.csv"
    ow = OpenWorld({"dataset_path": dataset})
    output_dir = ow.run()
    ow.caculate(output_dir)

def test():
    """Smoke-test with a tiny configuration (2 target domains, 2000 test traces).

    NOTE(review): ``config`` has no ``"dataset_path"``, so ``_load_data``
    will call ``pd.read_csv(None)`` and fail — add a valid path before
    using this helper.
    """
    config = {
        "target_domain_num": 2,
        "target_proportion": 0.01,
        "test_set_size": 2000,
        "seed": 7,
    }
    ow2 = OpenWorld(config)
    ow2.run_LOF()
    ow2.run_RF()

# Run the full experiment sweep when executed as a script.
if __name__ == "__main__":
    main()