import os, glob, json, argparse
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import cm, ticker
import numpy as np
from sklearn.feature_selection import SelectKBest
from sklearn.metrics import confusion_matrix
from sklearn.feature_selection import chi2, mutual_info_classif, RFE
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier

from close_world import CloseWorld

class Dataset():
    """Inspection and visualisation helpers for a flow-feature CSV dataset.

    Loads the dataset once in ``__init__`` and writes reports (describe
    table, histograms, density/correlation figures, per-feature
    distributions) below ``config["report_dir"]``.
    """

    def __init__(self, config: dict = None):
        """Build the configuration, prepare the report dir and load the CSV.

        Args:
            config: Optional overrides for ``dataset_path`` and ``output_dir``.
        """
        config = config or {}  # avoid a shared mutable default argument
        self.config = {}
        self.config["dataset_path"] = config.get("dataset_path", "/home/chuen/workspace/doh-traffic-trace-analyzer/data/dataset/important/dataset-SFExtractor-20220402200526-analyzer-shuffled.csv")
        self.config["output_dir"] = config.get("output_dir", "data/dataset_report")
        # Dataset name: file basename truncated at the first dot (drops the extension).
        self.config["dataset_name"] = self.config["dataset_path"].rsplit("/", 1)[-1].rsplit(".")[0]
        self.config["report_dir"] = os.path.join(self.config["output_dir"], self.config["dataset_name"])

        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(self.config["report_dir"], exist_ok=True)

        self.dataset = pd.read_csv(self.config["dataset_path"])

        # Chinese display name per feature column (used as plot labels).
        self.feature_name2zh_name = {
            "bidirectional_duration_ms": "双向流持续时长",
            "src2dst_duration_ms": "正向流持续时长",
            "dst2src_duration_ms": "反向流持续时长",
            "bidirectional_min_piat_ms": "双向流最小数据包间隔时长",
            "bidirectional_mean_piat_ms": "双向流平均数据包间隔时长",
            "bidirectional_stddev_piat_ms": "双向流数据包间隔时长标准差",
            "bidirectional_max_piat_ms": "双向流最大数据包间隔时长",
            "src2dst_min_piat_ms": "正向流最小数据包间隔时长",
            "src2dst_mean_piat_ms": "正向流平均数据包间隔时长",
            "src2dst_stddev_piat_ms": "正向流数据包间隔时长标准差",
            "src2dst_max_piat_ms": "正向流最大数据包间隔时长",
            "dst2src_min_piat_ms": "反向流最小数据包间隔时长",
            "dst2src_mean_piat_ms": "反向流平均数据包间隔时长",
            "dst2src_stddev_piat_ms": "反向流数据包间隔时长标准差",
            "dst2src_max_piat_ms": "反向流最大数据包间隔时长",
            "bidirectional_packets": "双向流累计数据包数",
            "bidirectional_bytes": "双向流累计字节数",
            "src2dst_packets": "正向流累计数据包数",
            "src2dst_bytes": "正向流累计字节数",
            "dst2src_packets": "反向流累计数据包数",
            "dst2src_bytes": "反向流累计字节数",
            "bidirectional_min_ps": "双向流最小数据包大小",
            "bidirectional_mean_ps": "双向流平均数据包大小",
            "bidirectional_stddev_ps": "双向流数据包大小标准差",
            "bidirectional_max_ps": "双向流最大数据包大小",
            "src2dst_min_ps": "正向流最小数据包大小",
            "src2dst_mean_ps": "正向流平均数据包大小",
            "src2dst_stddev_ps": "正向流数据包大小标准差",
            "src2dst_max_ps": "正向流最大数据包大小",
            "dst2src_min_ps": "反向流最小数据包大小",
            "dst2src_mean_ps": "反向流平均数据包大小",
            "dst2src_stddev_ps": "反向流数据包大小标准差",
            "dst2src_max_ps": "反向流最大数据包大小",
        }

        # Short numeric id per feature column (used in compact plot labels).
        self.feature_name2id = {
            "bidirectional_duration_ms": "[1]",
            "src2dst_duration_ms": "[2]",
            "dst2src_duration_ms": "[3]",
            "bidirectional_min_piat_ms": "[4]",
            "bidirectional_mean_piat_ms": "[5]",
            "bidirectional_stddev_piat_ms": "[6]",
            "bidirectional_max_piat_ms": "[7]",
            "src2dst_min_piat_ms": "[8]",
            "src2dst_mean_piat_ms": "[9]",
            "src2dst_stddev_piat_ms": "[10]",
            "src2dst_max_piat_ms": "[11]",
            "dst2src_min_piat_ms": "[12]",
            "dst2src_mean_piat_ms": "[13]",
            "dst2src_stddev_piat_ms": "[14]",
            "dst2src_max_piat_ms": "[15]",
            "bidirectional_packets": "[16]",
            "bidirectional_bytes": "[17]",
            "src2dst_packets": "[18]",
            "src2dst_bytes": "[19]",
            "dst2src_packets": "[20]",
            "dst2src_bytes": "[21]",
            "bidirectional_min_ps": "[22]",
            "bidirectional_mean_ps": "[23]",
            "bidirectional_stddev_ps": "[24]",
            "bidirectional_max_ps": "[25]",
            "src2dst_min_ps": "[26]",
            "src2dst_mean_ps": "[27]",
            "src2dst_stddev_ps": "[28]",
            "src2dst_max_ps": "[29]",
            "dst2src_min_ps": "[30]",
            "dst2src_mean_ps": "[31]",
            "dst2src_stddev_ps": "[32]",
            "dst2src_max_ps": "[33]",
        }

        # Measurement unit per feature column; packet counts are unitless.
        self.feature_name2unit = {
            "bidirectional_duration_ms": "(ms)",
            "src2dst_duration_ms": "(ms)",
            "dst2src_duration_ms": "(ms)",
            "bidirectional_min_piat_ms": "(ms)",
            "bidirectional_mean_piat_ms": "(ms)",
            "bidirectional_stddev_piat_ms": "(ms)",
            "bidirectional_max_piat_ms": "(ms)",
            "src2dst_min_piat_ms": "(ms)",
            "src2dst_mean_piat_ms": "(ms)",
            "src2dst_stddev_piat_ms": "(ms)",
            "src2dst_max_piat_ms": "(ms)",
            "dst2src_min_piat_ms": "(ms)",
            "dst2src_mean_piat_ms": "(ms)",
            "dst2src_stddev_piat_ms": "(ms)",
            "dst2src_max_piat_ms": "(ms)",
            "bidirectional_packets": "",
            "bidirectional_bytes": "(bytes)",
            "src2dst_packets": "",
            "src2dst_bytes": "(bytes)",
            "dst2src_packets": "",  # fixed: a packet count, not bytes (was "(bytes)")
            "dst2src_bytes": "(bytes)",
            "bidirectional_min_ps": "(bytes)",
            "bidirectional_mean_ps": "(bytes)",
            "bidirectional_stddev_ps": "(bytes)",
            "bidirectional_max_ps": "(bytes)",
            "src2dst_min_ps": "(bytes)",
            "src2dst_mean_ps": "(bytes)",
            "src2dst_stddev_ps": "(bytes)",
            "src2dst_max_ps": "(bytes)",
            "dst2src_min_ps": "(bytes)",
            "dst2src_mean_ps": "(bytes)",
            "dst2src_stddev_ps": "(bytes)",
            "dst2src_max_ps": "(bytes)",
        }

    def show_zero_std_feature(self):
        """Print the names of constant feature columns (std == 0)."""
        sample_num = 100000  # cap the number of rows examined
        features = self.dataset.iloc[:sample_num, :-1]
        df = features.loc[:, features.std() == 0]
        print(df.columns)

    def describe(self) -> str:
        """Output static information of dataset.

        Returns:
            describe_file_path: A csv file contains dataset's static information.
        """
        describe_file_path = os.path.join(self.config["report_dir"], "describe.csv")
        self.dataset.describe().to_csv(describe_file_path)

        print("describe file was saved at ", describe_file_path)

        return describe_file_path

    def shape(self):
        """Print shape of dataset (row, col)."""
        print("-" * 50)
        print("shape:")
        print(self.dataset.shape)
        print("-" * 50)

    def show_type(self):
        """Print features' dtypes."""
        print("-" * 50)
        print("type:")
        pd.set_option("display.max_rows", 100)
        print(self.dataset.dtypes)
        print("-" * 50)

    def show_distribution(self):
        """Show label distribution (sample count per label)."""
        print(self.dataset.groupby("label").size())

    def histogram(self) -> None:
        """Show a histogram per feature column (figure is not saved)."""
        features = self.dataset.iloc[:, :-1]
        features.hist(sharex=False, sharey=False, xlabelsize=1, ylabelsize=1,
            figsize=(20, 20), layout=(8, 6))

    def density(self) -> str:
        """Show and save density (KDE) figure.

        Make sure that the features' std not equal to 0.

        Returns:
            density_path: Path to density figure.
        """
        density_path = os.path.join(self.config["report_dir"], "density.svg")
        features = self.dataset.iloc[:, :-1]
        features.plot(
            kind="density",
            title=features.iloc[1].index.tolist(),
            subplots=True,
            figsize=(20, 20),
            layout=(8, 6),
            sharex=False,
            legend=False,
            fontsize=1
        )
        plt.savefig(density_path, format="svg")
        plt.show()

        return density_path

    def feature_distribuiton_visualize(self):
        """Plot a histogram (with KDE overlay) per non-constant feature.

        Returns:
            Path of the saved SVG figure.
        """
        sns.set_theme(style="ticks", font="MiSans,MiSans Normal", font_scale=1.5)
        sample_num = 100000

        features = self.dataset.iloc[:sample_num, :-1]
        df = features.loc[:, features.std() > 0]
        _, col = df.shape
        row_, col_ = col // 4 + 1, 4
        feature_set = [_ for _ in df]

        fig, axes = plt.subplots(row_, col_, figsize=(20, 26))
        fig.subplots_adjust(wspace=0.5, hspace=0.8)

        for ax, feature in zip(axes.flat, feature_set):
            sns.histplot(df[feature].values, kde=True, ax=ax)
            ax.lines[0].set_color('crimson')
            if feature in self.feature_name2zh_name:
                label = "{} {}{}".\
                    format(
                        self.feature_name2id[feature],
                        self.feature_name2zh_name[feature],
                        self.feature_name2unit[feature]
                    )
            else:
                # Fall back to the raw column name; the original referenced
                # an undefined ``label`` here and raised NameError.
                label = feature

            ax.set_title("")
            ax.set_xlabel(label)
            ax.set(ylabel="")

        # remove empty subplots left over in the grid
        for ax in axes.flat[len(feature_set):]:
            ax.remove()

        img_path = os.path.join(self.config["report_dir"], "feature_distribuiton.svg")
        plt.savefig(img_path, format="svg", bbox_inches="tight")
        return img_path

    def correlation(self) -> str:
        """Show pairwise correlation of columns as a masked heatmap.

        Returns:
            corr_path: Path to correlation figure.
        """
        sns.set_theme(style="white")
        features = self.dataset.iloc[:, :-1]
        features = features.loc[:, features.std() > 0]
        # Relabel columns with short numeric ids so the heatmap stays readable.
        new_columns = [
            "{}".format(self.feature_name2id[fn][1:-1] + " ")
            for fn in features.columns
        ]
        features.columns = new_columns
        corr = features.corr()

        # Mask the upper triangle (the matrix is symmetric).
        mask = np.triu(np.ones_like(corr, dtype=bool))

        f, ax = plt.subplots(figsize=(10, 10))
        cmap = cm.coolwarm

        sns.heatmap(corr, mask=mask, cmap=cmap, center=0, vmin=-1, vmax=1,
                    square=True, linewidths=.5, cbar_kws={"shrink": .5})

        corr_path = os.path.join(self.config["report_dir"], "corr.svg")
        plt.savefig(corr_path, format="svg", bbox_inches="tight")

        return corr_path

class AntiAnalysis:
    """Feature-importance analysis over the close-world dataset.

    Relies on the project-local ``CloseWorld`` class for configuration,
    data loading and tuned models; figures go to ``output_dir``.
    """

    def __init__(self, config=None):
        config = config or {}  # avoid a shared mutable default argument
        sns.set_theme(style="whitegrid", font="MiSans,MiSans Normal")
        cfg = CloseWorld.Config()
        self.dataset_path = cfg.dataset_path
        self.output_dir = config.get("output_dir", "data/anti_analysis")
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(self.output_dir, exist_ok=True)

    def _load_data(self, **kwargs):
        """Delegate loading/splitting of the dataset to CloseWorld."""
        cw = CloseWorld()
        return cw._load_data(self.dataset_path, **kwargs)

    def feature_RFE(self):
        """Recursive feature elimination (kept for reference; the original
        author marked it useless)."""
        cw = CloseWorld()
        clf = cw.tuned_candidates["DT"]["model"]
        X_train, X_test, y_train, y_test = self._load_data(num_of_domains=100)
        rfe = RFE(clf, n_features_to_select=47)
        rfe.fit(X_train, y_train)
        print(rfe.support_)
        print(rfe.ranking_)

    def feature_tree(self):
        """Rank features by random-forest impurity importance."""
        df = pd.read_csv(self.dataset_path)
        df = df.iloc[:, :-1]
        df = df.loc[:, df.std() > 0]

        # Build Dataset once (the original built it twice, reading the
        # default CSV from disk two times).
        ds = Dataset()
        feature_name2id = ds.feature_name2id
        feature_name2title = ds.feature_name2zh_name
        name_with_id = [
            "{} {}".format(feature_name2title[name], feature_name2id[name])
            for name in df.columns
        ]

        cw = CloseWorld()
        clf = cw.tuned_candidates["RF"]["model"]
        X_train, X_test, y_train, y_test = self._load_data(num_of_domains=1000)
        clf.fit(X_train, y_train)
        importances = clf.feature_importances_

        self.feature_rank(name_with_id, importances)

    def feature_F(self):
        """Rank features by ANOVA F-score (SelectKBest default)."""
        X_train, X_test, y_train, y_test = self._load_data()
        features = X_train
        labels = y_train

        # Read the full CSV to recover column names. The original used
        # nrows=1, but std() over a single row is NaN, so the std > 0
        # filter silently dropped every column and ``names`` came out empty.
        df = pd.read_csv(self.dataset_path)
        df = df.iloc[:, :-1]
        df = df.loc[:, df.std() > 0]
        names = list(df.columns)

        selector = SelectKBest(k="all")
        selector.fit(features, labels)
        scores = selector.scores_
        self.feature_rank(names, scores, log=True)

    def feature_rank(self, names: list, scores: list, log=False):
        """Plot a horizontal bar chart of features ranked by score.

        Args:
            names: Feature display names.
            scores: One score per feature, aligned with ``names``.
            log: Use a logarithmic x axis when True.

        Returns:
            Path of the saved SVG figure.
        """
        sns.set_theme(style="ticks", font="MiSans,MiSans Normal")
        # Sort features by descending score.
        sorted_named_scores = sorted(zip(names, scores), key=lambda x: x[1], reverse=True)
        sorted_names = [_[0] for _ in sorted_named_scores]
        sorted_scores = [_[1] for _ in sorted_named_scores]

        y_pos = np.arange(len(names))
        fig, ax = plt.subplots(figsize=(8, 8))
        ax.barh(y_pos, sorted_scores, height=0.8, tick_label=sorted_names, color=["royalblue", "dimgray"], alpha=0.5)
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['left'].set_visible(False)
        ax.tick_params(left=False)

        ax.invert_yaxis()  # largest score on top
        ax.set(
            xlabel="分值",
            ylabel="特征名",
        )
        if log:
            ax.set(xscale="log")

        # Annotate each bar with its (rounded) score.
        for score, pos in zip(sorted_scores, y_pos):
            ax.text(score, pos + 0.3, "{:.3f}".format(round(score, 3)))

        feature_rank_path = os.path.join(self.output_dir, "feature_rank.svg")
        plt.savefig(feature_rank_path, format="svg", bbox_inches="tight")

        return feature_rank_path

class CloseWorldResult():
    """Loading, tabulating and plotting of close-world experiment results.

    Result files are JSON reports named ``report-<MODEL>-<DOMAINS>-<TS>.json``
    inside ``result_dir``; figures and tables go to the configured dirs.
    """

    def __init__(self, config: dict = None):
        config = config or {}  # avoid a shared mutable default argument
        sns.set_theme(style="ticks")
        self.result_dir = config.get("result_dir", "data/close_world/tuned2")
        self.dir_to_save_table = config.get("dir_to_save_table", "data/close_world/tables")
        self.dir_to_save_fig = config.get("dir_to_save_fig", "data/close_world")

        # Metric name translations used for axis labels.
        self.en2zh = {
            "Accuracy": "准确率",
            "Precision": "精准率",
            "Recall": "召回率",
            "F1_score": "F1 值"
        }

    def _load_result(self) -> dict:
        """Load result of close world experiment, including
        accuracy, macro F1, macro precision and macro recall.

        Return 4 Dataframe like:

                Accuracy table

                NB  KNN DT  RF  SVM
        100     *   *   *   *   *
        200     *   *   *   *   *
        ...
        1000    *   *   *   *   *

        Returns:
            A dict consist of accuracy, macro F1, macro precision and macro recall tables.
        """
        result_files = glob.glob(os.path.join(self.result_dir, "**.json"))

        keys = ["Accuracy", "Precision", "Recall", "F1_score"]
        metric2metric_dict = {key: {} for key in keys}

        for result_file in result_files:
            with open(result_file) as f_in:
                data = json.load(f_in)
                accuracy, macro_avg = data["accuracy"], data["macro_avg"]
                precision, recall, f1_score = macro_avg["precision"], macro_avg["recall"], macro_avg["f1-score"]

            model_name = self._get_model_name_by_filename(result_file)
            domain_num = self._get_domain_num_by_filename(result_file)
            metric_list = [accuracy, precision, recall, f1_score]

            for key, metric in zip(keys, metric_list):
                # setdefault replaces the original duplicated if/else branches.
                metric2metric_dict[key].setdefault(model_name, {})[domain_num] = metric

        metric2metric_df = {}
        for key in metric2metric_dict:
            df = pd.DataFrame.from_dict(metric2metric_dict[key])
            df.sort_index(inplace=True)
            metric2metric_df[key] = df

        return metric2metric_df

    def line_chart(self):
        """Draw a 2x2 line chart (accuracy / precision / recall / F1) and save it.

        Returns:
            Path of the saved SVG figure.
        """
        titles = ["(a) 准确率", "(b) 宏观精准率", "(c) 宏观召回率", "(d) 宏观 F1 值"]
        sns.set_theme(style="whitegrid", font="MiSans,MiSans Normal")
        fig, ax_arr = plt.subplots(2, 2, figsize=(12, 9))
        fig.subplots_adjust(hspace=0.4)
        df_dict = self._load_result()

        for idx, (key, title) in enumerate(zip(df_dict, titles)):
            df = df_dict[key]
            df.sort_index(axis=1, inplace=True)
            ax = ax_arr[idx // 2][idx % 2]
            ax.set_title(title, x=0.5, y=-0.3)
            ax.axis([100, 1000, 0.3, 1.0])
            ax.set_xlabel("数据集大小（包含域名个数）")
            ax.set_ylabel("{}（{}）".format(self.en2zh[key], key))

            sns.lineplot(
                data=df,
                linewidth=2,
                markers=True,
                markersize=9,
                ax=ax,
                clip_on=False,
                palette=sns.hls_palette(len(df.columns)),
            )

        cwr_path = os.path.join(self.dir_to_save_fig, "cwr.svg")
        plt.savefig(cwr_path, format="svg", bbox_inches="tight")

        return cwr_path

    def show(self):
        """Print every result table to stdout."""
        df_dict = self._load_result()

        for key in df_dict:
            print(key)
            print(df_dict[key])
            print("-" * 100)

    def save_csv(self, round=True) -> list:
        """Save result tables as csv files.

        Args:
            round: Unused; kept for backward compatibility (values are
                always written with 5 decimals).

        Returns:
            Paths of the written csv files.
        """
        df_dict = self._load_result()
        file_path_list = []

        os.makedirs(self.dir_to_save_table, exist_ok=True)

        for key in df_dict:
            file_path = os.path.join(self.dir_to_save_table, key + ".csv")
            df_dict[key].to_csv(file_path, float_format="%.5f")
            file_path_list.append(file_path)

        return file_path_list

    def _get_model_name_by_filename(self, filename: str) -> str:
        """get model name by filename or path.

        Example filename: report-DT-100-20220311111938.json
        corresponding model name: DT

        Args:
            filename: File name or path.

        Returns:
            Model name.
        """
        # basename() is portable and, unlike rsplit("/"), also accepts a
        # bare filename without a directory component.
        return os.path.basename(filename).split("-")[1]

    def _get_domain_num_by_filename(self, filename: str) -> int:
        """get number of domain by filename or path."""
        return int(os.path.basename(filename).split("-")[2])

    def confusion_matrix_visualize(self, model_name: str = "RF", num_of_domains: int = 10):
        """Draw reading friendly confusion matrix."""
        from close_world import CloseWorld
        cw = CloseWorld()
        matrix = cw.confusion_matrix(model_name, num_of_domains)
        sns.set_theme()

        plt.figure(figsize=(16, 9), dpi=300)
        sns.heatmap(matrix, cmap="gray_r")

    def _load_time(self, num_of_domains: int = 1000) -> dict:
        """Load training/evaluation durations for the given dataset size.

        Returns:
            Mapping model name -> duration (seconds).
        """
        model2time = {}
        result_files = glob.glob(os.path.join(self.result_dir, "**-{}-**.json".format(num_of_domains)))

        for result_file in result_files:
            model_name = self._get_model_name_by_filename(result_file)
            with open(result_file) as f_in:
                data = json.load(f_in)
                model2time[model_name] = data["duration"]

        return model2time

    def time_visualize(self):
        """Plot duration vs dataset size per model (log y axis) and save it.

        Returns:
            Path of the saved SVG figure.
        """
        sns.set_theme(style="whitegrid", font="MiSans,MiSans Normal")
        model2time_all = {}
        for i in range(100, 1100, 100):
            model2time = self._load_time(i)
            for key in model2time:
                # setdefault replaces the original duplicated if/else branches.
                model2time_all.setdefault(key, {})[i] = model2time[key]

        df = pd.DataFrame.from_dict(model2time_all)
        df.sort_index(axis=1, inplace=True)

        plt.figure(figsize=(8, 5), dpi=300)
        ax = sns.lineplot(
            data=df,
            linewidth=2,
            markers=True,
            markersize=10,
            palette=sns.hls_palette(len(df.columns)),
            clip_on=False,
        )
        ax.set(
            xlabel="数据集大小（包含域名个数）",
            ylabel="耗时（秒）",
            yscale="log",
            xlim=(100, 1000),
        )

        time_path = os.path.join(self.dir_to_save_fig, "time.svg")
        plt.savefig(time_path, format="svg", bbox_inches="tight")

        return time_path

    def violinplot(self, cv_result_file: str = "auto", remove=None, output_static_info: bool = True):
        """Draw a violin plot of cross-validation macro-F1 scores per model.

        A example line in cv_result_file:

        KNN,0.98856638,0.9790027,0.98894347,0.97473751,0.97649344

        Args:
            cv_result_file: Path to cross validation result file, or "auto"
                to pick the newest cv*.csv in CloseWorld's output dir.
            remove: Optional list of model names to exclude from the plot.
            output_static_info: Also print min/max/ptp/mean/std per model.

        Returns:
            Path of the saved SVG figure.
        """
        sns.set_theme(style="whitegrid", font="MiSans,MiSans Normal")
        if cv_result_file == "auto":
            # select latest cv_result_file by its timestamp suffix
            from close_world import CloseWorld
            cw = CloseWorld()
            output_dir = cw.cfg.output_dir
            files = glob.glob(os.path.join(output_dir, "cv*.csv"))
            time_list = [_.split("/")[-1].split("-")[-1].split(".")[0] for _ in files]
            latest_time = sorted(time_list)[-1]
            cv_result_file = glob.glob(os.path.join(output_dir, "*{}.csv".format(latest_time)))[0]

        results, model_names = [], []
        with open(cv_result_file, "r") as f_in:
            for line in f_in.readlines():
                line = line.strip().split(",")
                # NOTE(review): [1:-1] drops the last comma-separated field;
                # this assumes each line carries a trailing empty column --
                # confirm against the cv result file format.
                model_name, scores = line[0], line[1:-1]
                model_names.append(model_name)
                results.append(scores)

        if remove is not None:
            # drop the rows whose model name was asked to be removed
            rm_idx_list = []
            results_temp, model_names_temp = [], []

            for m in remove:
                for idx, model_name in enumerate(model_names):
                    if m == model_name:
                        rm_idx_list.append(idx)

            for idx, (model_name, score) in enumerate(zip(model_names, results)):
                if idx not in rm_idx_list:
                    results_temp.append(score)
                    model_names_temp.append(model_name)

            results, model_names = results_temp, model_names_temp

        plt.figure(figsize=(6, 4), dpi=300)
        ax = sns.violinplot(
            data=results,
            saturation=.5,
        )
        ax.set_xticklabels(model_names)
        ax.set(
            xlabel="模型",
            ylabel="宏观 F1 值",
        )
        if output_static_info:
            print("model\tmin\tmax\tptp\tmean\tstd")
            for model, score in zip(model_names, results):
                vals = np.array([float(_) for _ in score])
                print("{}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}"\
                    .format(model, vals.min(), vals.max(), vals.ptp(), vals.mean(), vals.std()))

        violinplot_path = os.path.join(self.dir_to_save_fig, "violinplot.svg")
        plt.savefig(violinplot_path, format="svg", bbox_inches="tight")

        return violinplot_path

class OpenWorldResult():
    """Loading and plotting of open-world experiment results."""

    def __init__(self, config: dict = None):
        config = config or {}  # avoid a shared mutable default argument
        sns.set_theme(style="whitegrid", font="MiSans,MiSans Normal")
        self.result_dir = config.get("result_dir", "data/open_world/20220403183901")
        self.output_dir = config.get("output_dir", "data/open_world")

    def line_chart(self, plot=True):
        """Collect per-file macro precision/recall and optionally plot them.

        Args:
            plot: When True, draw and save the two-panel line chart and
                return the figure path. When False, return the collected
                DataFrame (columns: Model, Target proportion, Score, Metric).

        Returns:
            Figure path when ``plot`` is True, otherwise the DataFrame.
        """
        # Per-class report entries to skip when averaging precision/recall.
        irrelevant = ["1000.0", "accuracy", "macro avg", "weighted avg"]
        files = glob.glob(os.path.join(self.result_dir, "LOF*.json"))
        files.extend(glob.glob(os.path.join(self.result_dir, "RF*.json")))

        df_list = []
        idx = 0
        for file in files:
            with open(file, "r") as f_in:
                data = json.load(f_in)

            model = data["model"]
            target_proportion = data["target_proportion"]

            for result in data["results"]:
                avg_precision_list, avg_recall_list = [], []

                for k, v in result.items():
                    if k not in irrelevant:
                        avg_precision_list.append(v["precision"])
                        avg_recall_list.append(v["recall"])

                # One row per metric so seaborn can facet by "Metric".
                for metric_name, score in (
                    ("Precision", np.mean(avg_precision_list)),
                    ("Recall", np.mean(avg_recall_list)),
                ):
                    df_list.append(pd.DataFrame({
                        "Model": model,
                        "Target proportion": target_proportion,
                        "Score": score,
                        "Metric": metric_name,
                    }, index=[idx]))
                    idx += 1

        df = pd.concat(df_list)

        if not plot:
            # The original always saved a (stray) figure and returned its
            # path here, which broke table(); callers need the raw data.
            return df

        fig, ax = plt.subplots(1, 2, figsize=(12, 5))
        sns.lineplot(
            data=df[df["Metric"] == "Precision"],
            x="Target proportion",
            y="Score",
            hue="Model",
            style="Model",
            markers=True,
            ax=ax[0],
            clip_on=False,
        )
        ax[0].set(
            ylim=(0, 1),
            xlim=(0.01, 0.05),
            xlabel="目标域名对应的流量占所有流量的比重",
            ylabel="精准率（Precision）",
        )
        ax[0].set_title("")

        sns.lineplot(
            data=df[df["Metric"] == "Recall"],
            x="Target proportion",
            y="Score",
            hue="Model",
            style="Model",
            markers=True,
            ax=ax[1],
            clip_on=False,
        )
        ax[1].set(
            ylim=(0, 1),
            xlim=(0.01, 0.05),
            xlabel="目标域名对应的流量占所有流量的比重",
            ylabel="召回率（Recall）",
        )
        ax[1].set_title("")

        owr_path = os.path.join(self.output_dir, "owr.svg")
        plt.savefig(owr_path, format="svg", bbox_inches="tight")

        return owr_path

    def table(self):
        """Write mean scores per (Model, Metric, Target proportion) to csv."""
        df = self.line_chart(plot=False)
        df_mean = df.set_index(['Model', 'Metric', 'Target proportion'])
        df_mean = df_mean.groupby(level=[0, 1, 2])['Score'].agg('mean')
        df_mean.to_csv(os.path.join(self.output_dir, "results.csv"))
        print(df_mean)

def test():
    """Smoke-check the filename-parsing helpers of CloseWorldResult."""
    cwr = CloseWorldResult({"result_dir": "data/close_world/tuned2"})
    sample = "data/close_world/tuned/report-DT-100-20220311111938.json"
    print(cwr._get_model_name_by_filename(sample))
    print(cwr._get_domain_num_by_filename(sample))

def dataset():
    """Run dataset-report helpers on a chosen CSV.

    NOTE(review): ``Dataset`` defines no ``feature_ranking`` method, so the
    call below raises AttributeError. Possibly
    ``feature_distribuiton_visualize`` or ``AntiAnalysis.feature_rank`` was
    intended -- confirm before enabling this entry point.
    """
    config = {
        "dataset_path": "data/dataset/dataset-nfstream-20220304210805-shuffled.csv",
        "output_dir": "data/dataset_report"
    }

    a = Dataset(config)
    # a.describe()
    # a.shape()
    # a.show_type()
    # a.show_distribution()
    # a.histogram()
    # a.density()
    # a.correlation()
    a.feature_ranking()

def close_world():
    """Render the close-world timing figure from the tuned results dir."""
    cfg = {"result_dir": "data/close_world/tuned2"}
    result = CloseWorldResult(cfg)
    result.time_visualize()

def open_world():
    """Render the open-world result line chart.

    NOTE(review): the original called ``owr._load_data()``, which does not
    exist on OpenWorldResult and raised AttributeError; ``line_chart`` is
    the class's plotting entry point -- confirm this was the intent.
    """
    owr = OpenWorldResult()
    owr.line_chart()


if __name__ == "__main__":
    # close_world()
    # test()
    # dataset()
    open_world()