import random
import os
import pickle
import numpy as np
import gzip
from sklearn.metrics import multilabel_confusion_matrix, roc_auc_score, auc , f1_score,roc_curve
from tqdm import tqdm
import jsonlines
from sklearn.metrics import  accuracy_score,classification_report
import pandas as pd
from io import StringIO
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from sklearn.metrics import confusion_matrix, classification_report, average_precision_score, precision_recall_curve
from IPython.display import display
from numpy import interp
import keras

from matplotlib.ticker import FuncFormatter

class abs_model:
    """Base class for flow-classification models.

    Concrete subclasses are expected to set ``self.data`` (processed-data
    directory), ``self.model`` (saved-model path), ``self.full_rdata``
    (raw-dataset directory) and ``self.dataset`` (dataset name), and to
    override the ``train`` / ``test`` / ``parse_raw_data`` / ``save_model`` /
    ``load_model`` hooks.  This base class provides shared dataset parsing,
    (de)serialization of the train/valid/test splits, and evaluation /
    plotting helpers.
    """

    def __init__(self, name, randseed):
        """
        :param name: model name, used when reporting metrics.
        :param randseed: seed for the instance RNG so shuffles are reproducible.
        """
        self.database = './data/'                # root of all processed data
        self.name = name
        self.rand = random.Random(x=randseed)    # private, seeded RNG
        self.data = None                         # set by subclass: processed-data dir
        self.model = None                        # set by subclass: model file path
        self.full_rdata = []                     # set by subclass: raw-dataset dir
        self.dataset = None                      # set by subclass: dataset name

    def data_exists(self):
        """Return True when the processed-data path is set and exists."""
        # Guard the None default -- os.path.exists(None) raises TypeError.
        return self.data is not None and os.path.exists(self.data)

    def model_exist(self):
        """Return True when the saved-model path is set and exists."""
        return self.model is not None and os.path.exists(self.model)

    def train(self):
        """Train the model (subclass hook)."""
        pass

    def test(self):
        """Evaluate the model (subclass hook)."""
        pass

    def parse_raw_data(self):
        """Convert raw captures into model inputs (subclass hook)."""
        pass

    def save_model(self):
        """Persist the trained model (subclass hook)."""
        pass

    def load_model(self):
        """Load a previously saved model (subclass hook)."""
        pass

    def method(self):
        """Return the method name, i.e. the parent directory of the CWD."""
        # os.path instead of splitting on '/' so this also works on Windows.
        return os.path.basename(os.path.dirname(os.getcwd()))

    def walk_to_find_labeled_dir(self, dataset_path):
        """Recursively collect directories containing a ``flowmeter.log``.

        :param dataset_path: root directory to walk.
        :return: sorted list of labeled directory paths (sorted so the
            result is deterministic -- os.walk order depends on the OS).
        """
        labeled_dirs = [root for root, _dirs, files in os.walk(dataset_path)
                        if "flowmeter.log" in files]
        return sorted(labeled_dirs)

    def parse_flowmeter_log_for_directed_packet_length_with_ts(self, pad_sequence, min_flow_len):
        """Parse every ``flowmeter.log`` under ``self.full_rdata`` into
        per-class lists of padded, direction-signed packet-length sequences.

        :param pad_sequence: callable that pads/truncates one packet-length list.
        :param min_flow_len: flows with fewer packets than this are skipped.
        :return: dict mapping class label (stringified class index) to the
            list of padded sequences belonging to that class.
        :raises OSError: when the raw-dataset directory does not exist.
        """
        full_rdata = self.full_rdata
        if not os.path.exists(full_rdata):
            raise OSError('Dataset {0} (full path: {1}) does not exist!'.format(self.dataset, full_rdata))
        os.makedirs(self.data, exist_ok=True)
        flow_dict = {}
        # sorted() keeps the class-index -> directory mapping deterministic
        # across runs/filesystems (os.listdir order is arbitrary).
        labeled_dirs = sorted(
            os.path.join(full_rdata, d) for d in os.listdir(full_rdata)
            if os.path.isdir(os.path.join(full_rdata, d))
            and os.path.exists(os.path.join(full_rdata, d, "flowmeter.log")))
        for index in tqdm(range(len(labeled_dirs))):
            labeled_dir = labeled_dirs[index]
            label = str(index)
            file = os.path.join(labeled_dir, "flowmeter.log")
            if not os.path.exists(file):
                continue
            flow_dict.setdefault(label, [])
            with jsonlines.open(file, mode='r') as flowmeter_results:
                for flowmeter_row in tqdm(flowmeter_results):
                    # map direction {0,1} -> {-1,+1} and sign the payload
                    # sizes with it, so direction is encoded in the sign
                    direction_np = 2 * np.array(flowmeter_row["packet_direction_vector"]) - 1
                    signed_sizes_np = np.array(
                        flowmeter_row['packet_payload_size_vector']) * direction_np
                    pkt_size = signed_sizes_np.tolist()
                    if len(pkt_size) < min_flow_len:
                        continue  # too short to be informative
                    flow_dict[label].append(pad_sequence(pkt_size))
        return flow_dict

    def save_data(self, X_train, y_train, X_valid, y_valid, X_test, y_test):
        """Pickle the six splits into ``<self.data>data.gzip``."""
        # ``with`` guarantees the gzip file is closed even if pickling fails.
        with gzip.GzipFile(self.data + 'data.gzip', 'wb') as fp:
            pickle.dump({
                'X_train': X_train,
                'y_train': y_train,
                'X_valid': X_valid,
                'y_valid': y_valid,
                'X_test': X_test,
                'y_test': y_test
            }, file=fp)

    def load_data(self):
        """Load the pickled splits and return them as numpy arrays.

        The test split is returned shuffled (X and y with the same
        permutation).

        :return: (X_train, y_train, X_valid, y_valid, X_test, y_test)
        """
        with gzip.GzipFile(self.data + 'data.gzip', 'rb') as fp:
            data = pickle.load(fp)
        X_train = data['X_train']
        y_train = data['y_train']
        X_valid = data['X_valid']
        y_valid = data['y_valid']
        X_test = data['X_test']
        y_test = data['y_test']
        indexs = list(range(len(y_test)))
        # Use the seeded instance RNG (not the global one) so the test-set
        # shuffle is reproducible for a given randseed.
        self.rand.shuffle(indexs)
        return (np.array(X_train), np.array(y_train),
                np.array(X_valid), np.array(y_valid),
                np.array(X_test)[indexs], np.array(y_test)[indexs])

    def class_dirs(self):
        """Return the class directory names (those holding a flowmeter.log).

        Sorted so the ordering matches the deterministic label assignment in
        ``parse_flowmeter_log_for_directed_packet_length_with_ts``.
        """
        return sorted(d for d in os.listdir(self.full_rdata)
                      if os.path.isdir(os.path.join(self.full_rdata, d))
                      and os.path.exists(os.path.join(self.full_rdata, d, "flowmeter.log")))

    def num_classes(self):
        """Return the number of labeled directories, found recursively.

        NOTE(review): this counts recursively while ``class_dirs`` only
        looks one level deep -- the two agree only for flat dataset
        layouts; confirm against the dataset structure.
        """
        return len(self.walk_to_find_labeled_dir(self.full_rdata))

    def fpr_tpr_auc(self, y_pred, y_real, y_pred_logit=None):
        """Compute per-class TPR/FPR (from confusion matrices) and AUC.

        :param y_pred: predicted labels.
        :param y_real: ground-truth labels.
        :param y_pred_logit: per-class scores; required for the multi-class
            one-vs-rest AUC.
        :return: (tpr, fpr, auc_value) -- tpr/fpr are dicts keyed by label.
        """
        labels = sorted(set(y_real))  # sorted -> deterministic label order
        mcm = multilabel_confusion_matrix(y_true=y_real, y_pred=y_pred, labels=labels)
        fp = {}
        tp = {}
        fn = {}
        tn = {}
        # mcm[i] is [[tn, fp], [fn, tp]] for class labels[i]
        for i, label in enumerate(labels):
            fp[label] = mcm[i, 0, 1]
            tp[label] = mcm[i, 1, 1]
            fn[label] = mcm[i, 1, 0]
            tn[label] = mcm[i, 0, 0]
        acc = {}
        fpr = {}
        tpr = {}
        for label in fp:
            total = fp[label] + tn[label] + fn[label] + tp[label]
            acc[label] = (tp[label] + tn[label]) / total
            fpr[label] = fp[label] / (fp[label] + tn[label])
            tpr[label] = tp[label] / (tp[label] + fn[label])

        print('tpr:', tpr)
        print('fpr:', fpr)

        if self.num_classes() <= 2:
            auc_value = roc_auc_score(y_true=y_real, y_score=y_pred)
        else:
            # multi-class AUC needs per-class probabilities, not hard labels
            auc_value = roc_auc_score(y_true=y_real, y_score=y_pred_logit,
                                      average="macro", multi_class='ovr')
        print('auc (label):', auc_value)

        # auc_value (was ``auc``) no longer shadows sklearn's auc() function
        return tpr, fpr, auc_value

    def plot_confusion_matrix(self, y_true,
                          y_pred,
                          classes=None,
                          size=(10, 10),
                          normalize=False,
                          title=None,
                          print_raw=False,
                          cmap=plt.cm.Blues):
        """
        This function prints and plots the confusion matrix.
        Normalization can be applied by setting `normalize=True`.

        :param y_true: True labels.
        :param y_pred: Predicted labels.
        :param classes: List of class names.
        :param size: Size of the plot.
        :param normalize: If True values of the confusion matrix will be normalized.
        :param title: Title of the plot.
        :param print_raw: If True the raw confusion matrix is printed.
        :param cmap: Color map
        :return: the matplotlib Axes holding the plot.
        """
        if not title:
            title = 'Normalized confusion matrix' if normalize else 'Confusion matrix'

        cm = confusion_matrix(y_true, y_pred)
        if normalize:
            # row-normalize: each cell becomes the fraction of its true class
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        if print_raw:
            print(cm)

        fig, ax = plt.subplots(figsize=size)
        im = ax.matshow(cm, interpolation='nearest', cmap=cmap)
        ax.figure.colorbar(im, ax=ax)
        ax.set(title=title,
            ylabel='True label',
            xlabel='Predicted label')

        if classes is not None:
            ax.set(xticks=np.arange(cm.shape[1]),
                yticks=np.arange(cm.shape[0]),
                xticklabels=classes,
                yticklabels=classes)

        plt.margins(2)
        ax.tick_params(axis="x", bottom=True, labelbottom=True, top=False, labeltop=False, rotation=45)

        # Rotate the tick labels and set their alignment.
        plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
                rotation_mode="anchor")

        # Annotate every cell with its value (white text on dark cells).
        fmt = '.2f' if normalize else 'd'
        thresh = cm.max() / 2.
        for i in range(cm.shape[0]):
            for j in range(cm.shape[1]):
                ax.text(j, i, format(cm[i, j], fmt),
                        ha="center", va="center",
                        color="white" if cm[i, j] > thresh else "black")
        fig.tight_layout()
        return ax

    def get_misclassifications(self, y_true, pred):
        """Per-class misclassification report.

        :param y_true: ground-truth integer labels (indices into class_dirs()).
        :param pred: predicted integer labels.
        :return: DataFrame indexed by class name with columns
            ``misclassified``, ``total`` and ``percent_misclassified``,
            sorted worst-first.
        """
        class_dict = dict(enumerate(self.class_dirs()))
        print(class_dict)
        # Build both frames in one pass instead of the quadratic
        # row-by-row ``.loc[len(df)]`` append.
        y = pd.DataFrame({'label': [class_dict[t] for t in y_true]})
        misclassifications = pd.DataFrame(
            {'label': [class_dict[t] for t, p in zip(y_true, pred) if t != p]})
        mc_df = pd.merge(pd.DataFrame({'misclassified': misclassifications.label.value_counts()}),
                        pd.DataFrame({'total': y.label.value_counts()}),
                        how='right', left_index=True, right_index=True)
        mc_df = mc_df.fillna(0)  # classes with zero errors merge in as NaN
        # Named-column division instead of the deprecated positional
        # ``x[0] / x[1]`` inside DataFrame.apply.
        mc_df['percent_misclassified'] = mc_df['misclassified'] / mc_df['total']
        return mc_df.sort_values('percent_misclassified', ascending=False)

    def plot_roc_curve(self, y_real, y_pred, size=(8, 5), average='macro'):
        """Plot the macro-average one-vs-rest ROC curve and return its AUC.

        :param y_real: ground-truth integer labels.
        :param y_pred: predicted integer labels.
        :param size: kept for interface compatibility (unused).
        :param average: kept for interface compatibility (unused).
        :return: the macro-average AUC.
        """
        n_classes = self.num_classes()
        # One-hot encode with a fixed width so every class gets a column
        # even when some class never occurs in y_real / y_pred (keras'
        # to_categorical without num_classes infers the width from the max
        # label and can come up short).
        eye = np.eye(n_classes)
        y_real_oh = eye[np.asarray(y_real, dtype=int)]
        y_pred_oh = eye[np.asarray(y_pred, dtype=int)]

        fpr = {}
        tpr = {}
        roc_auc = {}
        for i in range(n_classes):
            fpr[i], tpr[i], _ = roc_curve(y_real_oh[:, i], y_pred_oh[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])

        # Macro average: interpolate every per-class curve onto the union
        # of all FPR grid points, then average the TPRs.
        all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
        mean_tpr = np.zeros_like(all_fpr)
        for i in range(n_classes):
            # np.interp replaces the deprecated ``from numpy import interp``
            mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
        mean_tpr /= n_classes

        fpr["macro"] = all_fpr
        tpr["macro"] = mean_tpr
        roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

        lw = 2
        plt.figure()
        plt.plot(fpr["macro"], tpr["macro"],
                label='macro-average ROC curve (area = {0:0.4f})'
                    ''.format(roc_auc["macro"]),
                color='blue', linestyle='-', linewidth=4)
        plt.plot([0, 1], [0, 1], 'k--', lw=lw)
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('1-Specificity (%)')
        plt.ylabel('Sensitivity (%)')
        plt.title('Some extension of Receiver operating characteristic to multi-class')

        def to_percent(temp, position):
            # axis formatter: fraction -> percentage
            return '%1.0f' % (100 * temp)

        plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))
        plt.gca().xaxis.set_major_formatter(FuncFormatter(to_percent))
        plt.legend(loc="lower right")
        return roc_auc["macro"]

    def result(self, y_pred, y_real, y_pred_logit=None):
        """Compute metrics, draw the evaluation plots, and append one row
        (dataset, method, model, accuracy, macro-F1, macro-AUC) to
        ``../../output.csv``.

        :param y_pred: predicted labels.
        :param y_real: ground-truth labels.
        :param y_pred_logit: kept for interface compatibility (unused here).
        """
        accuracy = accuracy_score(y_real, y_pred)
        f1 = f1_score(y_real, y_pred, average='macro')

        self.plot_confusion_matrix(y_true=y_real, y_pred=y_pred,
                                   classes=np.array(self.class_dirs()), size=(5, 5))

        mc_df = self.get_misclassifications(y_real, y_pred)
        display(mc_df)

        # named macro_auc so it does not shadow sklearn's auc() function
        macro_auc = self.plot_roc_curve(y_real, y_pred)

        print(self.method())
        new_data = {'dataset': self.dataset, 'method': self.method(),
                    'model_name': self.name, 'accuracy': accuracy,
                    'f1-score': f1, 'auc(label)': macro_auc}
        df = pd.read_csv('../../output.csv')
        df = pd.concat([df, pd.DataFrame([new_data])], ignore_index=True)
        df.to_csv('../../output.csv', index=False)


