import ast
import json

from sklearn import metrics
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.linear_model import Lasso
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeRegressor
from xgboost import XGBClassifier

# --- Experiment configuration -------------------------------------------
# Classifier instance reused (refit) for every experiment below.
model = RandomForestClassifier()
# True: append the runtime metrics listed in selected_feature to each sample;
# False: append only 'avg_iter_time'.
details = True
# Slowdown threshold: a co-located job is labeled "acceptable" (True) when its
# mixed avg_iter_time is below ratio * its solo avg_iter_time.
ratio = 1.2
# Runtime metrics taken from the solo-run logs when details is True.
selected_feature = ['throughput' , 'rand_data_time', 'avg_iter_time']
# Whether to standardize features with StandardScaler before fitting.
scale = False



if __name__ == '__main__':

    # Pairwise co-location profiling results for one GPU (2080 Ti).
    # NOTE(review): keys appear to be stringified tuples — job-pair names at
    # the top level, batch-size pairs one level down (they are eval'ed
    # below) — confirm against the collector that wrote pair.json.
    with open('./collect/2080ti/pair.json') as json_file:
        json_data = json.load(json_file)

    # Models appearing in the profiled pairs; each takes a turn as the
    # held-out model in the leave-one-out loop below.
    model_list = ['bert', 'mnist_fcn5', 'cifar10_resnet20', 'imagenet_resnet50', 'cifar10_resnet110',
                 'imagenet_ofa', 'imagenet_vgg16', 'deepspeech', 'mnist_lr', 'deepspeech_nr', 
                 'imagenet_densenet201', 'imagenet_vgg13', 'imagenet_vgg19']

    # Static architecture features per model: counts of fully-connected,
    # convolutional and recurrent layers, plus GFLOPs per iteration.
    feature_dict = {
        'bert':                 {'fcn': 73, 'cnn': 0,    'rnn': 0, 'GFLOPs': 32.6},
        'mnist_fcn5':           {'fcn': 3,  'cnn': 0,    'rnn': 0, 'GFLOPs': 0.000199},
        'mnist_lr':             {'fcn': 1,  'cnn': 0,    'rnn': 0, 'GFLOPs': 0.000008},
        'cifar10_resnet20':     {'fcn': 1,  'cnn': 18,   'rnn': 0, 'GFLOPs': 0.040997},
        'imagenet_resnet50':    {'fcn': 1,  'cnn': 49,   'rnn': 0, 'GFLOPs': 4.14},
        'cifar10_resnet110':    {'fcn': 1,  'cnn': 109,  'rnn': 0, 'GFLOPs': 0.254991},
        'imagenet_ofa':         {'fcn': 1,  'cnn': 17,   'rnn': 0, 'GFLOPs': 0.015174},
        'imagenet_vgg16':       {'fcn': 3,  'cnn': 13,   'rnn': 0, 'GFLOPs': 15.48},
        'deepspeech':           {'fcn': 1,  'cnn': 2,    'rnn': 5, 'GFLOPs': 38.9},
        'deepspeech_nr':        {'fcn': 1,  'cnn': 2,    'rnn': 7, 'GFLOPs': 25.8},
        'imagenet_densenet201': {'fcn': 1,  'cnn': 120,  'rnn': 0, 'GFLOPs': 2.86},
        'imagenet_vgg13':       {'fcn': 3,  'cnn': 10,   'rnn': 0, 'GFLOPs': 11.32},
        'imagenet_vgg19':       {'fcn': 3,  'cnn': 16,   'rnn': 0, 'GFLOPs': 19.64},
    }

    # Echo the labeling threshold so runs are distinguishable in logs.
    print('ratio: %.2f' % ratio)

    # --- Leave-one-model-out evaluation -------------------------------------
    # For each model, train the interference classifier on every job pair that
    # does NOT involve it and evaluate on the pairs that do.

    def _pair_samples(jk, bk):
        """Build both feature vectors and labels for one (job-pair, batch-pair).

        jk and bk are stringified tuples (e.g. "('bert', 'mnist_lr')").
        Returns (features, labels): features is the symmetric pair
        [data0 + data1, data1 + data0]; each label says whether that job's
        co-located avg_iter_time stays below ratio * its solo avg_iter_time.
        """
        # literal_eval instead of eval(): keys come from a data file, so only
        # Python literals should ever be accepted.
        t_jk = ast.literal_eval(jk)
        t_bk = ast.literal_eval(bk)
        record = json_data[jk][bk]
        single0 = 'single-' + t_jk[0] + '-b' + str(t_bk[0])
        single1 = 'single-' + t_jk[1] + '-b' + str(t_bk[1])
        mix0 = 'mix-' + t_jk[0] + '-b' + str(t_bk[0])
        mix1 = 'mix-' + t_jk[1] + '-b' + str(t_bk[1])
        # Static features: batch size + architecture stats from feature_dict.
        data0 = [t_bk[0]] + list(feature_dict[t_jk[0]].values())
        data1 = [t_bk[1]] + list(feature_dict[t_jk[1]].values())
        if details:
            # Append the selected runtime features.  Key order is taken from
            # job 0's solo log for BOTH jobs so the two vectors stay aligned.
            for tk in sorted(record[single0]):
                if tk in selected_feature:
                    data0.append(record[single0][tk])
                    data1.append(record[single1][tk])
        else:
            data0.append(record[single0]['avg_iter_time'])
            data1.append(record[single1]['avg_iter_time'])
        features = [data0 + data1, data1 + data0]
        labels = [
            record[mix0]['avg_iter_time'] < ratio * record[single0]['avg_iter_time'],
            record[mix1]['avg_iter_time'] < ratio * record[single1]['avg_iter_time'],
        ]
        return features, labels

    for model_name in model_list:
        X_train, X_test, Y_train, Y_test = [], [], [], []
        # One pass over all pairs: pairs touching model_name are held out.
        for jk in json_data:
            t_jk = ast.literal_eval(jk)
            held_out = t_jk[0] == model_name or t_jk[1] == model_name
            for bk in json_data[jk]:
                features, labels = _pair_samples(jk, bk)
                if held_out:
                    X_test += features
                    Y_test += labels
                else:
                    X_train += features
                    Y_train += labels

        if scale:
            # Fit the scaler on training data only to avoid test-set leakage.
            scaler = StandardScaler()
            scaler.fit(X_train)
            X_train = scaler.transform(X_train)
            X_test = scaler.transform(X_test)

        model.fit(X_train, Y_train)
        Y_pred = model.predict(X_test)
        Y_proba = model.predict_proba(X_test)[:, 1]  # P(label == True)
        fpr, tpr, _ = metrics.roc_curve(Y_test, Y_proba)  # FPR, TPR, thresholds
        auc = metrics.auc(fpr, tpr)                       # area under ROC curve
        score = metrics.accuracy_score(Y_test, Y_pred)    # model accuracy
        print(model_name, 'AUC: %.3f, Accuracy: %.3f' % (auc,  score))

    # --- Pooled evaluation with a random split ------------------------------
    # Rebuild samples over ALL job pairs and evaluate with a random 20/80
    # split (test_size=0.8 keeps the training set deliberately small).

    def _build_samples(jk, bk):
        """Return (features, labels) for one (job-pair, batch-pair) record.

        features is the symmetric pair [data0 + data1, data1 + data0]; each
        label is True when that job's co-located avg_iter_time stays below
        ratio * its solo avg_iter_time.
        """
        # literal_eval instead of eval(): keys are file-derived, only Python
        # literals should ever be accepted.
        t_jk = ast.literal_eval(jk)
        t_bk = ast.literal_eval(bk)
        record = json_data[jk][bk]
        single0 = 'single-' + t_jk[0] + '-b' + str(t_bk[0])
        single1 = 'single-' + t_jk[1] + '-b' + str(t_bk[1])
        mix0 = 'mix-' + t_jk[0] + '-b' + str(t_bk[0])
        mix1 = 'mix-' + t_jk[1] + '-b' + str(t_bk[1])
        # Static features: batch size + architecture stats from feature_dict.
        data0 = [t_bk[0]] + list(feature_dict[t_jk[0]].values())
        data1 = [t_bk[1]] + list(feature_dict[t_jk[1]].values())
        if details:
            # Key order is taken from job 0's solo log for BOTH jobs so the
            # two vectors stay aligned.
            for tk in sorted(record[single0]):
                if tk in selected_feature:
                    data0.append(record[single0][tk])
                    data1.append(record[single1][tk])
        else:
            data0.append(record[single0]['avg_iter_time'])
            data1.append(record[single1]['avg_iter_time'])
        features = [data0 + data1, data1 + data0]
        labels = [
            record[mix0]['avg_iter_time'] < ratio * record[single0]['avg_iter_time'],
            record[mix1]['avg_iter_time'] < ratio * record[single1]['avg_iter_time'],
        ]
        return features, labels

    feature, cost_time = [], []
    for jk in json_data:
        for bk in json_data[jk]:
            features, labels = _build_samples(jk, bk)
            feature += features
            cost_time += labels

    X_train, X_test, Y_train, Y_test = train_test_split(feature, cost_time, test_size=0.8)
    if scale:
        # Fit the scaler on training data only to avoid test-set leakage.
        scaler = StandardScaler()
        scaler.fit(X_train)
        X_train = scaler.transform(X_train)
        X_test = scaler.transform(X_test)

    # train predictor
    model.fit(X_train, Y_train)
    # test predictor
    Y_pred = model.predict(X_test)
    Y_proba = model.predict_proba(X_test)[:, 1]  # P(label == True)
    fpr, tpr, _ = metrics.roc_curve(Y_test, Y_proba)  # FPR, TPR, thresholds
    auc = metrics.auc(fpr, tpr)                       # area under ROC curve
    score = metrics.accuracy_score(Y_test, Y_pred)    # model accuracy
    print('AUC: %.3f, Accuracy: %.3f' % (auc,  score))
    


