import os
import pickle
import numpy as np
import copy
import random
import collections
from nasbench import api
from nas_201_api import NASBench201API as API201
from Toy_experiment import get_toy_data
from sklearn import svm
from scipy.stats import kendalltau


# Pairwise-ranking classifier trained in the __main__ experiment below.
model_svm = svm.SVC()
# Basic (unpruned) adjacency matrix shared by every NAS-Bench-201 cell:
# 8 nodes (input, 6 operation nodes, output); entry [i][j] == 1 means an
# edge from node i to node j.
BASIC_MATRIX = [[0, 1, 1, 0, 1, 0, 0, 0],
                [0, 0, 0, 1, 0, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 1, 0],
                [0, 0, 0, 0, 0, 0, 1, 0],
                [0, 0, 0, 0, 0, 0, 0, 1],
                [0, 0, 0, 0, 0, 0, 0, 1],
                [0, 0, 0, 0, 0, 0, 0, 1],
                [0, 0, 0, 0, 0, 0, 0, 0]]

# Total number of NAS-Bench-201 architectures (5 candidate ops ^ 6 edges).
MAX_NUMBER = 15625
# Operation labels used for the integer encoding (see operation2integers).
NULL = 'null'
CONV1X1 = 'nor_conv_1x1'
CONV3X3 = 'nor_conv_3x3'
AP3X3 = 'avg_pool_3x3'


def delete_useless_node(ops):
    """Remove 'skip_connect' and 'none' nodes from the basic cell graph.

    For a skip connection, every predecessor of the node is wired directly
    to every successor before the node's own edges are cleared; for 'none',
    the node is simply disconnected.  The edited graph (with 'input' /
    'output' terminals added) is then pruned by ``api.ModelSpec``.

    Args:
        ops: the 6 operation names of a NAS-Bench-201 cell, in node order.

    Returns:
        The pruned adjacency matrix and operation list from the model spec
        (either may be None when the spec is invalid — callers check this).
    """
    adj = copy.deepcopy(BASIC_MATRIX)
    # Operation nodes occupy matrix indices 1..6 (0 is input, 7 is output).
    for node, op in enumerate(ops, start=1):
        if op == 'skip_connect':
            preds = [r for r in range(8) if adj[r][node] == 1]
            succs = [c for c in range(8) if adj[node][c] == 1]
            for r in preds:
                adj[r][node] = 0
            for c in succs:
                adj[node][c] = 0
            # Bypass the removed node: connect each predecessor to each successor.
            for r in preds:
                for c in succs:
                    adj[r][c] = 1
        elif op == 'none':
            for k in range(8):
                adj[k][node] = 0
                adj[node][k] = 0

    # Do not mutate the caller's list when adding the terminals.
    full_ops = ['input'] + list(ops) + ['output']

    model_spec = api.ModelSpec(matrix=adj, ops=full_ops)
    return model_spec.matrix, model_spec.ops


def save_arch_str2op_list(save_arch_str):
    """Flatten a NAS-Bench-201 architecture string into its 6 operation names.

    ``API201.str2lists`` yields three node groups containing 1, 2 and 3
    (op, input_index) pairs respectively; the operation name is the first
    element of each pair, collected in group order.
    """
    node_groups = API201.str2lists(save_arch_str)
    return [edge[0] for group in node_groups for edge in group]


def padding_zeros(matrix, op_list):
    """Pad a pruned cell back up to the fixed 8-node size.

    ``op_list`` is extended in place with NULL entries just before its last
    ('output') element, and zero rows/columns are inserted into a copy of
    ``matrix`` just before its last row/column, so the result is 8x8.
    A cell that already has 8 nodes is returned unchanged.
    """
    n = len(op_list)
    assert n == len(matrix)
    if n == 8:
        return matrix, op_list

    missing = 8 - n
    # In-place insertion mirrors the original behaviour; callers may rely
    # on op_list being mutated as well as returned.
    for pos in range(n, 8):
        op_list.insert(pos - 1, NULL)

    # Insert `missing` zero rows before the last row, then `missing` zero
    # columns before the last column (list obj keeps the block orientation).
    padded = np.insert(copy.deepcopy(matrix), n - 1,
                       np.zeros([missing, n]), axis=0)
    padded = np.insert(padded, [n - 1], np.zeros([8, missing]), axis=1)
    return padded, op_list


def operation2integers(op_list):
    """Encode the interior operations (input/output stripped) as an int array."""
    encoding = {NULL: 0, CONV1X1: 1, CONV3X3: 2, AP3X3: 3}
    return np.array([encoding[op] for op in op_list[1:-1]])


def get_metrics_from_index_list(index_list, ordered_dic, metrics_num, dataset, upper_limit_time=12000):
    """Build NAS-Bench-101-style metric dicts for up to ``metrics_num`` archs.

    Walks ``index_list`` in order and stops as soon as ``metrics_num`` valid
    architectures have been collected or the accumulated 12-epoch training
    time exceeds ``upper_limit_time``.  Architectures whose pruned graph is
    invalid still consume training-time budget but produce no entry.

    Returns:
        dict mapping architecture index -> metrics dict with keys
        'final_training_time', 'final_test_accuracy' and 'fixed_metrics'.
    """
    metrics = {}
    collected = 0
    elapsed = 0
    for index in index_list:
        if collected == metrics_num:
            break
        entry = ordered_dic[index]
        test_acc = entry[dataset]
        train_time = entry['cifar10_all_time']
        elapsed += train_time
        if elapsed > upper_limit_time:
            break
        ops = save_arch_str2op_list(entry['arch_str'])
        pruned_matrix, pruned_ops = delete_useless_node(ops)
        if pruned_matrix is None:
            # Invalid after pruning: its training time still counts, but it
            # does not contribute a metrics entry.
            continue
        collected += 1
        padded_matrix, padded_ops = padding_zeros(pruned_matrix, pruned_ops)
        op_integers = operation2integers(padded_ops)

        metrics[index] = {
            'final_training_time': train_time,
            # Benchmark accuracies are percentages; normalize to [0, 1].
            'final_test_accuracy': test_acc / 100,
            'fixed_metrics': {'module_adjacency': padded_matrix,
                              'module_integers': op_integers,
                              'trainable_parameters': -1},
        }
    return metrics




def experiment_on_201(train_num, test_num, dataset, integers2one_hot):
    """Build train/test feature matrices from NAS-Bench-201.

    The raw benchmark is tidied once into ``tidy_file`` (a pickled
    OrderedDict keyed by architecture index) and reloaded from there on
    later runs.  Training indices are cached on disk per ``train_num``;
    test indices are sampled fresh from the remaining architectures.

    Args:
        train_num: number of training architectures to keep.
        test_num: number of test architectures to keep.
        dataset: accuracy key to rank by, e.g. 'cifar10', 'cifar10_valid'.
        integers2one_hot: forwarded to ``get_toy_data`` (op encoding).

    Returns:
        (X, y, testX, testy) as produced by ``get_toy_data``.
    """
    expand = 1  # kept for compatibility: allows oversampling before filtering
    expand_train_num = int(train_num * expand)
    expand_test_num = int(test_num * expand)

    print('Loading original nas bench architecture and acc.')
    tidy_file = r'D:/代码/0 论文代码/GAon201/pkl/tidy_nas_bench_201.pkl'
    if not os.path.exists(tidy_file):
        nasbench201 = API201(r'D:/nasbench201data/NAS-Bench-201-v1_0-e61699.pth')
        # OrderedDict keeps insertion order, so the cached pickle is
        # reproducible run to run.
        ordered_dic = collections.OrderedDict()
        for index in range(len(nasbench201.evaluated_indexes)):
            # 12-epoch statistics (cheap proxy accuracy + training time).
            info = nasbench201.query_meta_info_by_index(index, '12')
            arch_str = info.arch_str
            cifar10_valid = info.get_metrics('cifar10-valid', 'x-valid')['accuracy']
            cifar10_all_time = info.get_metrics('cifar10-valid', 'x-valid')['all_time']

            # 200-epoch statistics (final accuracies).
            info = nasbench201.query_meta_info_by_index(index, '200')
            cifar10 = info.get_metrics('cifar10', 'ori-test')['accuracy']
            cifar10_valid200 = info.get_metrics('cifar10-valid', 'x-valid')['accuracy']
            index_info = {'arch_str': arch_str, 'cifar10': cifar10, 'cifar10_valid': cifar10_valid,
                          'cifar10_all_time': cifar10_all_time, 'cifar10_valid200': cifar10_valid200}
            ordered_dic[index] = index_info

        with open(tidy_file, 'wb') as file:
            pickle.dump(ordered_dic, file)
    else:
        with open(tidy_file, 'rb') as file:
            ordered_dic = pickle.load(file)

    print('Selecting train and test index.')
    train_index_save_path = r'pkl/fixed_train_data_201_{}.pkl'.format(train_num)
    if os.path.exists(train_index_save_path):
        with open(train_index_save_path, 'rb') as file:
            train_list = pickle.load(file)
    else:
        sample_list = list(range(0, MAX_NUMBER))
        train_list = random.sample(sample_list, expand_train_num)
        train_list.sort()
        with open(train_index_save_path, 'wb') as file:
            pickle.dump(train_list, file)

    # Exclude training indices from the test pool.  A set membership test
    # replaces the original O(n*m) list.remove() loop over all 15625
    # architectures, and no longer assumes the cached train_list has at
    # least expand_train_num entries.
    train_set = set(train_list[:expand_train_num])
    list_remove_train = [i for i in range(MAX_NUMBER) if i not in train_set]
    test_list = random.sample(list_remove_train, expand_test_num)
    test_list.sort()

    print('Generating metrics like nas-bench-101.')
    train_metrics = get_metrics_from_index_list(train_list, ordered_dic, train_num, dataset, upper_limit_time=10000000)
    test_metrics = get_metrics_from_index_list(test_list, ordered_dic, test_num, dataset, upper_limit_time=10000000)

    print('----------------------train---------------------')
    X, y, _ = get_toy_data(train_metrics, select_upper_tri=False,
                           additional_metrics=False,
                           integers2one_hot=integers2one_hot)
    print('----------------------test----------------------')
    testX, testy, _ = get_toy_data(test_metrics,  select_upper_tri=False,
                                   additional_metrics=False,
                                   integers2one_hot=integers2one_hot)

    return X, y, testX, testy


def get_pair(X, y):
    """Turn individual samples into ordered-pair training data.

    For every index pair i < j the feature is ``X[i] + X[j]`` (list
    concatenation, or element-wise add for array elements) and the label
    is 1 when ``y[i] >= y[j]``, else 0.

    Returns:
        (features, labels) as numpy arrays; labels have shape (n_pairs, 1).
    """
    assert len(X) > 0
    n = len(X)
    pair_x = [X[i] + X[j] for i in range(n - 1) for j in range(i + 1, n)]
    pair_y = [int(y[i] >= y[j]) for i in range(n - 1) for j in range(i + 1, n)]
    return np.array(pair_x), np.array(pair_y).reshape([-1, 1])


def calculate_KT(result, y_test, method, show_fig=True):
    """Print (and return) Kendall's tau between predicted and true rankings.

    Both score lists are converted to dense ranks first (tau on ranks equals
    tau on the raw scores when there are no ties).  When ``show_fig`` is
    true, a score scatter plot and a rank scatter plot are displayed.

    Args:
        result: predicted scores, one per architecture.
        y_test: ground-truth scores, same length as ``result``.
        method: label used in the printed/plotted output.
        show_fig: whether to display matplotlib figures.

    Returns:
        The Kendall tau coefficient (previously the function returned None;
        callers that ignore the return value are unaffected).
    """
    result_arg = np.argsort(result)
    y_test_arg = np.argsort(y_test)
    result_rank = np.zeros(len(y_test_arg))
    y_test_rank = np.zeros(len(y_test_arg))
    for i in range(len(y_test_arg)):
        result_rank[result_arg[i]] = i
        y_test_rank[y_test_arg[i]] = i
    KTau, _ = kendalltau(result_rank, y_test_rank)
    print('method: {:}, KTau: {:}'.format(method, KTau))
    print('--------------------try-end---------------------\n')
    if show_fig:
        # Bug fix: ``plt`` was referenced but matplotlib was never imported
        # anywhere in this file (NameError on show_fig=True).  Import lazily
        # so headless runs with show_fig=False don't require matplotlib.
        import matplotlib.pyplot as plt

        x = np.arange(0, 1, 0.01)
        y = x
        plt.figure(figsize=(5, 5))
        plt.plot(x, y, 'g', label='y_test = result')
        plt.scatter(result, y_test, s=1)
        plt.xlabel("predict_result")
        plt.ylabel("y_test")
        plt.title(f"method:{method}")
        plt.legend(loc="best")
        plt.show()

        x = np.arange(0, len(y_test), 0.1)
        y = x
        plt.figure(figsize=(6, 6))
        line_color = '#1F77D0'
        plt.plot(x, y, c=line_color, linewidth=1)
        point_color = '#FF4400'
        plt.scatter(result_rank, y_test_rank, c=point_color, s=2)
        plt.xlabel("predict_result")
        plt.ylabel("y_test")
        plt.title(f"method:{method}---KTau:{KTau}")
        plt.xlim(xmax=500, xmin=0)
        plt.ylim(ymax=500, ymin=0)
        plt.show()
    return KTau


def get_win_result(test_num, pred_y):
    """Convert pairwise predictions back to per-architecture win counts.

    ``pred_y`` holds one prediction per ordered pair (i, j) with i < j, in
    the same order ``get_pair`` generated them; a prediction of 1 means
    architecture i beats architecture j.

    Returns:
        A list of length ``test_num`` with each architecture's win count.
    """
    wins = np.zeros(test_num)
    k = 0  # flat index into pred_y, walking the (i, j) pairs in order
    for i in range(test_num - 1):
        for j in range(i + 1, test_num):
            winner = i if pred_y[k] == 1 else j
            wins[winner] += 1
            k += 1
    return list(wins)

if __name__ == '__main__':
    # Experiment configuration.
    train_num = 10
    test_num = 20
    # datasets: cifar10, cifar100, ImageNet
    dataset = 'cifar10'
    create_more_metrics = False  # NOTE(review): unused below — confirm before removing
    integers2one_hot = True

    # Load/encode architectures and split into train/test features.
    X, y, testX, testy = experiment_on_201(train_num, test_num, dataset, integers2one_hot=integers2one_hot)

    # Ordered-pair features/labels for pairwise ranking on the train split.
    X1, Y1 = get_pair(X, y)

    # Debug: training-pair label balance was ~5809 ones vs 5366 zeros.
    # data = np.array(Y1)
    # print('ones', np.sum(data == 1))
    # print('zeros', np.sum(data == 0))


    X2, Y2 = get_pair(testX, testy)

    # Debug: test-pair label balance was ~245056 ones vs 254444 zeros.
    # data = np.array(Y2)
    # print('ones', np.sum(data == 1))
    # print('zeros', np.sum(data == 0))

    print("-------model_svm.fit------")
    model_svm.fit(X1, Y1.ravel())
    print("------------ssssssssssssss-----------------------")
    # Pairwise classification accuracy on the test pairs.
    score2 = model_svm.score(X2, Y2)
    print('model_svm', score2)

    print("-------svm.predict------")
    pred_y1 = model_svm.predict(X2)
    print("-------get_win_result------")
    # Aggregate pair predictions to per-architecture win counts, then
    # measure rank agreement with the true accuracies.
    win_result1 = get_win_result(test_num, pred_y1)
    calculate_KT(win_result1, testy, 'svm', show_fig=True)


    # The triple-quoted strings below are pasted run logs kept for
    # reference; they are no-op expression statements, not docstrings.
    """
    cifar10
    D:\anaconda3\envs\TF2.4\python.exe D:/代码/GAon201/test.py
2022-08-21 15:07:47.578705: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cudart64_101.dll
Loading original nas bench architecture and acc.
Selecting train and test index.
Generating metrics like nas-bench-101.
----------------------train---------------------
Input 424 metrics, obtain 424 metrics
----------------------test----------------------
Input 1000 metrics, obtain 1000 metrics
-------model_svm.fit------
------------ssssssssssssss-----------------------
model_svm 0.8922082082082082

Process finished with exit code 0


D:\anaconda3\envs\TF2.4\python.exe D:\代码\GAon201\test.py
2022-08-23 21:39:16.485614: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cudart64_101.dll
Loading original nas bench architecture and acc.
Selecting train and test index.
Generating metrics like nas-bench-101.
----------------------train---------------------
Input 150 metrics, obtain 150 metrics
----------------------test----------------------
Input 1000 metrics, obtain 1000 metrics
-------model_svm.fit------
------------ssssssssssssss-----------------------
model_svm 0.8591651651651652

Process finished with exit code 0


cifar10_valid
D:\anaconda3\envs\TF2.4\python.exe D:\代码\GAon201\test.py
2022-08-23 22:28:51.899281: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cudart64_101.dll
Loading original nas bench architecture and acc.
Selecting train and test index.
Generating metrics like nas-bench-101.
----------------------train---------------------
Input 150 metrics, obtain 150 metrics
----------------------test----------------------
Input 1000 metrics, obtain 1000 metrics
-------model_svm.fit------
------------ssssssssssssss-----------------------
model_svm 0.7864644644644645

Process finished with exit code 0
    """

    """
    cifar10
    D:\anaconda3\envs\TF2.4\python.exe D:\代码\GAon201\test.py
2022-08-23 20:06:48.878493: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cudart64_101.dll
Loading original nas bench architecture and acc.
Selecting train and test index.
Generating metrics like nas-bench-101.
----------------------train---------------------
Input 424 metrics, obtain 424 metrics
----------------------test----------------------
Input 1000 metrics, obtain 1000 metrics
-------model_svm.fit------
------------ssssssssssssss-----------------------
model_svm 0.8893533533533533

Process finished with exit code 0

    """

    """
    150 500 valid model_svm 0.7783166332665331
       valid200 model_svm 0.864561122244489
    cifar10   model_svm 0.8603847695390782
    """