from test import get_metrics_from_index_list, MAX_NUMBER, save_arch_str2op_list, padding_zeros, operation2integers, \
    delete_useless_node, API201, get_pair
from Toy_experiment import  get_toy_data
import os
import pickle
import random
import copy
import numpy as np
from sklearn import svm
import argparse

# Over-sampling factor: how many extra candidates to draw before filtering
# out invalid architectures (1 means no over-sampling).
expand = 1

# NAS-Bench-201 operation names used on the cell edges.
NONE = 'none'
CONV1X1 = 'nor_conv_1x1'
CONV3X3 = 'nor_conv_3x3'
AP3X3 = 'avg_pool_3x3'
SKIP = 'skip_connect'

def get_pair(X):
    """Concatenate every unordered pair (i < j) of feature vectors in X.

    Returns an ndarray with one row per pair, each row being X[i] + X[j]
    (sequence concatenation).  Note: this definition shadows the
    ``get_pair`` imported from ``test`` at the top of the file.
    """
    assert len(X) > 0
    count = len(X)
    paired = [X[a] + X[b] for a in range(count - 1) for b in range(a + 1, count)]
    return np.array(paired)

def get_pair_two(X, y):
    """Build pairwise training data: concatenated features plus a 0/1 label.

    For each pair (i < j) the label is 1 when y[i] >= y[j], else 0.
    Returns (features ndarray, labels ndarray of shape [-1, 1]).
    """
    assert len(X) > 0
    count = len(X)
    feats, labels = [], []
    for a in range(count - 1):
        for b in range(a + 1, count):
            feats.append(X[a] + X[b])
            labels.append(1 if y[a] >= y[b] else 0)
    return np.array(feats), np.array(labels).reshape([-1, 1])


def op_list2str(op_list):
    """Serialize a 6-operation list into a NAS-Bench-201 architecture string.

    The cell has three nodes with 1, 2 and 3 incoming edges respectively;
    each node renders as ``|op~0|op~1|...|`` and nodes join with '+'.
    """
    node_sizes = (1, 2, 3)
    parts = []
    cursor = 0
    for size in node_sizes:
        edges = ''.join('|{}~{}'.format(op_list[cursor + e], e) for e in range(size))
        parts.append(edges + '|')
        cursor += size
    return '+'.join(parts)


def predict_by_predictor(population, predictor, integers2one_hot):
    """Score every individual's 'win_time' via pairwise predictor comparisons.

    For each pair (i, j), the predictor votes which architecture is better;
    the winner's 'win_time' counter is incremented.  Returns the population
    with updated counters.

    Fix: the pair counter was named ``ord``, shadowing the builtin.

    NOTE(review): get_metrics_from_population may drop invalid individuals,
    which would make len(X3) < len(population) and misalign the i/j indexing
    here — confirm the population is always fully valid at this point.
    """
    population_metrics = get_metrics_from_population(population)
    X3, _, __ = get_toy_data(population_metrics, select_upper_tri=False, additional_metrics=False, integers2one_hot=integers2one_hot)
    X = get_pair(X3)
    pred_y = predictor.predict(X)
    pair_idx = 0

    for i in range(len(X3) - 1):
        for j in range(i + 1, len(X3)):
            # prediction 1 means "i beats j"
            if pred_y[pair_idx] == 1:
                population[i]['win_time'] += 1
            else:
                population[j]['win_time'] += 1
            pair_idx += 1
    return population


def get_metrics_from_population(population):
    """Convert each individual's arch into a NAS-Bench-style metrics entry.

    Invalid architectures (pruned to nothing) get 'acc' = -1 on the
    individual and are omitted from the returned metrics dict.
    """
    metrics = {}
    for key, individual in population.items():
        pruned_matrix, pruned_op = delete_useless_node(individual['arch'])
        if pruned_matrix is None:
            individual['acc'] = -1
            continue

        matrix, ops = padding_zeros(pruned_matrix, pruned_op)
        metrics[key] = {
            'final_training_time': -1,
            'final_test_accuracy': -1,
            'fixed_metrics': {
                'module_adjacency': matrix,
                'module_integers': operation2integers(ops),
                'trainable_parameters': -1,
            },
        }
    return metrics


def population_initialization(ordered_dic, population_size=100):
    """Seed the population with random valid architectures from the benchmark.

    Draws population_size * expand candidate indices, keeps the first
    population_size whose pruned cell is valid, and records arch, test
    accuracy (scaled to [0, 1]), a zeroed win counter and the bench index.

    NOTE(review): with expand == 1, invalid candidates are skipped without
    replacement, so the returned population can be smaller than requested.
    """
    population = {}
    candidates = random.sample(list(range(0, MAX_NUMBER)), int(population_size * expand))
    filled = 0
    for bench_index in candidates:
        if filled == population_size:
            break
        op_list = save_arch_str2op_list(ordered_dic[bench_index]['arch_str'])
        pruned_matrix, pruned_op = delete_useless_node(op_list)
        test_acc = ordered_dic[bench_index]['cifar10']
        if pruned_matrix is None:
            continue  # skip architectures that prune to an empty cell
        population[filled] = {
            'arch': op_list,
            'acc': test_acc / 100,
            'win_time': 0,
            'index': bench_index,
        }
        filled += 1
    return population


def binary_selection(population):
    """Binary tournament: sample two distinct individuals, return the index
    of the one with the higher 'win_time' (ties go to the second pick).

    Bug fix: the candidate pool was ``range(0, population_size - 1)``, which
    could never select the last individual and raised ValueError for a
    population of size 2 (sampling 2 from 1 candidate).
    """
    population_size = len(population)
    idx_a, idx_b = random.sample(range(population_size), 2)
    if population[idx_a]['win_time'] > population[idx_b]['win_time']:
        return idx_a
    return idx_b


def generate_offspring(population):
    """Produce len(population)//2 * 2 children via binary tournament selection
    followed by either crossover (p=0.8) or mutation (p=0.2).

    Children that prune to an invalid cell fall back to the corresponding
    parent arch.  Each child carries only 'arch' and a zeroed 'win_time'.
    """
    num_repeate = len(population) // 2
    offspring = {}
    for i in range(num_repeate):
        # pick two distinct parent indices by tournament
        arch_index1 = binary_selection(population)
        arch_index2 = copy.deepcopy(arch_index1)
        num_while = 0
        # NOTE(review): this guard only prints a warning; if binary_selection
        # keeps returning the same index the loop never terminates.
        while (arch_index1 == arch_index2):
            arch_index2 = binary_selection(population)
            num_while += 1
            if num_while > 10:
                print('Choosing two architecture index too many times! Some Errors occur!')  # ensure the two picked indices differ

        cross_prob = 0.8
        if random.random() < cross_prob:
            # crossover branch: children are NOT additionally mutated
            offspring1, offspring2 = crossover_operator(population, arch_index1, arch_index2)
            # check invalid individual; fall back to the parent arch
            # NOTE(review): confirm crossover_operator copies the parent lists
            # before swapping — otherwise this fall-back may already be altered.
            pruned_matrix, pruned_op = delete_useless_node(offspring1)
            if pruned_matrix is None:
                offspring1 = population[arch_index1]['arch']

            pruned_matrix, pruned_op = delete_useless_node(offspring2)
            if pruned_matrix is None:
                offspring2 = population[arch_index2]['arch']
            mutation_offspring1 = offspring1
            mutation_offspring2 = offspring2
        else:
            # mutation branch: children are mutated copies of the parents
            offspring1, offspring2 = population[arch_index1]['arch'], population[arch_index2]['arch']

            mutation_offspring1 = mutation_operator(offspring1)
            mutation_offspring2 = mutation_operator(offspring2)
            # check invalid individual; fall back to the unmutated parent
            pruned_matrix, pruned_op = delete_useless_node(mutation_offspring1)
            if pruned_matrix is None:
                mutation_offspring1 = offspring1

            pruned_matrix, pruned_op = delete_useless_node(mutation_offspring2)
            if pruned_matrix is None:
                mutation_offspring2 = offspring2

        offspring[i * 2] = {'arch': mutation_offspring1, 'win_time': 0}
        offspring[i * 2 + 1] = {'arch': mutation_offspring2, 'win_time': 0}

    return offspring


def mutation_operator(offspring):
    """Return a copy of `offspring` with one random edge forced to a
    different operation (the original op is excluded from the choices)."""
    position = random.randint(0, 5)

    mutant = copy.deepcopy(offspring)

    candidates = [NONE, CONV1X1, CONV3X3, AP3X3, SKIP]
    candidates.remove(mutant[position])
    mutant[position] = random.choice(candidates)
    return mutant


def crossover_operator(population, index_arch1, index_arch2):
    """Single-point crossover between two parents; returns two NEW op-lists.

    Bug fix: the original swapped operations directly on the lists stored
    in `population`, silently mutating both parents (and corrupting the
    caller's fall-back to the parent arch when a child is invalid).  The
    parent lists are now copied before the swap.
    """
    crossover_position = random.randint(0, 5)
    child1 = list(population[index_arch1]['arch'])
    child2 = list(population[index_arch2]['arch'])
    child1[crossover_position], child2[crossover_position] = (
        child2[crossover_position],
        child1[crossover_position],
    )
    return child1, child2


def environment_selection(population, offspring):
    """Form the next generation: the top-10% parents by 'win_time' (elitism)
    plus the best offspring, all with their win counters reset to zero."""
    elitism_rate = 0.1
    size = len(population)
    n_elite = int(size * elitism_rate)

    ranked_parents = sorted(population.values(), key=lambda ind: ind['win_time'], reverse=True)
    ranked_children = sorted(offspring.values(), key=lambda ind: ind['win_time'], reverse=True)

    survivors = ranked_parents[:n_elite] + ranked_children[:size - n_elite]
    return {slot: {'arch': ind['arch'], 'win_time': 0} for slot, ind in enumerate(survivors)}


def get_metrics_from_poparch(pop):
    """Package each individual as a NAS-Bench-style metrics entry, with its
    true CIFAR-10 accuracy (scaled to [0, 1]) looked up from the benchmark.

    Individuals whose cell prunes to nothing are skipped.
    """
    metrics = {}
    for slot in range(len(pop)):
        individual = pop[slot]
        true_acc = query_by_arch_indi(individual['arch'])
        pruned_matrix, pruned_op = delete_useless_node(individual['arch'])
        if pruned_matrix is None:
            continue  # invalid cell: no metrics entry
        matrix, ops = padding_zeros(pruned_matrix, pruned_op)

        metrics[slot] = {
            'final_training_time': 0,
            'final_test_accuracy': true_acc / 100,
            'win_time': individual['win_time'],
            'fixed_metrics': {
                'module_adjacency': matrix,
                'module_integers': operation2integers(ops),
                'trainable_parameters': -1,
            },
        }
    return metrics

def population_log(gen_no, population):
    """Dump the population's repr to pkl/gen_<gen_no>.text for inspection.

    Bug fix: the path was the raw literal ``'pkl\\gen_{}.text'`` — a
    Windows-only backslash path that breaks on POSIX systems (the rest of
    the file uses forward-slash 'pkl/...' paths); os.path.join is portable.
    """
    save_path = os.path.join('pkl', 'gen_{}.text'.format(gen_no))
    with open(save_path, 'w') as log_file:
        log_file.write(str(population))
        log_file.write("\n")


def GAon201(predictor, train_num, num_generation, integers2one_hot, add_fit, upper_limit_time):
    """Run the predictor-assisted genetic algorithm over NAS-Bench-201.

    Fits the pairwise `predictor` on `train_num` sampled architectures,
    initializes a population, and evolves it for `num_generation`
    generations; when `add_fit` is set, the predictor is re-fitted each
    generation on the 19 current best individuals.  Returns the final
    population dict.

    Fixes: the bare ``except:`` around the re-fit is narrowed to
    ``ValueError`` (the only error the original message describes), and
    commented-out debug code is removed.
    """
    # load the pre-tidied NAS-Bench-201 lookup table
    tidy_file = r'pkl/tidy_nas_bench_201.pkl'
    if not os.path.exists(tidy_file):
        raise Exception("tidy_nas_bench_201.pkl没有")
    else:
        with open(tidy_file, 'rb') as file:
            ordered_dic = pickle.load(file)

    # sample the training indices for the predictor
    expand_train_num = int(train_num * expand)
    sample_list = list(range(0, MAX_NUMBER))
    train_list = random.sample(sample_list, expand_train_num)
    train_list.sort()

    train_metrics = get_metrics_from_index_list(train_list, ordered_dic, train_num, 'cifar10',  upper_limit_time)
    X1, y1, _ = get_toy_data(train_metrics, select_upper_tri=False, additional_metrics = False, integers2one_hot = integers2one_hot)
    X, y = get_pair_two(X1, y1)

    # initialize predictor and fit
    print("-----------fit-------------")
    predictor.fit(X, y.ravel())

    # initialize population and score it once
    population = population_initialization(ordered_dic)
    print("------population_initialization--------")
    population = predict_by_predictor(population, predictor, integers2one_hot)
    print("--------p1--------")

    for gen_no in range(num_generation):
        offspring = generate_offspring(population)
        print('start generate offspring {}'.format(gen_no))
        population = environment_selection(population, offspring)
        population = predict_by_predictor(population, predictor, integers2one_hot)

        if add_fit:
            population_order0 = sorted(population.items(), key=lambda x: x[1]['win_time'], reverse=True)

            # re-fit the predictor on the 19 current best individuals
            fit_population = {}
            for i in range(19):
              fit_population[i] = {'arch': population_order0[i][1]['arch'], 'win_time': 0}
            # NOTE(review): shuffling a dict relies on its int keys 0..18
            # behaving like sequence indices; it permutes the values in place.
            random.shuffle(fit_population)
            metric9 = get_metrics_from_poparch(fit_population)
            X2, y2, _ = get_toy_data(metric9, select_upper_tri=False, additional_metrics=False, integers2one_hot=integers2one_hot)
            X0, y0 = get_pair_two(X2, y2)
            print('---add_fit----')
            try:
              predictor.fit(X0, y0.ravel())
            except ValueError:
              # all elite pairs carried the same label; keep the previous model
              print('ValueError: The number of classes has to be greater than one; got 1 class')

    return population


def query_by_arch(best_arch):
    """Return (cifar10 test acc, cifar10 validation acc) for an arch string.

    Bug fix: when no benchmark entry matched `best_arch`, the original fell
    through to an unbound ``best_metric`` and crashed with NameError; a
    clear ValueError is raised instead.

    Raises:
        Exception: if the tidied benchmark pickle is missing.
        ValueError: if the architecture string is not in the benchmark.
    """
    tidy_file = r'pkl/tidy_nas_bench_201.pkl'
    if not os.path.exists(tidy_file):
        raise Exception("Please run demo3.py first!")
    with open(tidy_file, 'rb') as file:
        ordered_dic = pickle.load(file)
    for index in ordered_dic:
        if ordered_dic[index]['arch_str'] == best_arch:
            best_metric = ordered_dic[index]
            break
    else:
        raise ValueError('architecture not found in NAS-Bench-201: {}'.format(best_arch))

    return best_metric['cifar10'], best_metric['cifar10_valid200']



def query_by_arch_indi(indi_arch):
    """Return the CIFAR-10 test accuracy for an op-list architecture.

    The op list is serialized with op_list2str and looked up in the tidied
    benchmark pickle; returns 0 when the architecture is not found.
    """
    target_str = op_list2str(indi_arch)
    tidy_file = r'pkl/tidy_nas_bench_201.pkl'
    if not os.path.exists(tidy_file):
        raise Exception("没有那个文件!")
    with open(tidy_file, 'rb') as file:
        ordered_dic = pickle.load(file)
    accuracy = 0
    for record in ordered_dic.values():
        if record['arch_str'] == target_str:
            accuracy = record['cifar10']
            break
    return accuracy

if __name__ == '__main__':
    def _str2bool(value):
        # argparse delivers command-line values as strings; the original
        # `choices=[True, False]` could never match any input, making the
        # flag unusable.  Parse common truthy spellings explicitly.
        return str(value).lower() in ('true', '1', 'yes')

    parser = argparse.ArgumentParser(description='online predictor')
    parser.add_argument('--integers2one-hot', type=_str2bool, default=True, help='one-hot encoding')
    args = parser.parse_args()
    add_fit = False

    integers2one_hot = args.integers2one_hot
    num_generation = 20
    # num_query does not cap the number of queried architectures;
    # upper_limit_time is the real search budget
    num_query = 500
    repeat_num = 10
    best_acc_list = []
    best_valid_acc_list = []
    upper_limit_time = 12000

    for _ in range(repeat_num):
        predictor = svm.SVC()
        last_population = GAon201(predictor, num_query, num_generation, integers2one_hot, add_fit, upper_limit_time)
        population_order = sorted(last_population.items(), key=lambda x: x[1]['win_time'], reverse=True)
        best_arch = population_order[0][1]['arch']
        best_arch_str = op_list2str(best_arch)

        # query the true test/validation accuracy from the tidied benchmark
        best_acc, best_valid_acc = query_by_arch(best_arch_str)
        print('Best acc: {}'.format(best_acc))
        best_acc_list.append(best_acc)
        best_valid_acc_list.append(best_valid_acc)

    # Bug fix: the original re-queried and re-appended the LAST run's result
    # after the loop, double-counting it in the mean/std reported below.
    print('Best acc mean: {}, std: {}'.format(np.mean(best_acc_list), np.std(best_acc_list)))
    print('Best valid_acc mean: {}, std: {}'.format(np.mean(best_valid_acc_list), np.std(best_valid_acc_list)))


"""
num 100
1
Best acc: 94.00666666666666
best_valid_acc: 90.87066664957683

Best acc: 93.5
best_valid_acc: 90.9560000024414

Best acc: 94.16
best_valid_acc: 91.06000000244141

Best acc: 93.08500000000001
best_valid_acc: 89.93399998046876

Best acc: 94.30666666666667
best_valid_acc: 91.33599998535158

num 110
Best acc: 94.16
best_valid_acc: 91.06000000244141
"""

"""
150 15  
Best acc mean: 93.56803030303031, std: 1.069590410178765
Best valid_acc mean: 90.36612120697946, std: 1.4553401343572332

种群 150 
150 20  
run 1
Best acc mean: 93.9718181818182, std: 0.4493617553198838
Best valid_acc mean: 91.03199998468573, std: 0.638333939283472

run 2
Best acc mean: 93.53060606060608, std: 0.6414406420994923
Best valid_acc mean: 90.39660604980469, std: 0.8380063600413388
"""
