import os
import random
import sys
import time

# Select the GPU before TensorFlow is imported so CUDA device discovery
# honors the restriction.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

import numpy as np
import tensorflow as tf

from src.evaluator import has_precision_problem_occurred
from src.generator_utils import get_random_seed_tensor, exec_method_by_index

# Probability of exploiting (i.e. NOT exploring) in epsilon-greedy selection.
EPSILON = float(sys.argv[1])
MAX_ITERATIONS = 10000
# Q-learning discount factor (gamma).
DISCOUNT = float(sys.argv[2])
LEARNING_RATE = float(sys.argv[3])
# Threshold forwarded to has_precision_problem_occurred.
b = float(sys.argv[4])
# Operator under test, e.g. 'batch_normalization'.
operator = sys.argv[5]
# NOTE(review): tuple() over a string yields a tuple of single characters,
# e.g. '2,3' -> ('2', ',', '3'). Confirm get_random_seed_tensor really
# expects that, or parse the dimensions explicitly.
shape = tuple(sys.argv[6])
# sys.argv[7] (time-budget multiplier) is consumed in __main__ below.
framework = sys.argv[8]


def init_table(methods_num):
    """Return a zeroed float32 Q-table of shape (methods_num, 3): one row
    per mutation method, one column per move action (left/stay/right)."""
    table = np.zeros((methods_num, 3), dtype=np.float32)
    return table


def init_table_seq(methods_num):
    """Return a zeroed square float32 Q-table for the sequence formulation,
    where both states and actions are mutation-method indices."""
    dims = (methods_num, methods_num)
    return np.zeros(dims, dtype=np.float32)


def choose_action_method(cur_state, q_table):
    """Epsilon-greedy action selection over q_table row ``cur_state``.

    With probability 1 - EPSILON — or whenever the row is still all zeros
    (untrained) — pick a uniformly random action; otherwise exploit the
    best-valued action.
    """
    p_action = q_table[cur_state]
    # BUG FIX: the original condition `p_action.all() == 0` was true whenever
    # ANY entry was zero (not when all were), forcing pure exploration long
    # after training had populated the row. The intent is "explore while the
    # whole row is still zero".
    if np.random.uniform() > EPSILON or not p_action.any():
        return np.random.randint(0, q_table.shape[-1])
    return p_action.argmax()


def score(s, shape, framework):
    """Generate a fresh random seed tensor, apply mutation method ``s``, and
    report whether a precision problem occurs under ``framework``.

    Reads module globals ``operator`` and ``b``.
    """
    frameworks = ['tf', 'torch', 'mnn']
    seed_tensor = get_random_seed_tensor(shape)
    mutated = exec_method_by_index(seed_tensor, s)
    flags = has_precision_problem_occurred(tensor=mutated, operator=operator, b=b)
    return bool(flags[frameworks.index(framework)])


def score_seq(s, shape, framework, tensor):
    """Apply mutation method ``s`` to ``tensor`` and report whether a
    precision problem occurs under ``framework``.

    Returns:
        (problem_found, mutated_tensor) so the caller can keep mutating
        the same tensor across the episode.
    """
    frameworks = ['tf', 'torch', 'mnn']
    mutated = exec_method_by_index(tensor, s)
    flags = has_precision_problem_occurred(tensor=mutated, operator=operator, b=b)
    return bool(flags[frameworks.index(framework)]), mutated


def get_feedback(s, a, shape, framework):
    """Take move-action ``a`` from state ``s`` and return (next_state, reward).

    Actions: 0 = move left, 1 = stay, 2 = move right. Hitting either end of
    the 0..41 state range leaves the state unchanged with reward 0; any
    other transition is scored via score() (reward 1 on a precision problem,
    else 0). Unknown actions yield (None, None).
    """
    deltas = {0: -1, 1: 0, 2: 1}
    if a not in deltas:
        return None, None
    candidate = s + deltas[a]
    if candidate < 0 or candidate > 41:
        # Bumped against a boundary: stay put, no reward.
        return s, 0
    reward = 1 if score(candidate, shape, framework) else 0
    return candidate, reward


def get_feedback_seq(s, a, shape, framework, tensor, c):
    """Sequence-style feedback: the chosen action ``a`` (next mutation index)
    becomes the next state; the current tensor is mutated accordingly.

    Args:
        s: current state (previous mutation index); not used for the transition.
        a: chosen action, i.e. the next mutation method index.
        shape: tensor shape, forwarded to score_seq.
        framework: one of 'tf' / 'torch' / 'mnn'.
        tensor: tensor accumulated so far in this episode.
        c: 1-based step count within the episode.

    Returns:
        (next_state, reward, mutated_tensor).
    """
    s_ = a
    res, tensor_ = score_seq(s_, shape, framework, tensor)
    # BUG FIX: the saturation penalty below was a dead store in the original —
    # it was unconditionally overwritten by the step-count branch. Give it
    # precedence so saturated tensors (values <= -9.7) are actually penalized.
    if np.any(tensor <= -9.7):
        reward = -1
    elif c <= 30:
        # Earlier successes earn a larger reward; failures earn nothing.
        reward = 1 / c if res else 0
    else:
        # Episode ran too long: penalize to force termination.
        reward = -1
    return s_, reward, tensor_


def get_next_state(s, a):
    """Return the successor state for a move-style action.

    Action 2 moves right (state 41 stays at 41), action 1 stays, action 0
    moves left (state 0 stays at 0). Any other action is invalid.

    Returns:
        The next state index, or None for an invalid action.
    """
    if a == 2:
        return s if s == 41 else s + 1
    if a == 1:
        return s
    if a == 0:
        return s if s == 0 else s - 1
    # BUG FIX: the original returned a (None, None) tuple here while every
    # other branch returns a scalar; callers assign a single state.
    return None


def get_next_state_seq(s, a):
    """Sequence formulation: the chosen action *is* the next state (the
    index of the next mutation method). The current state is unused."""
    return a


if __name__ == '__main__':
    frameworks = ['tf', 'torch', 'mnn']
    # batch_normalization behaves differently at inference time; force
    # training mode so the fuzzed behavior is consistent.
    if operator == 'batch_normalization':
        tf.keras.backend.set_learning_phase(1)

    root_dir = './data/{0}/{1}1'.format(framework, operator)
    # makedirs(exist_ok=True) also creates missing parents, unlike mkdir.
    os.makedirs(root_dir, exist_ok=True)

    # --- Training phase (sequence formulation) ---
    # Q-table entry [s][a] values applying mutation method a after method s
    # (42 mutation methods total).
    q_table = init_table_seq(42)
    for iteration in range(MAX_ITERATIONS):
        print('iteration {0}'.format(iteration + 1))
        s = random.randint(0, 41)
        tensor = get_random_seed_tensor(shape)
        ended = False
        c = 1
        while not ended:
            a_chosen = choose_action_method(s, q_table)
            s_, reward, tensor_ = get_feedback_seq(s, a_chosen, shape, framework, tensor, c)
            if reward == 0:
                # Non-terminal step: standard Q-learning bootstrapped update.
                v = reward + DISCOUNT * q_table[s_].max() - q_table[s][a_chosen]
            else:
                # Terminal step (success reward or penalty): no bootstrap.
                v = reward - q_table[s][a_chosen]
                ended = True
            q_table[s][a_chosen] += LEARNING_RATE * v
            s = s_
            tensor = tensor_
            c += 1
    print(q_table)

    # --- Sampling phase (sequence formulation) ---
    # Follow the learned policy for a fixed wall-clock budget and record the
    # mutation paths that trigger precision problems.
    records = {}
    success_cases_num = 15000
    good = 0
    sample_idx = 0
    start = time.time()
    # BUG FIX: the original `int(sys.argv[7] * 1200)` repeated the *string*
    # 1200 times before parsing, producing an astronomically large budget.
    # Parse the number first, then scale to seconds.
    budget_seconds = float(sys.argv[7]) * 1200
    while time.time() - start < budget_seconds:
        # BUG FIX: the original printed `t + 1` with `t` undefined (NameError
        # on the first sample); keep an explicit counter instead.
        sample_idx += 1
        print('sampling {0}'.format(sample_idx))
        s = random.randint(0, 41)
        path = [str(s)]
        tensor_seed = get_random_seed_tensor(shape)
        final_tensor = exec_method_by_index(tensor_seed, s)
        # Skip seeds that are already in the saturated region.
        if np.any(final_tensor <= -9.7):
            continue
        counter = 1
        while not has_precision_problem_occurred(tensor=final_tensor, operator=operator, b=b)[frameworks.index(framework)]:
            if counter == 30:
                # Gave up on this seed; it no longer counts as a success.
                success_cases_num -= 1
                break
            a_chosen = choose_action_method(s, q_table)
            s = get_next_state_seq(s, a_chosen)
            path.append(str(s))
            final_tensor = exec_method_by_index(final_tensor, s)
            counter += 1

        if counter != 30:
            # Persist the successful final tensor and tally its mutation path.
            np.save(root_dir + '/{0}.npy'.format(good), final_tensor)
            path_str = ','.join(path)
            records[path_str] = records.get(path_str, 0) + 1
            good += 1

    # Per-method frequency across all successful paths, normalized by the
    # remaining success budget (matches the original reporting).
    lst = [0 for _ in range(42)]
    for path_str, count in records.items():
        for mut in path_str.split(','):
            lst[int(mut)] += count
        print((path_str, count))
    print([x / success_cases_num for x in lst])
    print('{0} successful cases'.format(success_cases_num))

