import os
import torch.optim as optim
from object_detection_tools import *
from object_detection_actions import *
import time

# parameters
epsilon = 1.0  # initial exploration rate for epsilon-greedy; annealed by 0.1 per epoch (floor 0.1)
BATCH_SIZE = 100  # transitions sampled from the replay buffer per optimization step
GAMMA = 0.90  # discount factor for future rewards
CLASS_OBJECT = 1  # id of the PASCAL VOC class the agent searches for
steps = 10  # maximum number of actions the agent may take per image
epochs = 50  # number of passes over the training set
Buffer_size = 1000  # replay buffer capacity
memory = ReplayMemory(Buffer_size)

LearningRate = 1e-6  # 0.000001
MODEL_UPDATE_ITER = 0  # counts optimization steps; schedules target-network syncs in optimizer_model()
TARGET_UPDATE_ITER = 100  # sync target network every N steps (was originally 200)
# Different actions that the agent can do
number_of_actions = 6
# Number of past actions kept in the one-hot history vector
actions_of_history = 4


# One replay-buffer entry; next_state is None for terminal (trigger) transitions.
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
# Pin all CUDA work to GPU 3.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

# Get models: a frozen CNN feature extractor and the online/target Q-networks.
print('load Feature_extractor model')
Feature_extractor = get_feature_extractor().cuda()
print('load Q_net model')
Q_net = get_q_network().cuda()
# Separate target network (standard DQN): held fixed between periodic syncs
# so the TD targets computed in optimizer_model() are stable.
target_Q_net = get_q_network().cuda()
# Start the target network from the online network's weights; otherwise the
# first TARGET_UPDATE_ITER optimization steps would bootstrap TD targets
# from an independently random-initialized network.
target_Q_net.load_state_dict(Q_net.state_dict())

# define optimizers for each model
optimizer = optim.Adam(Q_net.parameters(), lr=LearningRate)
criterion = nn.MSELoss().cuda()


# Get image data: trainval splits from PASCAL VOC 2007 and VOC 2012.
path_voc_1 = "../dataset/VOCdevkit/VOC2007"
path_voc_2 = "../dataset/VOCdevkit/VOC2012"
data_set_name = 'aeroplane_trainval'
# Class category of PASCAL that the RL agent will be searching
class_object = '1'
image_names_1, images_1 = load_image_data(path_voc_1, data_set_name, class_object)
image_names_2, images_2 = load_image_data(path_voc_2, data_set_name, class_object)
# NOTE(review): concatenation order matters — the training loop assumes the
# first len(image_names_1) entries are VOC2007 images when it picks the
# annotation path.
image_names = image_names_1 + image_names_2
images = images_1 + images_2
print('len(images_1) = ', len(images_1))
print('len(images_2) = ', len(images_2))

print("trainval image:%d" % len(image_names))


def select_action(state):
    """Epsilon-greedy action selection.

    With probability ``epsilon`` (module-level, annealed per epoch) a uniform
    random action index is returned; otherwise the action with the highest
    Q-value predicted by the online network ``Q_net`` is chosen.

    Args:
        state: feature tensor for the current window (batch of size 1).

    Returns:
        An action index in ``[0, number_of_actions)``.
    """
    if random.random() < epsilon:
        # Explore: uniform random action. Use the shared module constant
        # instead of a hard-coded 6 so the action-space size lives in one place.
        action = np.random.randint(0, number_of_actions)
    else:
        # Exploit: greedy action w.r.t. the online Q-network.
        qval = Q_net(Variable(state))
        _, predicted = torch.max(qval.data, 1)
        action = predicted[0]
    return action

# Tensor type aliases: pick the CUDA tensor classes when a GPU is available,
# CPU classes otherwise, so the rest of the file is device-agnostic.
use_cuda = torch.cuda.is_available()
if use_cuda:
    FloatTensor = torch.cuda.FloatTensor
    LongTensor = torch.cuda.LongTensor
    ByteTensor = torch.cuda.ByteTensor
else:
    FloatTensor = torch.FloatTensor
    LongTensor = torch.LongTensor
    ByteTensor = torch.ByteTensor
# Default tensor type used throughout the file.
Tensor = FloatTensor


def optimizer_model():
    """Run one DQN optimization step on a minibatch from the replay buffer.

    Samples BATCH_SIZE transitions, computes Q(s_t, a) with the online network
    and the TD target r + GAMMA * max_a' Q_target(s_{t+1}, a'), then takes one
    Adam step on the MSE between them. Also syncs the target network every
    TARGET_UPDATE_ITER steps. No-op until the buffer holds a full batch.
    Reads the module-level counters MODEL_UPDATE_ITER and loss_print, which
    the training loop increments once per environment step.
    """
    # Do not update until the replay buffer holds at least one full batch.
    if len(memory) < BATCH_SIZE:
        return

    # Randomly sample BATCH_SIZE transitions, then transpose the list of
    # Transitions into one Transition of batched fields.
    transitions = memory.sample(BATCH_SIZE)
    batch = Transition(*zip(*transitions))

    # Handle transitions that have a next_state first.
    # non_final_mask[i] = 1 when transition i is non-terminal (next_state
    # exists); terminal transitions store next_state = None.
    non_final_mask = ByteTensor(tuple(map(lambda s: s is not None, batch.next_state)))
    # Collect the existing next states only.
    next_states = [s for s in batch.next_state if s is not None]
    with torch.no_grad():
        non_final_next_states = Variable(torch.cat(next_states)).type(Tensor)
    # Batched state, action and reward tensors.
    state_batch = Variable(torch.cat(batch.state)).type(Tensor)
    action_batch = Variable(torch.LongTensor(batch.action).view(-1, 1)).type(LongTensor)
    reward_batch = Variable(torch.FloatTensor(batch.reward).view(-1, 1)).type(Tensor)

    # Compute Q(s_t, a) - the model computes Q(s_t), then we select the columns of actions taken
    state_action_values = Q_net(state_batch).gather(1, action_batch)

    # Compute V(s_{t+1}) for all next states.
    next_state_values = Variable(torch.zeros(BATCH_SIZE, 1).type(Tensor))
    ne = target_Q_net(non_final_next_states).detach().max(1)[
        0]  # non-terminal transitions: evaluate next_state with the (detached) target network
    next_state_values[non_final_mask] = ne.reshape([ne.shape[0], 1])  # terminal transitions keep the initial value 0
    # Compute the expected Q values (TD target).
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch

    # Periodically copy the online network's weights into the target network.
    if MODEL_UPDATE_ITER % TARGET_UPDATE_ITER == 0:
        target_Q_net.load_state_dict(Q_net.state_dict())

    # MSE loss between predicted Q-values and TD targets.
    loss = criterion(state_action_values, expected_state_action_values)
    # print('loss = ', loss)
    # Optimize the model
    optimizer.zero_grad()  # reset accumulated gradients (grads accumulate across backward calls otherwise)
    loss.backward()  # backpropagate to get gradients
    optimizer.step()  # apply the Adam update

    # Throttled progress logging (loss_print is bumped once per env step).
    if loss_print % 200 == 0:
        print('loss = ', loss)


# Global counter of environment steps; used to throttle loss printing
# inside optimizer_model().
loss_print = 0
print('train the Q-network')
for epoch in range(epochs):
    now = time.time()
    # Iterate over every training image.
    for i in range(len(image_names)):
        # the image part
        image_name = image_names[i]  # image identifier
        # print('image_name = ', image_name)
        image = images[i]  # image pixel data
        # Ground-truth annotation for this image:
        # annotation[i][0] is the class id of the i-th object,
        # annotation[i][1..4] is the i-th object's bounding box.
        # The first len(image_names_1) entries come from VOC2007, the rest
        # from VOC2012, so pick the matching annotation directory.
        if i < len(image_names_1):
            annotation = get_bb_of_gt_from_pascal_xml_annotation(image_name, path_voc_1)
        else:
            annotation = get_bb_of_gt_from_pascal_xml_annotation(image_name, path_voc_2)
        # classes_gt_objects[i] is the class of ground-truth object i.
        classes_gt_objects = get_ids_objects_from_annotation(annotation)
        # print('classes_gt_object = ', classes_gt_objects)
        # gt_masks[y_min:y_max, x_min:x_max, i] = 1 marks the pixels covered
        # by ground-truth object i; all other pixels are 0.
        gt_masks = generate_bounding_box_from_annotation(annotation, image.shape)

        # Environment for this image: tracks the current search window.
        window = Action(image, gt_masks, classes_gt_objects, CLASS_OBJECT)
        # One-hot history of the last `actions_of_history` actions; zeros initially.
        history_action = torch.zeros((actions_of_history, number_of_actions))
        state = get_state(window.now_img, history_action, Feature_extractor)
        iou = window.calculate_max_iou()

        done = False
        for step in range(steps):
            # Stop proposing as soon as the IoU exceeds 0.5.
            # Select action, the author force terminal action if case actual IoU is higher than 0.5
            if iou > 0.5:
                action = 5  # action 5 is the terminal "trigger" action
            else:
                action = select_action(state)

            # Perform the action and observe new state
            if action == 5:
                # Trigger: episode ends; reward depends only on the final IoU.
                next_state = None
                reward = get_reward_trigger(iou)
                done = True
            else:
                # Movement action: transform the window and observe the new IoU.
                new_iou = window.action(action)
                # update history vector and get next state
                history_action = update_history_vector(history_action, action)
                next_state = get_state(window.now_img, history_action, Feature_extractor)
                reward = get_reward_movement(iou, new_iou)
                iou = new_iou
            # Store the transition in memory
            memory.push(state, action, next_state, reward)
            # Move to the next state
            state = next_state

            loss_print = loss_print + 1
            # Perform one step of the optimization (on the target network)
            optimizer_model()
            MODEL_UPDATE_ITER += 1  # one more optimization step taken

            if done:
                break
    # Anneal exploration: decay epsilon by 0.1 per epoch, floored at 0.1.
    if epsilon > 0.1:
        epsilon -= 0.1
    time_cost = time.time() - now

    print('epoch = %d, time_cost = %.4f' % (epoch, time_cost))
    # Checkpoint the Q-network weights after every epoch.
    print('start saving model')
    Q_NETWORK_PATH = '../my_final_4/' + 'aeroplane_trainval' + str(epoch) + '.pkl'
    torch.save(Q_net.state_dict(), Q_NETWORK_PATH)
    print('Complete')


# Save the final Q-network weights after all epochs.
print('start saving model')
Q_NETWORK_PATH = '../my_final_4/' + 'aeroplane_trainval.pkl'
torch.save(Q_net.state_dict(), Q_NETWORK_PATH)
print('Complete')




















