import os

import numpy as np
import torch

from object_detection_actions import *
from object_detection_tools import *
os.environ["CUDA_VISIBLE_DEVICES"] = "2"  # restrict the script to GPU index 2
# parameters
steps = 10  # maximum number of actions the agent may take on a single image
CLASS_OBJECT = 1  # target class id (this script evaluates the 'aeroplane' test set below)
# Different actions that the agent can do
number_of_actions = 6
# Actions captured in the history vector (length of the action-history window)
actions_of_history = 4

# Load the trained models (feature extractor + Q-network) and the test set.
print('load models')

# Check CUDA availability once, *before* any .cuda() transfer, so the script
# degrades gracefully to CPU instead of crashing on a GPU-less machine.
use_cuda = torch.cuda.is_available()

Feature_extractor = get_feature_extractor()  # vgg_16bn().features
if use_cuda:
    Feature_extractor = Feature_extractor.cuda()

# Q_network_weights_path = '../my_final_3/aeroplane_trainval_model.pkl'
Q_network_weights_path = '../Q_network+target_network_1/aeroplane_trainval_model.pkl'
Q_net = get_q_network(Q_network_weights_path)
if use_cuda:
    Q_net = Q_net.cuda()

path_voc = "../dataset/VOCdevkit/VOC2007"
data_set_name = 'aeroplane_test'
class_object = '1'
# Only test images that contain the target class (aeroplane) are loaded.
image_names, images = load_image_data(path_voc, data_set_name, class_object)
print('len(images) = ', len(images))

# Tensor type aliases: pick the CUDA variants when a GPU is available.
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
# Evaluate the detection policy: for each of `num_epochs` passes, run the
# agent for up to `steps` actions on every test image and count the images
# whose final window overlaps a ground-truth box with IoU > 0.5. Report the
# per-epoch detection rate and the average over all epochs.
num_epochs = 100
total_res = 0.0
for epoch in range(num_epochs):
    find_num = 0  # images where the agent's final box matched (IoU > 0.5)
    for image_name, image in zip(image_names, images):
        annotation = get_bb_of_gt_from_pascal_xml_annotation(image_name, path_voc)
        # classes_gt_objects[j] is the class id of ground-truth object j
        classes_gt_objects = get_ids_objects_from_annotation(annotation)
        # gt_masks: binary masks marking the pixels of each ground-truth object
        gt_masks = generate_bounding_box_from_annotation(annotation, image.shape)

        window = Action(image, gt_masks, classes_gt_objects, CLASS_OBJECT)
        history_vector = torch.zeros((actions_of_history, number_of_actions))
        state = get_state(window.now_img, history_vector, Feature_extractor)
        for step in range(steps):
            # Predict the action with the highest Q value for the current state.
            qval = Q_net(state)
            _, predicted = torch.max(qval.data, 1)
            action = predicted[0]

            if action == 5:
                # Terminal action: score the current window against ground truth.
                iou = window.calculate_max_iou()
                if iou > 0.5:
                    find_num += 1
                break

            # Movement action: apply it, then refresh history vector and state.
            window.action(action)
            history_vector = update_history_vector(history_vector, action)
            state = get_state(window.now_img, history_vector, Feature_extractor)

            if step == steps - 1:
                # Ran out of steps without terminating: score what we have.
                iou = window.calculate_max_iou()
                if iou > 0.5:
                    find_num += 1

    res = float(find_num) / float(len(image_names))
    # BUG FIX: the accumulation was commented out, so total_res stayed 0 and
    # the final average always printed 0.0. Accumulate the per-epoch result.
    total_res += res
    print('res = ', res)

total_res /= float(num_epochs)
print('total_res = ', total_res)



















