from vs_object_detection_actions import *
from vs_object_detection_tools import *

import os

import numpy as np
from PIL import Image, ImageDraw, ImageFont

# Expose only GPU 0 to CUDA (effective as long as no CUDA context exists yet).
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# ------------------------------------------------------------------ parameters
# Maximum number of actions the agent may take per image before giving up.
steps = 10
# Target Pascal VOC class id (1 == aeroplane).
CLASS_OBJECT = 1
# Size of the agent's discrete action space (5 window moves + 1 trigger).
number_of_actions = 6
# Number of past actions encoded (one-hot each) in the history vector.
actions_of_history = 4

# ----------------------------------------------------------------- load models
print('load models')
Feature_extractor = get_feature_extractor()  # vgg_16bn().features
Feature_extractor = Feature_extractor.cuda()

# Q_network_weights_path = '../my_final_3/aeroplane_trainval_model.pkl'
# Q_network_weights_path = '../Q_network+target_network_1/aeroplane_trainval_model.pkl'
# Q_network_weights_path = '../Q_network/aeroplane_trainval_model.pkl'
Q_network_weights_path = '../Q_net/aeroplane_trainval100.pkl'
Q_net = get_q_network(Q_network_weights_path)
Q_net = Q_net.cuda()

# ------------------------------------------------------------------- test data
path_voc = "../dataset/VOCdevkit/VOC2007"
data_set_name = 'aeroplane_test'
class_object = '1'
# Only test images that contain an aeroplane are loaded.
image_names, images = load_image_data(path_voc, data_set_name, class_object)
print('len(images) = ', len(images))

# Tensor aliases that transparently target the GPU when one is available.
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor

# Font and output folder used by draw_sequences_test for visualisations.
path_font = "/usr/share/fonts/liberation/LiberationMono-Regular.ttf"
path_testing_folder = "../testing/"
font = ImageFont.truetype(path_font, 24)
def string_for_action(action):
    """Map a discrete action id to its human-readable name.

    Actions 0-4 move/shrink the attention window (four corners plus the
    centre); action 5 is the trigger that ends the search.

    Returns the name for ids 0-5. Any other id (e.g. the sentinel -1 used
    for the initial frame before the first step) yields the string 'none'
    instead of None, so that callers concatenating the result into a label
    cannot raise a TypeError.
    """
    names = ("top_left", "top_right", "bottom_left", "bottom_right",
             "middle", "trigger")
    # Guard: the original if/elif chain fell through to None for ids
    # outside 0-5, which crashed draw_sequences_test's
    # "'action: ' + string_for_action(action)" on the initial action == -1.
    if 0 <= action < len(names):
        return names[action]
    return 'none'
    
def draw_sequences_test(step, action, qval, draw, region_image, background, path_testing_folder,
                        region_mask, image_name, save_boolean):
    """Paste one search step onto the composite visualisation canvas.

    Each step occupies its own 1000-px-wide column on `background`: the
    current region crop at the top, its 0/255 mask below, and two text
    annotations (the value passed as qval, and the action name) between
    them.  When save_boolean == 1 the whole canvas is written to
    path_testing_folder/<image_name>.png.  Returns the canvas either way.
    """
    # Horizontal offset of this step's column.
    col = 1000 * step
    # Current crop of the search window, pasted near the top of the column.
    background.paste(Image.fromarray(np.asarray(region_image, np.uint8)), (col, 70))
    # Region mask scaled to 0/255, pasted lower in the same column.
    background.paste(Image.fromarray(255 * region_mask), (col, 700))
    # Annotate: action name at y=550, qval (often the IoU at call sites) at y=500.
    draw.text((col, 550), 'action: ' + string_for_action(action), (0, 0, 0), font=font)
    draw.text((col, 500), str(qval), (0, 0, 0), font=font)
    if save_boolean == 1:
        background.save(path_testing_folder + image_name + '.png')
    return background


# 1 if you want to obtain visualizations of the search for objects
bool_draw = 1


# total_res = 0
# for epoch in range(100):  
# Evaluate the trained Q-network on every test image: run the localisation
# agent for at most `steps` actions and count a success whenever the final
# region overlaps a ground-truth box with IoU > 0.5.
find_num = 0
for i in range(len(image_names)):
    # Process one test image per iteration.
    image_name = image_names[i]
    image = images[i]
    # ————————————————————————————————————————————————————————————————————————
    # Wide white canvas: every step of the search gets its own column.
    background = Image.new('RGBA', (10000, 2000), (255, 255, 255, 255))
    draw = ImageDraw.Draw(background)
    # ————————————————————————————————————————————————————————————————————————

    annotation = get_bb_of_gt_from_pascal_xml_annotation(image_name, path_voc)
    # classes_gt_objects[i] is the class id of ground-truth object i.
    classes_gt_objects = get_ids_objects_from_annotation(annotation)
    # gt_masks[y_min:y_max, x_min:x_max, i] = 1 marks the pixels covered by
    # ground-truth object i; all other entries are 0.
    gt_masks = generate_bounding_box_from_annotation(annotation, image.shape)

    window = Action(image, gt_masks, classes_gt_objects, CLASS_OBJECT)
    history_vector = torch.zeros((actions_of_history, number_of_actions))
    state = get_state(window.now_img, history_vector, Feature_extractor)

    # ————————————————————————————————————————————————————————————————————————
    # Draw the initial (whole-image) region before any action is taken.
    # NOTE(review): action is -1 here; string_for_action must handle
    # out-of-range ids, otherwise the footnote concatenation inside
    # draw_sequences_test fails — verify.
    region_mask = np.zeros([image.shape[0], image.shape[1]])
    pos = window.region_position
    region_mask[pos[1]:pos[3], pos[0]:pos[2]] = 1
    step = 0
    action = -1
    iou = window.calculate_max_iou()
    draw_sequences_test(step, action, iou, draw, window.now_img, background, path_testing_folder,
                                region_mask, image_name, bool_draw)
    # ————————————————————————————————————————————————————————————————————————
    for step in range(steps):
        # Compute Q-values for the current state and take the greedy action.
        qval = Q_net(state)
        _, predicted = torch.max(qval.data, 1)
        action = predicted[0]

        if action == 5:
            # Action 5 is the trigger: the agent declares the object found.
            # choose the max bouding box
            iou = window.calculate_max_iou()
            # print('iou = ', iou)
            if iou > 0.5:
                find_num += 1
                print('action 5 success:  image ', i)
            break
        else:
            # print('action = ', action)
            new_iou = window.action(action)
            # update history vector and get next state
            history_vector = update_history_vector(history_vector, action)
            next_state = get_state(window.now_img, history_vector, Feature_extractor)

        # ————————————————————————————————————————————————————————————————————————
        # Visualise the region selected by this step (column step+1).
        region_mask = np.zeros([image.shape[0], image.shape[1]])
        pos = window.region_position
        region_mask[pos[1]:pos[3], pos[0]:pos[2]] = 1
        # print('step = ', step)
        iou = window.calculate_max_iou()
        draw_sequences_test(step+1, action, iou, draw, window.now_img, background, path_testing_folder,
                                    region_mask, image_name, bool_draw)
        # ————————————————————————————————————————————————————————————————————————



        state = next_state
        if step == steps - 1:
            # Ran out of steps without triggering: score the final region anyway.
            # choose the max bouding box
            iou = window.calculate_max_iou()
            # print('iou = ', iou)
            if iou > 0.5:
                find_num += 1
                # print('steps-1 success:  image ', i)
# Detection rate over the test set (fraction of images localised with IoU > 0.5).
res = float(find_num)/float(len(image_names))
# total_res += res
print('res = ', res)

# total_res /= 100.0
# print('total_res = ', total_res)



















