import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.init as init
import torchvision.transforms as transforms
from torch.autograd import Variable
from collections import namedtuple
import random
import xml.etree.ElementTree as ET
from PIL import Image
import numpy as np
# The 20 PASCAL VOC object categories, in canonical index order.
classes = ('aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')
# Class name -> integer index, e.g. classes_to_index['aeroplane'] == 0.
classes_to_index = {name: index for index, name in enumerate(classes)}

# Agent configuration.
number_of_actions = 6          # 5 movement actions + 1 terminal (trigger) action
AP_threshold = 0.8             # overlap above this earns the terminal reward
reward_movement_action = 1     # +/- reward for a movement step
reward_terminal_action = 3     # +/- reward for the terminal (trigger) step

# Per-channel normalization statistics.
# NOTE(review): these are the widely used CIFAR-10 statistics; a pretrained
# ImageNet VGG would normally use mean=(0.485, 0.456, 0.406),
# std=(0.229, 0.224, 0.225) — confirm which is intended.
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
# Preprocess a raw image array into a normalized (3, 224, 224) float tensor.
transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=mean, std=std),
])
# One replay-buffer entry: (s, a, s', r).
Transition = namedtuple('Transition', ['state', 'action', 'next_state', 'reward'])


def select_action(state, epsilon, Q_net):
    """Epsilon-greedy action selection.

    With probability ``epsilon`` return a uniformly random action index;
    otherwise return the argmax of ``Q_net(state)`` over the action dimension.

    Parameters
    ----------
    state : torch tensor of shape (1, state_dim), the current state.
    epsilon : float in [0, 1], exploration probability.
    Q_net : callable mapping a state batch to a (1, num_actions) Q-value tensor.

    Returns
    -------
    int action index in [0, number_of_actions).
    """
    if random.random() < epsilon:
        # Explore: uniform random action (was hard-coded 6; use the module
        # constant so the action count is defined in one place).
        action = np.random.randint(0, number_of_actions)
    else:
        # Exploit: greedy action. Inference only, so skip autograd graph
        # construction (the deprecated Variable wrapper is also gone).
        with torch.no_grad():
            qval = Q_net(state)
        action = int(torch.argmax(qval, dim=1)[0])
    return action


def get_feature_extractor():
    """Return the convolutional trunk of a pretrained VGG16-BN.

    Only the ``features`` sub-module (conv/bn/pool layers) is kept; the
    fully-connected classifier head is discarded.
    """
    vgg = models.vgg16_bn(pretrained=True)
    return vgg.features


def get_conv_feature_for_image(image, model, dtype=torch.cuda.FloatTensor):
    """Run one image through the conv ``model`` and return the feature tensor.

    Parameters
    ----------
    image : image accepted by the module-level ``transform`` (e.g. HxWxC array).
    model : feature extractor, e.g. the VGG ``features`` module.
    dtype : tensor type to cast the batch to. The default requires CUDA —
        pass ``torch.FloatTensor`` on CPU-only machines.

    Returns
    -------
    The detached feature tensor produced by ``model``.
    """
    tensor = transform(image)        # normalized (3, 224, 224) tensor
    batch = tensor.unsqueeze(0)      # add batch dim -> (1, 3, 224, 224)
    # Inference only: avoid building an autograd graph (replaces the
    # deprecated Variable wrapper).
    with torch.no_grad():
        feature = model(batch.type(dtype))
    return feature.data


# Replay Buffer
# Replay Buffer
class ReplayMemory(object):
    """Fixed-capacity ring buffer of Transition tuples."""

    def __init__(self, capacity):
        self.capacity = capacity   # maximum number of stored transitions
        self.memory = []           # backing store; grows until full
        self.position = 0          # slot the next push writes into

    def push(self, *args):
        """Store one transition, overwriting the oldest entry once full."""
        if len(self.memory) == self.capacity:
            self.memory[self.position] = Transition(*args)
        else:
            # Not yet full: position always equals len(self.memory) here,
            # so appending is equivalent to writing at self.position.
            self.memory.append(Transition(*args))
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return ``batch_size`` transitions sampled uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)


def get_q_network(weights_path="0", input_size=25112, hidden_size=1024, num_actions=6):
    """Build the Q-network MLP, optionally loading saved weights.

    Parameters
    ----------
    weights_path : ``"0"`` to create a fresh Xavier-normal-initialized network,
        otherwise a path passed to ``torch.load`` for a saved state_dict.
    input_size : flattened state size. Default 25112 — apparently
        512*7*7 = 25088 VGG conv features plus a 24-dim action history
        (4 actions x 6 one-hot) — TODO confirm against get_state's caller.
    hidden_size : width of the two hidden layers.
    num_actions : number of Q-value outputs (one per action).
    """
    model = nn.Sequential(
        nn.Linear(input_size, hidden_size),
        nn.ReLU(),
        nn.Dropout(0.2),
        nn.Linear(hidden_size, hidden_size),
        nn.ReLU(),
        nn.Dropout(0.2),
        nn.Linear(hidden_size, num_actions),
    )
    if weights_path != "0":
        model.load_state_dict(torch.load(weights_path))
        print('load weights successfully')
    else:
        # Xavier-normal init on every Linear layer; this may differ from the
        # original author's initialization scheme.
        def weights_init(m):
            if isinstance(m, nn.Linear):
                init.xavier_normal_(m.weight.data)

        model.apply(weights_init)
        print('xavier_normal init successfully')
    return model


def get_array_of_q_networks_for_pascal(weights_path="0", num_classes=20):
    """Build one Q-network per PASCAL class (20 by default).

    ``weights_path == "0"`` creates fresh networks; otherwise each network i
    loads its weights from ``<weights_path>/model<i>h5``.
    NOTE(review): the filename has no dot before "h5" — confirm the files on
    disk really are named e.g. ``model0h5`` rather than ``model0.h5``.

    Returns an object array of shape (1, num_classes).
    """
    if weights_path == "0":
        q_networks = [get_q_network("0") for _ in range(num_classes)]
    else:
        q_networks = [get_q_network(weights_path + "/model" + str(i) + "h5")
                      for i in range(num_classes)]
    return np.array([q_networks])


def get_array_of_replay_memory(Buffer_size, num_classes=20):
    """Create one ReplayMemory of capacity ``Buffer_size`` per class.

    Returns an object array of shape (1, num_classes), mirroring
    get_array_of_q_networks_for_pascal.
    """
    memorys = [ReplayMemory(Buffer_size) for _ in range(num_classes)]
    return np.array([memorys])


def get_state(image, history_vector, model_vgg, dtype=torch.cuda.FloatTensor):
    """Build the agent state: flattened conv features + flattened action history.

    Returns a (1, feature_dim + history_dim) tensor of type ``dtype``.
    """
    conv_feat = get_conv_feature_for_image(image, model_vgg, dtype).view(1, -1)
    history_flat = history_vector.view(1, -1).type(dtype)
    return torch.cat((conv_feat, history_flat), 1)


# FIFO
def update_history_vector(history_vector, action):
    action_vector = torch.zeros([1, number_of_actions])
    action_vector[0][action] = 1
    history_vector = torch.cat((action_vector, history_vector), 0)[:-1]
    return history_vector


def get_reward_movement(AP, new_AP):
    """Reward for a movement action: positive iff the overlap improved."""
    improved = new_AP > AP
    return reward_movement_action if improved else -reward_movement_action


def get_reward_trigger(new_AP):
    """Reward for the terminal action: positive iff overlap beats AP_threshold."""
    good_enough = new_AP > AP_threshold
    return reward_terminal_action if good_enough else -reward_terminal_action


# def get_id_of_class_name(class_name):
#     if class_name == 'aeroplane':
#         return 0
#     elif class_name == 'bicycle':
#         return 1
#     elif class_name == 'bird':
#         return 2
#     elif class_name == 'boat':
#         return 3
#     elif class_name == 'bottle':
#         return 4
#     elif class_name == 'bus':
#         return 5
#     elif class_name == 'car':
#         return 6
#     elif class_name == 'cat':
#         return 7
#     elif class_name == 'chair':
#         return 8
#     elif class_name == 'cow':
#         return 9
#     elif class_name == 'diningtable':
#         return 10
#     elif class_name == 'dog':
#         return 11
#     elif class_name == 'horse':
#         return 12
#     elif class_name == 'motorbike':
#         return 13
#     elif class_name == 'person':
#         return 14
#     elif class_name == 'pottedplant':
#         return 15
#     elif class_name == 'sheep':
#         return 16
#     elif class_name == 'sofa':
#         return 17
#     elif class_name == 'train':
#         return 18
#     elif class_name == 'tvmonitor':
#         return 19



# process image
# def load_images_labels_in_data_set(data_set_name, path_voc):
#     file_path = path_voc + '/ImageSets/Main/' + data_set_name + '.txt'
#     with open(file_path) as f:
#         images_labels = [x.strip().split()[1] for x in f.readlines()]
#     return images_labels
#
#
# # Only loads images whose label == 1
# def load_image_data(path_voc, data_set_name, class_object):
#     print("load images" + path_voc)
#     image_names = np.array(load_images_names_in_data_set(data_set_name, path_voc))
#     labels = load_images_labels_in_data_set(data_set_name, path_voc)
#     image_names_class = []
#     for i in range(len(image_names)):
#         if labels[i] == class_object:
#             image_names_class.append(image_names[i])
#     image_names = image_names_class
#     images = get_all_images(image_names, path_voc)
#     print("total image:%d" % len(image_names))
#     return image_names, images
#
#
# def load_images_names_in_data_set(data_set_name, path_voc):
#     file_path = path_voc + '/ImageSets/Main/' + data_set_name + '.txt'
#     with open(file_path) as f:
#         images_names = [x.strip().split()[0] for x in f.readlines()]
#     return images_names
#
#
# def get_all_images(image_names, path_voc):
#     images = []
#     for j in range(np.size(image_names)):
#         image_name = image_names[j]
#         string = path_voc + '/JPEGImages/' + image_name + '.jpg'
#         img = Image.open(string)
#         images.append(np.array(img))
#     return images
#
#
# # process xml profile
# def get_bb_of_gt_from_pascal_xml_annotation(xml_name, voc_path):
#     string = voc_path + '/Annotations/' + xml_name + '.xml'
#     tree = ET.parse(string)
#     root = tree.getroot()
#     names = []
#     x_min = []
#     x_max = []
#     y_min = []
#     y_max = []
#     for child in root:
#         if child.tag == 'object':
#             for child2 in child:
#                 if child2.tag == 'name':
#                     names.append(child2.text)  # class this object belongs to
#                 elif child2.tag == 'bndbox':  # bounding box of this object
#                     for child3 in child2:
#                         if child3.tag == 'xmin':
#                             x_min.append(child3.text)
#                         elif child3.tag == 'xmax':
#                             x_max.append(child3.text)
#                         elif child3.tag == 'ymin':
#                             y_min.append(child3.text)
#                         elif child3.tag == 'ymax':
#                             y_max.append(child3.text)
#     category_and_bb = np.zeros([np.size(names), 5])
#     for i in range(np.size(names)):
#         category_and_bb[i][0] = get_id_of_class_name(names[i])
#         category_and_bb[i][1] = x_min[i]
#         category_and_bb[i][2] = x_max[i]
#         category_and_bb[i][3] = y_min[i]
#         category_and_bb[i][4] = y_max[i]
#
# #     # category_and_bb[i][0] is the class id of the i-th object
# #     # category_and_bb[i][1~4] is the bounding box of the i-th object
#     return category_and_bb
#
#
# def get_all_annotations(image_names, voc_path):
#     annotations = []
#     for i in range(np.size(image_names)):
#         image_name = image_names[i]
#         annotations.append(get_bb_of_gt_from_pascal_xml_annotation(image_name, voc_path))
#     return annotations
#
#
# def generate_bounding_box_from_annotation(annotation, image_shape):
#     length_annotation = annotation.shape[0]
#     masks = np.zeros([image_shape[0], image_shape[1], length_annotation])
#     for i in range(0, length_annotation):
#         masks[int(annotation[i, 3]):int(annotation[i, 4]), int(annotation[i, 1]):int(annotation[i, 2]), i] = 1
#     return masks
#
#
# def get_ids_objects_from_annotation(annotation):
#     return annotation[:, 0]