# --------------------------------------------------------
# Pytorch Meta R-CNN
# Written by Anny Xu, Xiaopeng Yan, based on the code from Jianwei Yang
# --------------------------------------------------------
import os
import os.path
import sys
import torch.utils.data as data
import cv2
import torch
import random
import numpy as np
if sys.version_info[0] == 2:
    import xml.etree.cElementTree as ET
else:
    import xml.etree.ElementTree as ET
from model.utils.config import cfg
import collections

from pycocotools.coco import COCO

import skimage.io
import skimage.transform
import skimage.color
import skimage

from PIL import Image

import time
import pdb

class MetaDataset(data.Dataset):

    """Meta (PRN support-set) dataset built from Pascal VOC.

    For every class in ``metaclass`` it samples up to ``shots`` support
    instances.  Each support is the full image resized to ``img_size`` with
    a binary foreground mask of one object appended as a fourth channel.

    Arguments:
        root (string): filepath to VOCdevkit folder.
        image_sets (list): (year, set) pairs, e.g. ``[('2007', 'trainval')]``.
        metaclass (list): class names to sample supports for.
        img_size (int): the PRN network input size.
        shots (int): the number of support instances per class.
        shuffle (bool): shuffle image order before sampling.
        phase (int): meta-training phase (1 or 2).
        basenovel (str): ``'base'`` or a novel tag; controls the label offset
            and the shot-list file names.
        epoch (int): current epoch, folded into the deterministic seed.
        random_seed (int): base seed for deterministic sampling.
        split (int): few-shot split index (used in the shot-list file path).
    """

    def __init__(self, root, image_sets, metaclass, img_size, shots=1,
                 shuffle=False, phase=1, basenovel=None, epoch=None,
                 random_seed=0, split=1):
        self.root = root
        self.image_set = image_sets
        self.img_size = img_size
        self.metaclass = metaclass
        self.shots = shots
        self.epoch = epoch
        self.random_seed = random_seed
        self.split = split
        if phase == 2:
            # NOTE(review): upstream Meta R-CNN multiplied by 3 here; the
            # multiplication is deliberately commented out, so phase 2 keeps
            # plain `shots`.  Behaviour preserved.
            self.shots = shots
        self.shuffle = shuffle
        self._annopath = os.path.join('%s', 'Annotations', '%s.xml')
        self._imgpath = os.path.join('%s', 'JPEGImages', '%s.jpg')
        self.basenovel = basenovel

        # Shot lists are written while sampling in get_prndata() and closed
        # there: one per-(split, seed, epoch) copy plus the default location.
        shot_dir = os.path.join(self.root, 'VOC2007', 'ImageSets/Main/%s/' % (self.split))
        # makedirs with exist_ok is race-free and also creates missing
        # parents, unlike the previous exists()+mkdir() pattern.
        os.makedirs(shot_dir, exist_ok=True)
        self.shot_path_copy = open(os.path.join(
            self.root, 'VOC2007', 'ImageSets/Main/%s/%s_%s_%s_shots_epoch_%s.txt'
            % (self.split, self.random_seed, self.basenovel, shots, self.epoch)), 'w')  # the default saved path
        self.shot_path = open(os.path.join(
            self.root, 'VOC2007',
            'ImageSets/Main/%s_shots.txt' % (self.basenovel)), 'w')  # the default saved path

        # (rootpath, image_id) pairs gathered from every requested image set.
        self.ids = list()
        for (year, name) in image_sets:
            self._year = year
            rootpath = os.path.join(self.root, 'VOC' + year)
            with open(os.path.join(rootpath, 'ImageSets', 'Main', name + '.txt')) as index_file:
                for line in index_file:
                    self.ids.append((rootpath, line.strip()))

        if basenovel == 'base':
            # base classes are labelled 0 .. len(metaclass)-1
            class_to_idx = dict(zip(self.metaclass, range(len(self.metaclass))))
        else:
            # novel classes start at 15 -- presumably right after the 15 base
            # classes of the VOC few-shot split; confirm against the caller.
            class_to_idx = dict(zip(self.metaclass, range(15, 15 + len(self.metaclass))))

        # prndata[i]: (num_classes, 4, H, W) tensor holding the i-th shot of
        # every class (RGB + mask channel); prncls[i]: matching label list.
        self.prndata = []
        self.prncls = []
        prn_image, prn_mask, gt_boxes, prn_name = self.get_prndata()
        for i in range(shots):
            cls = []
            supports = []
            for key in prn_image:
                img = torch.from_numpy(np.array(prn_image[key][i])).unsqueeze(0)
                mask = torch.from_numpy(np.array(prn_mask[key][i]))
                mask = mask.unsqueeze(0).unsqueeze(3)
                imgmask = torch.cat([img, mask], dim=3)  # (1, H, W, 4)
                supports.append(imgmask.permute(0, 3, 1, 2).contiguous())
                cls.append(class_to_idx[key])
            self.prncls.append(cls)
            self.prndata.append(torch.cat(supports, dim=0))

    def __getitem__(self, index):
        """Return (support tensor, class-label list) for shot ``index``."""
        return self.prndata[index], self.prncls[index]

    def get_prndata(self):
        """Sample the support instances and build the PRN inputs.

        Returns four dicts keyed by class name: resized mean-subtracted
        images, binary object masks, resized ``[x1, y1, x2, y2]`` boxes and
        the source image ids.  At most one instance is taken per image,
        scanning until every class has ``self.shots`` instances.
        """
        if self.shuffle:
            random.shuffle(self.ids)
        else:
            # deterministic order that still varies across epochs/seeds
            random.seed(self.epoch + self.random_seed)
            random.shuffle(self.ids)

        prn_image = collections.defaultdict(list)
        prn_mask = collections.defaultdict(list)
        prn_name = collections.defaultdict(list)
        gt_boxes = collections.defaultdict(list)
        classes = collections.defaultdict(int)
        for cls in self.metaclass:
            classes[cls] = 0

        for img_id in self.ids:
            target = ET.parse(self._annopath % img_id).getroot()
            img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
            img = img[:, :, ::-1]                      # BGR -> RGB
            img = img.astype(np.float32, copy=False)
            img -= cfg.PIXEL_MEANS
            mask = np.zeros((self.img_size, self.img_size), dtype=np.float32)
            h, w, _ = img.shape
            y_ratio = float(h) / self.img_size
            x_ratio = float(w) / self.img_size
            img_resize = cv2.resize(img, (self.img_size, self.img_size),
                                    interpolation=cv2.INTER_LINEAR)

            for obj in target.iter('object'):
                if int(obj.find('difficult').text) == 1:
                    continue
                name = obj.find('name').text.strip()
                if name not in self.metaclass:
                    continue
                # NOTE(review): `break` (not `continue`) skips the rest of the
                # image once one class is saturated -- behaviour preserved.
                if classes[name] >= self.shots:
                    break
                classes[name] += 1
                bbox = obj.find('bndbox')
                # VOC coordinates are 1-based; shift to 0-based, then rescale
                # into the resized image.
                bndbox = []
                for i, pt in enumerate(('xmin', 'ymin', 'xmax', 'ymax')):
                    cur_pt = int(float(bbox.find(pt).text)) - 1
                    ratio = x_ratio if i % 2 == 0 else y_ratio
                    bndbox.append(int(cur_pt / ratio))
                mask[bndbox[1]:bndbox[3], bndbox[0]:bndbox[2]] = 1
                prn_image[name].append(img_resize)
                prn_mask[name].append(mask)
                prn_name[name].append(img_id[1])
                gt_boxes[name].append(bndbox)
                self.shot_path.write(str(img_id[1]) + '\n')
                self.shot_path_copy.write(str(img_id[1]) + '\n')
                break  # at most one support instance per image

            if len(classes) > 0 and min(classes.values()) == self.shots:
                break  # every class has enough shots
        self.shot_path_copy.close()
        self.shot_path.close()
        return prn_image, prn_mask, gt_boxes, prn_name

    def __len__(self):
        """Number of sampled shots (one PRN batch per shot)."""
        return len(self.prndata)

class COCO_MetaDataset(data.Dataset):

    """Meta (PRN support-set) dataset built from COCO.

    For every class in ``metaclass`` it samples up to ``shots`` support
    instances; each support is the full image resized to ``img_size`` with a
    binary foreground mask of one object appended as a fourth channel.

    Arguments:
        root (string): filepath to the COCO folder.
        image_sets (list): (year, set) pairs, e.g. ``[('2014', 'train')]``;
            only the last pair is used.
        metaclass (list): class names to sample supports for.
        img_size (int): the PRN network input size.
        shots (int): the number of support instances per class.
        shuffle (bool): stored but not used when sampling.
        phase (int): meta-training phase; phase 2 triples the shots.
    """

    def __init__(self, root, image_sets, metaclass, img_size, shots=1,
                 shuffle=False, phase=1):
        self.root = root
        self.image_set = image_sets
        self.img_size = img_size
        self.metaclass = metaclass
        self.shots = shots
        if phase == 2:
            self.shots = shots * 3
        self.shuffle = shuffle

        # only one dataset: the last (year, name) pair wins
        for image_set in image_sets:
            self._year, self._name = image_set

        self._annopath = os.path.join(self.root, 'annotations',
                                      'instances_' + self._name + self._year + '.json')
        self._imgpath = os.path.join(self.root, self._name + self._year)
        self.coco = COCO(self._annopath)
        self.class_to_idx = dict(zip(self.metaclass, range(len(self.metaclass))))  # class to index mapping
        self.cocoid_to_indexid()

        # Candidate image ids per base category; 3 * shots gives slack for
        # images that end up skipped (degenerate boxes, saturated classes).
        self.image_ids_set = []
        for idx in self.categories_ids_base:
            self.image_ids_set.append(self.coco.getImgIds(catIds=[idx])[0:3 * self.shots])

        # prndata[i]: (num_classes, 4, H, W) tensor with the i-th shot of each
        # class (RGB + mask channel); prncls[i]: matching label list.
        self.prndata = []
        self.prncls = []
        prn_image, prn_mask = self.get_prndata()
        for i in range(self.shots):
            cls = []
            supports = []
            for key in prn_image:
                img = torch.from_numpy(np.array(prn_image[key][i])).unsqueeze(0)
                mask = torch.from_numpy(np.array(prn_mask[key][i]))
                mask = mask.unsqueeze(0).unsqueeze(3)
                imgmask = torch.cat([img, mask], dim=3)  # (1, H, W, 4)
                supports.append(imgmask.permute(0, 3, 1, 2).contiguous())
                cls.append(self.class_to_idx[key])
            self.prncls.append(cls)
            self.prndata.append(torch.cat(supports, dim=0))

        # Drop the (large) COCO index once the supports are built.
        self.coco = []

    def cocoid_to_indexid(self):
        """Build mappings between COCO category ids and contiguous labels.

        Populates:
            categories_ids: coco category id -> contiguous label index.
            categories_ids_base / categories_names_base: ids / names of the
                categories present in ``metaclass``.
            categories_names: contiguous label index -> class name.
            categories_names_other: category names not in ``metaclass``.
        """
        categories = self.coco.loadCats(self.coco.getCatIds())
        self.categories_ids = {}
        self.categories_ids_base = []
        self.categories_names_base = []
        self.categories_names_other = []
        self.categories_names = {}
        # c['id'] is coco_label, class_to_idx[c['name']] is label, c['name'] is label_name
        for c in categories:
            if c['name'] not in self.metaclass:
                self.categories_names_other.append(c['name'])
                continue
            self.categories_ids_base.append(c['id'])
            self.categories_names_base.append(c['name'])
            self.categories_ids[c['id']] = self.class_to_idx[c['name']]
            self.categories_names[self.class_to_idx[c['name']]] = c['name']

    def __getitem__(self, index):
        """Return (support tensor, class-label list) for shot ``index``."""
        return self.prndata[index], self.prncls[index]

    def load_image(self, image_index):
        """Load one image by COCO id as a normalised float32 RGB array."""
        image_info = self.coco.loadImgs(image_index)[0]
        image_path = os.path.join(self._imgpath, image_info['file_name'])
        image = skimage.io.imread(image_path)
        if len(image.shape) == 2:
            # grayscale -> 3-channel
            image = skimage.color.gray2rgb(image)
        return self.Normalize(image.astype(np.float32) / 255.0)

    def load_annotation(self, image_index):
        """Return the (N, 5) ``[x1, y1, x2, y2, label]`` boxes of one image.

        Boxes whose category is outside the base set and degenerate boxes
        (width or height < 1 pixel) are dropped.
        """
        annotations_ids = self.coco.getAnnIds(image_index, iscrowd=False)
        # some images appear to miss annotations (like image with id 257034)
        if len(annotations_ids) == 0:
            return np.zeros((0, 5))

        # Accumulate rows in a Python list instead of np.append-ing inside
        # the loop, which copied the whole array on every iteration (O(n^2)).
        rows = []
        for content in self.coco.loadAnns(annotations_ids):
            if content['category_id'] not in self.categories_ids_base:
                continue
            # some annotations have basically no width / height, skip them
            if content['bbox'][2] < 1 or content['bbox'][3] < 1:
                continue
            x, y, w, h = content['bbox']
            # transform from [x, y, w, h] to [x1, y1, x2, y2]
            rows.append([x, y, x + w, y + h,
                         self.categories_ids[content['category_id']]])

        if not rows:
            return np.zeros((0, 5))
        return np.asarray(rows, dtype=np.float64)

    def get_prndata(self):
        """Sample the support instances and build the PRN inputs.

        Returns two dicts keyed by class name: resized normalised images and
        binary object masks.  At most one instance is taken per image,
        scanning until every class has ``self.shots`` instances.
        """
        prn_image = collections.defaultdict(list)
        prn_mask = collections.defaultdict(list)
        classes = collections.defaultdict(int)
        for cls in self.metaclass:
            classes[cls] = 0
        for img_ids in self.image_ids_set:
            for img_id in img_ids:
                # Image operation
                img = self.load_image(img_id)
                img = img[:, :, ::-1]            # flip channel order
                img = img.astype(np.float32, copy=False)
                mask = np.zeros((self.img_size, self.img_size), dtype=np.float32)
                h, w, _ = img.shape
                y_ratio = float(h) / self.img_size
                x_ratio = float(w) / self.img_size
                img_resize = cv2.resize(img, (self.img_size, self.img_size),
                                        interpolation=cv2.INTER_LINEAR)
                # generate the mask from the annotation boxes
                annotations = self.load_annotation(img_id)
                for row_id in range(annotations.shape[0]):
                    name = self.categories_names[annotations[row_id, 4]]
                    if name not in self.metaclass:
                        continue
                    # NOTE(review): `break` (not `continue`) skips the rest
                    # of the image once one class is saturated -- behaviour
                    # preserved.
                    if classes[name] >= self.shots:
                        break
                    classes[name] += 1
                    # rescale the box into the resized image
                    bndbox = [int(annotations[row_id, 0] / x_ratio),
                              int(annotations[row_id, 1] / y_ratio),
                              int(annotations[row_id, 2] / x_ratio),
                              int(annotations[row_id, 3] / y_ratio)]
                    mask[bndbox[1]:bndbox[3], bndbox[0]:bndbox[2]] = 1
                    prn_image[name].append(img_resize)
                    prn_mask[name].append(mask)
                    break  # at most one support instance per image
                if len(classes) > 0 and min(classes.values()) == self.shots:
                    break  # every class is saturated; stop scanning this list
        return prn_image, prn_mask

    def Normalize(self, image):
        """Standardise an RGB image in [0, 1] with ImageNet mean/std."""
        # Stored on self each call to preserve the original attribute
        # side effect for any external readers.
        self.mean = np.array([[[0.485, 0.456, 0.406]]])
        self.std = np.array([[[0.229, 0.224, 0.225]]])
        return (image - self.mean) / self.std

    def __len__(self):
        """Number of sampled shots (one PRN batch per shot)."""
        return len(self.prndata)

