from __future__ import print_function, division
import sys
import os
from typing import Any
import xml.etree.ElementTree as ET
from pathlib import Path
import torch
import numpy as np
import random
import csv

from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
import torch.nn.functional as F

from pycocotools.coco import COCO
from util.common import image_preprocess, annotation_resize, xywh2xyxy
import cv2

from PIL import Image


def parse_per_xml(xml_name):
    """Parse a single VOC-style annotation XML file.

    Args:
        xml_name: path to the annotation XML file.

    Returns:
        A tuple ``(info_dict, objs)`` where ``info_dict`` holds the image
        ``filename``, ``width``, ``height`` and ``depth``, and ``objs`` is a
        list of ``[class_name, xmin, ymin, xmax, ymax]`` entries, one per
        ``<object>`` element (coordinates as ints).
    """
    with open(xml_name, "r", encoding='UTF-8') as in_file:
        tree = ET.parse(in_file)
    root = tree.getroot()
    image_name = root.find('filename').text
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    depth = int(size.find('depth').text)
    info_dict = {'filename': image_name, 'width': w, 'height': h, 'depth': depth}
    objs = []
    for obj in root.iter('object'):
        cls = obj.find('name').text
        xmlbox = obj.find('bndbox')
        # Some VOC-style labeling tools emit float coordinates (e.g. "12.0");
        # int(float(...)) accepts both integer and float strings, whereas a
        # bare int(...) raised ValueError on the latter.
        xmin = int(float(xmlbox.find('xmin').text))
        ymin = int(float(xmlbox.find('ymin').text))
        xmax = int(float(xmlbox.find('xmax').text))
        ymax = int(float(xmlbox.find('ymax').text))
        objs.append([cls, xmin, ymin, xmax, ymax])
    return info_dict, objs

# Simple dataset for classification tasks
class simple_dataset(Dataset):
    """Simple classification dataset.

    Reads a tab-separated annotation file where each line is
    ``<file_name>\\t<class_index>``; images are expected under
    ``<root_path>/images/``.
    """

    def __init__(self, root_path, set_name):
        super(simple_dataset, self).__init__()
        self.data_dict = []
        self.classes = []  # distinct class indices, in first-seen order
        # Context manager guarantees the file is closed even if a line fails
        # to parse (the original open()/close() pair leaked on exception).
        with open(os.path.join(root_path, set_name), 'r') as f:
            for line in f:
                file_name, cls = line.strip().split('\t')
                cls = int(cls)
                if cls not in self.classes:
                    self.classes.append(cls)
                self.data_dict.append({'image': os.path.join(root_path, 'images', file_name), 'cls': cls})

    def __getitem__(self, idx):
        # cv2 loads BGR, HWC; permute to channel-first CHW for PyTorch.
        image = cv2.imread(self.data_dict[idx]['image'])
        image = torch.from_numpy(image).permute(2, 0, 1)
        cls = self.data_dict[idx]['cls']
        return image, torch.tensor(cls).float()

    def __len__(self):
        return len(self.data_dict)

    def num_classes(self):
        # Binary classification is modeled with a single output logit.
        if len(self.classes) == 2:
            return 1
        return len(self.classes)

# Simple object-detection Dataset that takes txt-format annotations, one line
# per image, formatted as:
# image_path boxes
# image_path is the image location relative to the dataset root; for the file
# tree below it could be images/1.jpg, images/2.jpg, ...
# Each box is written as cls,x1,y1,x2,y2 and boxes are separated by spaces.
# Dataset file tree:
# dataset
#    |
#    |---images
#          |
#          |---1.jpg
#          |---2.jpg
#          ...
#    |---train.txt
#    |---val.txt
#    |---total.txt
#    |---classes.txt

class SimpleDetDataset(Dataset):
    """Object-detection dataset backed by a plain txt annotation file.

    Each annotation line is ``<image_path> <cls,x1,y1,x2,y2> ...`` where
    ``image_path`` is relative to ``root_dir`` and boxes are space-separated.
    Class names, if present, are read from ``<root_dir>/classes.txt``.
    """

    # resize: output image size; xyxy: box format returned by __getitem__ is
    # xyxy (True) or xywh (False)
    def __init__(self, root_dir, annotation_file='train.txt', batch=16, resize=(640,640), xyxy=True, transform=None, device=torch.device('cpu')):
        self.root_dir = root_dir
        self.annotation_file = annotation_file
        self.batch = batch
        self.outsize = resize
        self.device = device
        if not os.path.exists(self.root_dir):
            print('root dir {} does not exist'.format(self.root_dir))
            return
        with open(os.path.join(self.root_dir, self.annotation_file), 'r') as f:
            raw_lines = [l.strip() for l in f.readlines()]
        # BUGFIX: the previous code removed entries from the list it was
        # iterating (list.remove inside the loop), which silently skipped the
        # line following every missing image — a missing image could thus be
        # admitted into images_paths.  Build filtered lists instead.
        self.annotations_lines = []
        self.images_paths = []
        self.images_names = []
        self.labels = []
        for line in raw_lines:
            image_path = os.path.join(self.root_dir, line.split(' ')[0])
            if not os.path.exists(image_path):
                continue
            self.annotations_lines.append(line)
            self.images_paths.append(image_path)
            self.images_names.append(Path(image_path).name)
            # Each box token is "cls,x1,y1,x2,y2" -> list of 5 ints.
            self.labels.append([[int(v) for v in box.split(',')]
                                for box in line.split(' ')[1:]])
        self.classes = []
        classes_path = os.path.join(root_dir, 'classes.txt')
        if os.path.exists(classes_path):
            with open(classes_path, 'r') as f:
                self.classes = [l.strip() for l in f.readlines()]

    def __getitem__(self, idx):
        with torch.no_grad():
            img = cv2.imread(self.images_paths[idx])
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            _annot = np.array(self.labels[idx])
            nl = _annot.shape[0]                            # number of boxes
            annot = np.zeros((nl, 6), dtype=np.float64)
            annot[:, 1:] = _annot
            annot = annotation_resize(annot, img.shape, self.outsize)
            # image_preprocess performs the basic steps: resizing + normalization.
            img = image_preprocess(img, self.outsize, True)

            imgTensor = torch.from_numpy(img).permute(2, 0, 1)
            annotTensor = torch.from_numpy(annot)
            return imgTensor, annotTensor    # annot row format: imageidx, cls, xyxy

    def __len__(self):
        return len(self.images_paths)

    def num_classes(self):
        return len(self.classes)

    # Manually stack a batch: images are stacked, annotations are concatenated
    # with column 0 set to the image index within the batch.
    @staticmethod
    def collate_fn(batch):
        img, label = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0)
    
# Standard Pascal-VOC dataset loader class
# VOCdevkit
#     |
#     |---VOC2007
#            |
#            |---Annotations
#                    |
#                    |---000005.xml
#                    |---...
#            |---ImageSets
#            |---JPEGImages
#                    |
#                    |---000005.jpg
#            |---SegmentationClass
#            |---SegmentationObject
class VOCDataset(Dataset):
    """Standard Pascal-VOC dataset loader (Annotations/ + JPEGImages/ layout)."""

    def __init__(self, root_dir='VOCdevkit', set_name='VOC2007', predefined_classes='', resize=(640, 640)):
        """
        Args:
            root_dir: path to the VOCdevkit directory.
            set_name: sub-dataset name, e.g. 'VOC2007'.
            predefined_classes: optional file name (relative to the set
                directory) listing one class name per line; when empty, the
                class set is collected by scanning every annotation XML.
            resize: intended output size (currently unused in __getitem__).
        """
        super().__init__()
        self.outsize = resize
        self.root_dir = root_dir
        self.set_name = set_name
        annotations_path = os.path.join(self.root_dir, self.set_name, 'Annotations')
        self.images_path = os.path.join(self.root_dir, self.set_name, 'JPEGImages')
        self.annotations = [os.path.join(annotations_path, x) for x in os.listdir(annotations_path)]
        # BUGFIX: these containers were previously initialized only in the
        # else-branch, so passing predefined_classes raised AttributeError.
        self.classes = []
        self.classes_codes = {}
        if predefined_classes:
            with open(os.path.join(self.root_dir, self.set_name, predefined_classes)) as f:
                for line in f:
                    cls_name = line.strip()
                    self.classes_codes[cls_name] = len(self.classes)
                    self.classes.append(cls_name)
        else:
            # Collect every class name appearing in the annotation files.
            for xml_file in self.annotations:
                _, objs = parse_per_xml(xml_file)
                for obj in objs:
                    if obj[0] not in self.classes:
                        self.classes_codes[obj[0]] = len(self.classes)
                        self.classes.append(obj[0])

    def __getitem__(self, index):
        """Return (image_tensor, annotation_tensor) for one sample.

        Annotation rows are [image_idx, cls, x1, y1, x2, y2]; image_idx is
        filled in later by collate_fn.
        """
        img_info, objs = parse_per_xml(self.annotations[index])
        image_file = os.path.join(self.images_path, img_info['filename'])
        if not os.path.exists(image_file):
            raise ValueError('{} does not exist'.format(image_file))
        img = cv2.imread(image_file)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        annot = np.zeros((len(objs), 6))
        # Guard against images with no objects: assigning an empty array into
        # annot[:, 1:] raised a broadcast error in the previous version.
        if objs:
            # Replace class names with their numeric codes in-place.
            for obj in objs:
                obj[0] = self.classes_codes[obj[0]]
            annot[:, 1:] = np.array(objs)

        # annot = annotation_resize(annot, img.shape, self.outsize)
        # img = image_preprocess(img, self.outsize, True)
        imgTensor = torch.from_numpy(img).permute(2, 0, 1)
        annotTensor = torch.from_numpy(annot)
        return imgTensor, annotTensor    # annot row format: imageidx, cls, xyxy

    def __len__(self):
        return len(self.annotations)

    def num_classes(self):
        return len(self.classes)

    @staticmethod
    def collate_fn(batch):
        img, label = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0)


# Standard COCO dataset loader class
class CocoDataset(Dataset):
    """Standard COCO dataset loader built on pycocotools.

    Produces (image_tensor, annotation_tensor) pairs where annotation rows
    are [image_idx, label, x1, y1, x2, y2]; image_idx is filled in by
    collate_fn.
    """

    def __init__(self, root_dir, set_name='train2017', batch=16, transform=None):
        """
        Args:
            root_dir (string): COCO directory; annotations are read from
                annotations/instances_<set_name>.json and images from
                images/<set_name>/.
            set_name (string): dataset split name, e.g. 'train2017'.
            batch (int): stored batch size (not used inside this class).
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.root_dir = root_dir
        self.set_name = set_name
        self.transform = transform
        self.batch = batch
        self.coco = COCO(os.path.join(self.root_dir, 'annotations', 'instances_' + self.set_name + '.json'))
        self.image_ids = self.coco.getImgIds()

        self.load_classes()

    def load_classes(self):
        """Build mappings between contiguous labels, COCO category ids and names."""
        # load class names (name -> label)
        categories = self.coco.loadCats(self.coco.getCatIds())
        categories.sort(key=lambda x: x['id'])

        self.classes = {}               # name -> contiguous label
        self.coco_labels = {}           # contiguous label -> COCO category id
        self.coco_labels_inverse = {}   # COCO category id -> contiguous label
        for c in categories:
            self.coco_labels[len(self.classes)] = c['id']
            self.coco_labels_inverse[c['id']] = len(self.classes)
            self.classes[c['name']] = len(self.classes)

        # also load the reverse (label -> name)
        self.labels = {}
        for key, value in self.classes.items():
            self.labels[value] = key

    def __len__(self):
        return len(self.image_ids)

    def __getitem__(self, idx):                        # basic preprocessing done here: resize, channel-order change, dimension permute, normalization

        img = self.load_image(idx)
        annot = self.load_annotations(idx)
        # load_annotations returns boxes as xywh; convert them to xyxy here.
        annot = xywh2xyxy(annot)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        annot = annotation_resize(annot, img.shape, (640, 640))
        img = image_preprocess(img, (640, 640), True)
        ImgTensor = torch.from_numpy(img)
        ImgTensor = ImgTensor.permute(2, 0, 1)

        return ImgTensor, torch.from_numpy(annot)  # annotation: category_id, bbox

    def load_image(self, image_index):
        """Read the image for the given index as a BGR numpy array (cv2 default)."""
        image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
        path = os.path.join(self.root_dir, 'images', self.set_name, image_info['file_name'])
        img = cv2.imread(path)
        # img = cv2.resize(img, (640, 640))
        # img.astype(np.float32) / 255.0
        return img

    def load_annotations(self, image_index):
        """Return an (N, 6) array of [0, label, x, y, w, h] rows for one image."""
        # get ground truth annotations
        annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
        annotations = np.zeros((0, 6))

        # some images appear to miss annotations (like image with id 257034)
        if len(annotations_ids) == 0:
            return annotations

        # parse annotations
        coco_annotations = self.coco.loadAnns(annotations_ids)
        for idx, a in enumerate(coco_annotations):

            # some annotations have basically no width / height, skip them
            if a['bbox'][2] < 1 or a['bbox'][3] < 1:
                continue

            annotation = np.zeros((1, 6))
            annotation[0, 2:6] = a['bbox']
            annotation[0, 1] = self.coco_label_to_label(a['category_id'])
            annotations = np.append(annotations, annotation, axis=0)

        # transform from [x, y, w, h] to [x1, y1, x2, y2]
        # annotations[:, 3] = annotations[:, 1] + annotations[:, 3]
        # annotations[:, 4] = annotations[:, 2] + annotations[:, 4]

        # NOTE: boxes here are still xywh — the conversion above is commented
        # out; __getitem__ applies xywh2xyxy to this return value instead.
        return annotations

    @staticmethod
    def collate_fn(batch):
        """Stack images and concatenate annotations, tagging column 0 with the batch index."""
        img, label = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0)

    def coco_label_to_label(self, coco_label):
        """Map a COCO category id to the contiguous training label."""
        return self.coco_labels_inverse[coco_label]

    def label_to_coco_label(self, label):
        """Map a contiguous training label back to the COCO category id."""
        return self.coco_labels[label]

    def image_aspect_ratio(self, image_index):
        """Return width/height of the image at image_index (from metadata only)."""
        image = self.coco.loadImgs(self.image_ids[image_index])[0]
        return float(image['width']) / float(image['height'])

    def num_classes(self):
        # return 80
        return len(self.coco.cats)



