import numpy as np
from torch.utils.data import Dataset
import os
import torch
import json
from PIL import Image
from pathlib import Path
from my_utils.xml_utils import parse_xml_to_dict
from my_utils.voc_utils import voc_anno_2_coco_target
import pdb

class VOCDataSet(Dataset):
    """Read and parse the PASCAL VOC2007/2012 detection dataset.

    Expects the standard VOCdevkit layout::

        <voc_root>/VOCdevkit/VOC<year>/
            Annotations/*.xml
            JPEGImages/*.jpg
            ImageSets/Main/{train,val}.txt

    Annotation files listed in the split file are validated up front;
    missing/empty files and files without any ``object`` entry are skipped
    with a console message.
    """

    def __init__(self, voc_root, year="2012", transforms=None, txt_name: str = "train.txt"):
        """
        Args:
            voc_root: path to the VOCdevkit directory or to its parent.
            year: dataset year, "2007" or "2012".
            transforms: optional callable applied as ``transforms(image, target)``.
            txt_name: split file name under ImageSets/Main (e.g. "train.txt").

        Raises:
            ValueError: if ``year`` is not "2007" or "2012".
            FileNotFoundError: if the split file or class-index JSON is missing.
            RuntimeError: if no valid annotation file is found for the split.
        """
        # Explicit raise instead of assert: asserts are stripped under `python -O`.
        if year not in ("2007", "2012"):
            raise ValueError("year must be in ['2007', '2012']")

        # Tolerate being given either VOCdevkit itself or its parent directory.
        # str() so a pathlib.Path argument works too, not only str.
        root = Path(voc_root)
        if "VOCdevkit" in str(root):
            self.root = root / f"VOC{year}"
        else:
            self.root = root / "VOCdevkit" / f"VOC{year}"
        self.img_root = self.root / "JPEGImages"
        self.annotations_root = self.root / "Annotations"

        # Read the train.txt/val.txt split file.
        txt_path = self.root / "ImageSets" / "Main" / txt_name
        if not txt_path.exists():
            raise FileNotFoundError(f"not found {txt_path} file.")

        # One XML annotation path per non-empty line of the split file.
        with open(txt_path, encoding="utf-8") as read:
            xml_list = [self.annotations_root / f"{line.strip()}.xml"
                        for line in read if line.strip()]

        self.xml_list = self._validate_annotation_files(xml_list)
        if not self.xml_list:
            raise RuntimeError(f"in '{txt_path}' file does not find any information.")

        # Read the class-name -> index mapping (relative to the CWD, as before).
        json_file = Path('./pascal_voc_classes.json')
        if not json_file.exists():
            raise FileNotFoundError(f"{json_file} not found.")
        with open(json_file, encoding="utf-8") as f:
            self.class_dict = json.load(f)

        self.transforms = transforms

    @staticmethod
    def _parse_annotation(xml_path) -> dict:
        """Read one XML file and return its parsed ``annotation`` dict."""
        with open(xml_path, encoding="utf-8") as fid:
            xml_str = fid.read()
        return parse_xml_to_dict(xml_str)["annotation"]

    def _validate_annotation_files(self, xml_list: list) -> list:
        """Return the subset of ``xml_list`` that exists, parses, and has objects."""
        rs = []
        for xml_path in xml_list:
            if not xml_path.exists():
                print(f"Warning: not found '{xml_path}', skip this annotation file.")
                continue
            data = self._parse_annotation(xml_path)
            if not data:
                print(f"INFO: invalidate annotation file {xml_path}, skip.")
                continue
            if "object" not in data:
                print(f"INFO: no objects in {xml_path}, skip.")
                continue
            rs.append(xml_path)
        return rs

    def __len__(self):
        return len(self.xml_list)

    def __getitem__(self, idx):
        """Return ``(image, target)`` for sample ``idx``.

        ``target`` is whatever ``voc_anno_2_coco_target`` produces
        (COCO-style dict), optionally transformed together with the image.
        """
        xml_path = self.xml_list[idx]
        anno = self._parse_annotation(xml_path)
        # Should not happen: _validate_annotation_files filtered these out.
        if "object" not in anno:
            raise ValueError(f"{xml_path} lack of object information.")

        target = voc_anno_2_coco_target(idx, anno, self.class_dict)

        img_path = self.img_root / anno["filename"]
        image = Image.open(img_path)
        # Raise (not assert) so the check survives `python -O`.
        if image.format != "JPEG":
            raise ValueError(f"Image '{img_path}' format not JPEG")

        if self.transforms is not None:
            image, target = self.transforms(image, target)

        return image, target

    def get_height_and_width(self, idx):
        """Return ``(height, width)`` from the annotation only (no image decode)."""
        data = self._parse_annotation(self.xml_list[idx])
        size = data["size"]
        return int(size["height"]), int(size["width"])

    def coco_index(self, idx):
        """Return ``((height, width), target)`` for pycocotools statistics.

        Reads only the annotation XML — never the image — so it is much
        faster than ``__getitem__`` when only labels/boxes are needed.

        Args:
            idx: index of the sample to read.
        """
        data = self._parse_annotation(self.xml_list[idx])
        data_height = int(data["size"]["height"])
        data_width = int(data["size"]["width"])

        boxes, labels, iscrowd = [], [], []
        for obj in data["object"]:
            bndbox = obj["bndbox"]
            boxes.append([float(bndbox["xmin"]), float(bndbox["ymin"]),
                          float(bndbox["xmax"]), float(bndbox["ymax"])])
            labels.append(self.class_dict[obj["name"]])
            # `difficult` is optional in VOC XML; default to 0 (not crowd).
            iscrowd.append(int(obj.get("difficult", 0)))

        # reshape(-1, 4) keeps a valid 2-D shape even if the object list
        # were empty, so the column slicing below never fails.
        boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
        labels = torch.as_tensor(labels, dtype=torch.int64)
        iscrowd = torch.as_tensor(iscrowd, dtype=torch.int64)
        image_id = torch.tensor([idx])
        # (ymax - ymin) * (xmax - xmin)
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])

        target = {
            "boxes": boxes,
            "labels": labels,
            "image_id": image_id,
            "area": area,
            "iscrowd": iscrowd,
        }
        return (data_height, data_width), target

    @staticmethod
    def collate_fn(batch):
        """Batch ``[(img, tgt), ...]`` into ``((img, ...), (tgt, ...))``."""
        return tuple(zip(*batch))
