import os
import xml.etree.ElementTree as ET
import cv2
from torch.utils import data
import numpy as np
from torchvision import transforms as T

# PASCAL VOC object classes, indexed by YOLO class id.
# BUG FIX: the names must match the <name> tags in the VOC XML annotations
# exactly — VOC uses 'diningtable' and 'pottedplant' (no spaces). With the
# spaced spellings, `cls not in CLASSES` silently dropped every object of
# those two classes during conversion.
CLASSES = ['person', 'bird', 'cat', 'cow', 'dog',
               'horse', 'sheep', 'aeroplane', 'bicycle', 'boat',
               'bus', 'car', 'motorbike', 'train', 'bottle',
               'chair', 'diningtable', 'pottedplant', 'sofa', 'tvmonitor']
# Convert the dataset annotations into the required format, stored as text files
class ChangeToLable(object):
    """Convert PASCAL VOC XML annotations into YOLO-style text labels.

    Each output line is "<class_id> <x_center> <y_center> <w> <h>", with all
    coordinates normalized to [0, 1] by the image width/height.
    """

    def __init__(self):
        # Dataset root; expects an "Annotations/" subdirectory of VOC XML files.
        self.DataSetPath = os.getcwd()

    def convert(self, size, box):
        """Normalize a pixel box to YOLO format.

        size: (width, height) of the image in pixels.
        box:  (xmin, xmax, ymin, ymax) in pixels.
        Returns (x_center, y_center, w, h) relative to the image size.
        """
        dw = 1 / size[0]
        dh = 1 / size[1]
        x = (box[0] + box[1]) / 2       # box center, pixels
        y = (box[2] + box[3]) / 2
        w = box[1] - box[0]
        h = box[3] - box[2]
        return (x * dw, y * dh, w * dw, h * dh)

    def convert_annotation(self, image_id):
        """Convert one annotation file (e.g. "000001.xml") to ./label/<id>.txt."""
        stem = image_id.split('.')[0]               # file name without extension
        # BUG FIX: create the output directory; the original open(..., 'w')
        # failed with FileNotFoundError when ./label did not exist yet.
        os.makedirs("./label", exist_ok=True)
        # BUG FIX: use context managers so both file handles are closed even
        # if XML parsing raises (the original never closed either file).
        with open(self.DataSetPath + "/Annotations/%s" % (image_id)) as in_file, \
                open("./label/%s.txt" % (stem), 'w') as out_file:
            root = ET.parse(in_file).getroot()
            size = root.find("size")
            w = int(size.find("width").text)
            h = int(size.find("height").text)

            for obj in root.iter("object"):
                difficult = obj.find('difficult').text
                cls = obj.find('name').text
                # Skip classes we don't train on and objects flagged "difficult".
                if cls not in CLASSES or int(difficult) == 1:
                    continue
                cls_id = CLASSES.index(cls)
                xmlbox = obj.find("bndbox")
                point = (float(xmlbox.find("xmin").text),
                         float(xmlbox.find("xmax").text),
                         float(xmlbox.find("ymin").text),
                         float(xmlbox.find("ymax").text))
                bb = self.convert((w, h), point)
                out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + "\n")

    def make_label_text(self):
        """Convert every XML file under <root>/Annotations."""
        for file in os.listdir(self.DataSetPath + "/Annotations"):
            self.convert_annotation(file)


# Visual sanity check: draw a converted label file back onto its image
def show_labels_img(imgname):
    """Draw the boxes from ./label/<imgname>.txt onto ./JPEGImages/<imgname>.jpg
    and display the result — a visual sanity check for the label conversion.
    """
    img = cv2.imread("./JPEGImages/" + imgname + ".jpg")
    h, w = img.shape[:2]
    print(h, w)
    with open("./label/" + imgname + ".txt", "r") as flabel:
        for line in flabel:
            label = [float(x.strip()) for x in line.split(" ")]
            # Convert normalized (xc, yc, bw, bh) back to pixel corner points.
            # (+5 offset on pt1 so the class text doesn't sit on the box edge.)
            pt1 = (int(label[1] * w - label[3] * w / 2) + 5, int(label[2] * h - label[4] * h / 2) + 5)
            pt2 = (int(label[1] * w + label[3] * w / 2), int(label[2] * h + label[4] * h / 2))
            cv2.putText(img, CLASSES[int(label[0])], pt1, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
            # BUG FIX: the thickness 2 was packed into the color tuple
            # (0, 0, 255, 2); pass it as the separate `thickness` argument.
            cv2.rectangle(img, pt1, pt2, (0, 0, 255), 2)

    cv2.imshow("img", img)
    cv2.waitKey(0)


class VOC2012(data.Dataset):
    """PASCAL VOC dataset yielding (image, label) pairs for YOLO-v1 training.

    Images are zero-padded to a square, then resized to 448x448. Labels are
    a (cell, cell, 5*bbox_num + class_num) target grid built from the text
    files produced by ChangeToLable.
    """

    def __init__(self, is_train=True, is_aug=True):
        # is_train: use the train split when True, otherwise the val split.
        # is_aug:   whether to run the transform pipeline (currently ToTensor).
        split = "train.txt" if is_train else "val.txt"
        with open(os.getcwd() + "/data/ImageSets/Main/" + split, "r") as f:
            self.fileNames = [x.strip() for x in f]

        self.imgPath = os.getcwd() + "/data/JPEGImages/"
        self.labelPath = os.getcwd() + "/data/label/"
        self.is_aug = is_aug

    def __len__(self):
        return len(self.fileNames)

    def Convert_bboxTLable(self, bbox, cell=7, bbox_num=2, class_num=20):
        """Turn a flat [cls, x, y, w, h, cls, x, ...] list into a YOLO target.

        Returns an np.ndarray of shape (cell, cell, 5*bbox_num + class_num).
        The grid cell containing each ground-truth center gets the box
        (cell-relative center offset, width, height, confidence=1) written
        into every predictor slot plus a one-hot class vector.
        """
        # BUG FIX: honour the `cell`, `bbox_num` and `class_num` parameters;
        # the original hard-coded 7, slices 0:5/5:10 and offset 10.
        labels = np.zeros((cell, cell, 5 * bbox_num + class_num))
        for i in range(len(bbox) // 5):
            # Clamp so a normalized coordinate of exactly 1.0 still maps to
            # the last grid cell instead of indexing out of range.
            gridx = min(int(bbox[i * 5 + 1] * cell), cell - 1)
            gridy = min(int(bbox[i * 5 + 2] * cell), cell - 1)
            gridpx = bbox[i * 5 + 1] * cell - gridx     # x offset inside the cell
            gridpy = bbox[i * 5 + 2] * cell - gridy     # y offset inside the cell
            # Row gridy / column gridx is responsible for this ground truth:
            # same target in every predictor slot, confidence and class prob 1.
            target = np.array([gridpx, gridpy, bbox[i * 5 + 3], bbox[i * 5 + 4], 1])
            for b in range(bbox_num):
                labels[gridy, gridx, b * 5:b * 5 + 5] = target
            labels[gridy, gridx, 5 * bbox_num + int(bbox[i * 5])] = 1
        return labels

    def __getitem__(self, item):
        img = cv2.imread(self.imgPath + self.fileNames[item] + ".jpg")
        h, w = img.shape[0:2]
        input_size = 448
        padw, padh = 0, 0
        # Zero-pad the shorter side so the image is square before resizing.
        if h > w:
            padw = (h - w) // 2
            img = np.pad(img, ((0, 0), (padw, padw), (0, 0)), "constant", constant_values=0)
        elif w > h:
            padh = (w - h) // 2
            img = np.pad(img, ((padh, padh), (0, 0), (0, 0)), "constant", constant_values=0)
        img = cv2.resize(img, (input_size, input_size))
        if self.is_aug:
            aug = T.Compose([
                T.ToTensor()
            ])
            img = aug(img)

        with open(self.labelPath + self.fileNames[item] + ".txt") as f:
            bbox = f.read().split("\n")
        bbox = [x.split() for x in bbox]
        bbox = [float(x) for y in bbox for x in y]
        if len(bbox) % 5 != 0:
            raise ValueError("File:" + self.labelPath + self.fileNames[item] + ".txt" + "——bbox Extraction Error!")

        # Re-normalize the boxes for the padded square image.
        for i in range(len(bbox) // 5):
            if padw != 0:
                bbox[i * 5 + 1] = (bbox[i * 5 + 1] * w + padw) / h
                bbox[i * 5 + 3] = (bbox[i * 5 + 3] * w) / h
            elif padh != 0:
                # BUG FIX: the y-center must be shifted by padh — the original
                # added padw, which is always 0 in this branch.
                bbox[i * 5 + 2] = (bbox[i * 5 + 2] * h + padh) / w
                # BUG FIX: box height was scaled by w / w (a no-op); it must
                # be rescaled from the original height h to the new side w.
                bbox[i * 5 + 4] = (bbox[i * 5 + 4] * h) / w
        labels = self.Convert_bboxTLable(bbox)
        labels = T.ToTensor()(labels)
        return img, labels







