from torch.utils.data import Dataset, DataLoader
import os
import numpy as np
import cv2
import torch
from tqdm import tqdm

import config.Yolov4 as cfg
import dataset.dataaug.dataAug as dataAug

class Yolo_dataset(Dataset):
    """YOLOv4 dataset of images plus bounding-box annotations.

    Annotation file format (one image per line):
        <image_path> <x1,y1,x2,y2,class_id> <x1,y1,x2,y2,class_id> ...

    In 'train' mode, data augmentation and mosaic stitching are enabled;
    any other mode only resizes to ``image_shape``.
    """

    def __init__(self, type):
        """
        Args:
            type: 'train' enables augmentation/mosaic; anything else is
                treated as validation mode. (Name kept for backward
                compatibility even though it shadows the builtin.)
        """
        self.type = type
        self.image_shape = [416, 416]   # network input size (h, w)
        self.train_path = os.path.join(cfg.Dataset_path, 'train_annotation.txt')
        self.val_path = os.path.join(cfg.Dataset_path, 'val_annotation.txt')
        self.train_truth = self.__load_annotations(self.train_path)
        self.val_truth = self.__load_annotations(self.val_path)
        self.train_imgs_path = list(self.train_truth.keys())
        self.val_imgs_path = list(self.val_truth.keys())

        # Per-transform switches consumed by __dataAug.
        self.augmentation = self.type == 'train'
        self.hsv_chance = True
        self.RandomHorizontalFilp = True
        self.RandomVerticalFilp = True
        self.RandomCrop = True
        self.RandomAffine = True
        self.Blur = True
        self.Gaussian_Noise = False
        self.Random_Resize = True
        self.Resize = True
        self.mosaic = self.type == 'train'

    def __load_annotations(self, path):
        """Parse an annotation file into {image_path: [[x1,y1,x2,y2,cls], ...]}.

        The parsed dict is cached with torch.save so subsequent runs skip
        re-parsing. The cache file name is derived from *path*: the original
        code cached both train and val annotations under the single file
        cfg.dataset_truth, so the second call silently returned the first
        file's contents.
        """
        # One cache file per annotation file (fixes train/val cache collision).
        cache_path = '{}.{}'.format(
            cfg.dataset_truth, os.path.splitext(os.path.basename(path))[0])
        if os.path.isfile(cache_path):
            return torch.load(cache_path)

        truth = {}
        with open(path, 'r', encoding='utf-8') as f:
            for line in tqdm(f.readlines()):
                data = line.split(' ')
                # data[0] is the image path, the rest are comma-separated boxes.
                truth[data[0]] = [
                    [int(float(v)) for v in item.split(',')] for item in data[1:]
                ]
        torch.save(truth, cache_path)
        return truth

    def __parse_annotation(self, index):
        """Load image *index* and its boxes, then run the augmentation pipeline."""
        img_path = self.train_imgs_path[index] if self.type == 'train' else self.val_imgs_path[index]
        truth = self.train_truth if self.type == 'train' else self.val_truth
        img = cv2.imread(img_path)
        if img is None:
            # Explicit raise instead of assert: asserts vanish under `python -O`.
            raise FileNotFoundError('File Not Found ' + img_path)
        boxes = np.array(truth.get(img_path), dtype=np.float32)
        img, boxes = self.__dataAug(img, boxes, self.augmentation)
        return img, boxes

    def __dataAug(self, img, boxes, augmentation):
        """Apply the enabled augmentations (train only), then resize image
        and boxes to ``self.image_shape``."""
        if augmentation:
            img, boxes = dataAug.hsv_chance(self.hsv_chance)(np.copy(img), np.copy(boxes))
            img, boxes = dataAug.RandomHorizontalFilp(self.RandomHorizontalFilp)(np.copy(img), np.copy(boxes))
            img, boxes = dataAug.RandomVerticalFilp(self.RandomVerticalFilp)(np.copy(img), np.copy(boxes))
            img, boxes = dataAug.RandomCrop(self.RandomCrop)(np.copy(img), np.copy(boxes))
            # NOTE: RandomAffine was disabled by the original author because it
            # reduces the useful image content.
            img, boxes = dataAug.Blur(self.Blur)(np.copy(img), np.copy(boxes))
            img, boxes = dataAug.Gaussian_Noise(self.Gaussian_Noise)(np.copy(img), np.copy(boxes))
            img, boxes = dataAug.Random_Resize(self.Random_Resize)(np.copy(img), np.copy(boxes))
        # Always resize to the network input shape at the end.
        img, boxes = dataAug.Resize(self.image_shape, False)(np.copy(img), np.copy(boxes))
        return img, boxes

    def __len__(self):
        """Number of annotated images for the current split."""
        if self.type == 'train':
            return len(self.train_truth)
        return len(self.val_truth)

    def check(self, img, y):
        """Normalize one sample for the network.

        img: 0~255 HWC -> 0~1 CHW.
        y:   absolute [x1,y1,x2,y2,cls] -> normalized [cx,cy,w,h,cls]
             (coordinates divided by image_shape, clipped to [0, 1]).

        Returns (img, y); y is returned unchanged when there are no boxes.
        """
        img = np.array(img, dtype=np.float32)
        tmp_inp = np.transpose(img / 255.0, (2, 0, 1))
        if len(y) != 0:
            boxes = np.array(y[:, :4], dtype=np.float32)
            boxes[:, 0] = boxes[:, 0] / self.image_shape[1]
            boxes[:, 1] = boxes[:, 1] / self.image_shape[0]
            boxes[:, 2] = boxes[:, 2] / self.image_shape[1]
            boxes[:, 3] = boxes[:, 3] / self.image_shape[0]

            boxes = np.maximum(np.minimum(boxes, 1), 0)     # clip into [0, 1]
            # xyxy -> cx,cy,w,h
            boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
            boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
            boxes[:, 0] = boxes[:, 0] + boxes[:, 2] / 2
            boxes[:, 1] = boxes[:, 1] + boxes[:, 3] / 2
            y = np.concatenate([boxes, y[:, -1:]], axis=-1)
            tmp_targets = np.array(y, dtype=np.float32)
            return tmp_inp, tmp_targets
        return tmp_inp, y

    def collate(self, batch):
        """Custom collate_fn for DataLoader.

        Images are stacked into one float32 array of shape (B, C, H, W).
        Boxes stay a *list* of per-image arrays: box counts differ between
        images, so they cannot form a rectangular ndarray (ragged np.array
        construction is rejected by modern NumPy).
        """
        images = []
        bboxes = []
        for img, box in batch:
            img, box = self.check(img, box)   # box may be empty when no objects
            images.append(img)
            bboxes.append(box)
        images = np.array(images, dtype=np.float32)
        return images, bboxes

    def __getitem__(self, index):
        """Return (img, boxes). In train mode, mosaic augmentation (four
        random images stitched together) replaces the sample with 50%
        probability."""
        if self.type == 'train' and self.mosaic and np.random.randint(0, 2):
            img_list = []
            boxes_list = []
            for _ in range(4):
                rand_idx = np.random.randint(0, len(self.train_imgs_path))
                img, boxes = self.__parse_annotation(rand_idx)
                img_list.append(img)
                boxes_list.append(boxes)
            return dataAug.moscia(self.image_shape)(img_list, boxes_list)
        return self.__parse_annotation(index)

def create_dataloader(type='train', Batch_size=4):
    """Build a Yolo_dataset and its DataLoader.

    Args:
        type: 'train' or 'val'; forwarded to Yolo_dataset.
        Batch_size: samples per batch.

    Returns:
        (dataset, dataloader) — the loader shuffles, pins memory, drops the
        last incomplete batch and uses the dataset's own collate function.
    """
    dataset = Yolo_dataset(type)
    loader = DataLoader(
        dataset,
        batch_size=Batch_size,
        shuffle=True,
        num_workers=cfg.num_workers,
        pin_memory=True,
        drop_last=True,
        collate_fn=dataset.collate,
    )
    return dataset, loader

if __name__ == '__main__':
    # Visual smoke test: iterate batches, denormalize each image and its
    # boxes, draw them with OpenCV and show one window per image.
    # Press any key to advance.
    import random

    class_names = ['heel', 'maid']  # class_id -> label; must match the dataset
    np.random.seed(0)

    train_dataset, train_loader = create_dataloader('train', 2)
    for batch in train_loader:
        images = batch[0]
        batch_boxes = [np.array(b) for b in batch[1]]
        for img_idx in range(images.shape[0]):
            # CHW float in [0, 1] -> HWC uint8 in [0, 255] for OpenCV.
            hwc = np.transpose(images[img_idx] * 255.0, (1, 2, 0))
            img = cv2.resize(np.array(hwc, dtype=np.uint8), (416, 416))
            tl = round(0.002 * max(img.shape[0:2])) + 1  # line thickness
            color = [random.randint(0, 255) for _ in range(3)]
            for box in batch_boxes[img_idx]:
                box[0:4] = box[0:4] * 416  # undo [0, 1] normalization
                # cx,cy,w,h -> corner coordinates
                x1 = box[0] - box[2] / 2
                y1 = box[1] - box[3] / 2
                x2 = box[0] + box[2] / 2
                y2 = box[1] + box[3] / 2
                c1, c2 = (round(x1), round(y1)), (round(x2), round(y2))
                cv2.rectangle(img, c1, c2, color, thickness=tl)
                label = class_names[int(box[4])]
                if label:
                    tf = max(tl - 1, 1)  # font thickness
                    t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
                    c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                    cv2.rectangle(img, c1, c2, color, -1)  # filled label background
                    cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [0, 0, 0],
                                thickness=tf, lineType=cv2.LINE_AA)
            cv2.imshow('change', img)
            cv2.waitKey(0)