import os

import cv2
import numpy as np
import torch
from torch.utils import data
from torch.utils.data.dataloader import default_collate
from tqdm import tqdm

from reader import READER, image_norm
from function.readTxtFile import *
from datasets.img_Enhance import lm_image_enhance, corp_lm
from function.viewImage import drawPoints

class FileDataSet(data.Dataset):
    """Dataset of face images with bounding-box and landmark labels.

    Image paths are read from index files named in the config. Each sample
    is the image cropped to an (optionally enlarged) bounding box, with its
    landmarks normalized to [0, 1] relative to the crop.
    """

    def __init__(self, config, transform=None):
        """
        Args:
            config: config object exposing `train_flag`, `label`, `model`,
                and per-split (`train`/`test`) sections.
            transform: optional transform; stored but not applied here —
                resizing happens in `transform_cv`.

        Raises:
            ValueError: if `config.model.in_channels` is not 1 or 3.
        """
        super(FileDataSet, self).__init__()

        # Select the active split so the rest of the class can index the
        # config uniformly through config[self.flag].
        self.flag = 'train' if config.train_flag else 'test'
        self.reader = READER[config[self.flag]['reader_type']](
            config['label']['bboxSuffix'], config['label']['ptsSuffix'])
        self.datas = self.getFileList(config)
        self.transform = transform
        self.expansion_time = config.label.bbox_enlarge_scale
        # NOTE(review): this over-counts by one batch when len(datas) is an
        # exact multiple of batch_size; kept as-is for caller compatibility.
        self.numbatchs = int(len(self.datas) / config[self.flag].batch_size) + 1
        self.imagesize = config[self.flag].img_size
        self.config = config

        if config.model.in_channels == 3:
            self.is_gray = False
        elif config.model.in_channels == 1:
            self.is_gray = True
        else:
            raise ValueError("in_channels should be 1 or 3.")

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, item):
        """Load, crop, (optionally) augment, and normalize one sample.

        Returns:
            (imagePath, image tensor, normalized landmarks, crop box).
        """
        # Named `sample` (not `data`) to avoid shadowing torch.utils.data.
        sample = self.datas[item]
        image = cv2.imread(sample.imagePath)
        img_h, img_w, _ = image.shape
        if self.is_gray:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Enlarge the labelled bbox, then crop the image to it.
        box = self.box_expansion(sample.bbox, self.expansion_time, img_w, img_h)
        image = self.get_img_box(box, image)

        # Shift landmarks into the crop's coordinate frame.
        landmarks = np.array(sample.lm, dtype=float)
        landmarks = corp_lm(landmarks, box)

        if self.config.train_flag:
            # Augmentation may move the crop; it returns the updated box.
            image, landmarks, box = lm_image_enhance(image, landmarks)

        landmarks = self.get_lm_norm(landmarks, image)
        image = self.transform_cv(image)
        image = image_norm(image)
        return sample.imagePath, image, landmarks, box

    def _collect_filelist(self, config):
        """Gather image paths for the active split.

        The split's `dataset` entry may be a single index file or a list of
        index files; both are flattened into one list of paths.
        """
        dataset = config.train.dataset if config.train_flag else config.test.dataset
        if isinstance(dataset, list):
            filelist = []
            for f in dataset:
                filelist += readFileList(f)
            return filelist
        return readFileList(dataset)

    def getFileList(self, config):
        """Read a label for every listed image, keeping only usable ones."""
        datas = []
        print("reading data ...")
        for img in tqdm(self._collect_filelist(config)):
            label = self.reader.readLabel(img)
            if not label.used:
                continue
            datas.append(label)

        return datas

    def getTestFileList(self, config):
        """Re-populate `self.datas` for evaluation.

        Keeps every sample that has a bbox; samples with no landmarks get a
        single -1 placeholder so downstream code sees a non-empty list.
        """
        self.datas.clear()
        for img in self._collect_filelist(config):
            label = self.reader.readLabel(img)
            if len(label.bbox) == 0:
                continue
            if len(label.lm) == 0:
                label.lm.append(-1)
            self.datas.append(label)

        # Same over-count caveat as in __init__; kept for compatibility.
        self.numbatchs = int(len(self.datas) / self.config[self.flag].batch_size) + 1

    def get_img_box(self, box, image):
        """Crop `image` to an XYWH `box`; returns `image` unchanged when the
        box is not a valid 4-element box."""
        if len(box) != 4:
            return image
        new_box = XYWH_2_XYXY(box)
        return image[int(new_box[1]):int(new_box[3]), int(new_box[0]):int(new_box[2])]

    def get_lm_norm(self, lm, image):
        """Normalize landmarks to [0, 1] by the image width and height.

        Raises:
            ValueError: if `image` is neither 2-D (gray) nor 3-D (color).
        """
        size = image.shape
        if len(size) == 2:      # grayscale: (H, W)
            img_h, img_w = size
        elif len(size) == 3:    # color: (H, W, C)
            img_h, img_w, _ = size
        else:
            # Original code hit UnboundLocalError here; fail explicitly.
            raise ValueError("image must be 2-D or 3-D, got shape %s" % (size,))
        lm = lm.reshape(-1, 2)
        return lm / [img_w, img_h]

    def box_expansion(self, box, extrtimes, img_w, img_h):
        """Scale an XYWH `box` by `extrtimes` about its center, clamped to
        the image bounds. Returns [] when `box` is not a valid 4-tuple."""
        if len(box) != 4:
            return []
        center_x = box[0] + box[2] / 2
        center_y = box[1] + box[3] / 2
        new_w = extrtimes * box[2]
        new_h = extrtimes * box[3]
        new_x = max(center_x - new_w / 2, 0)
        new_y = max(center_y - new_h / 2, 0)
        # Shrink width/height if the enlarged box runs past the image edge.
        new_w = new_w if new_x + new_w <= img_w else img_w - new_x
        new_h = new_h if new_y + new_h <= img_h else img_h - new_y
        return [new_x, new_y, new_w, new_h]

    def collate_fn(self, batch_samples):
        """Collate (path, image, landmarks, box) tuples into batched tensors."""
        images = []
        landmarks = []
        imgpath = []
        boxs = []
        for path, img, pts, box in batch_samples:
            imgpath.append(path)
            images.append(img)
            landmarks.append(pts)
            boxs.append(box)

        imgpath = default_collate(imgpath)
        images = default_collate(images).contiguous()
        landmarks = default_collate(landmarks).contiguous()
        # Boxes are plain Python lists; torch.tensor stacks them to (B, 4).
        boxs = torch.tensor(boxs)
        return imgpath, images, landmarks, boxs

    def transform_cv(self, image):
        """Resize to the configured square size and convert to a tensor."""
        image = cv2.resize(image, (self.imagesize, self.imagesize))
        return torch.tensor(image)

    def draw_and_save(self, imagePath, savePath, lm):
        """Draw normalized landmarks `lm` on the bbox crop of `imagePath`
        and write the result into `savePath` under the same file name."""
        image = cv2.imread(imagePath)
        label = self.reader.readLabel(imagePath)
        if len(label.bbox) == 0:
            return

        # NOTE(review): this crops to the *un-enlarged* bbox, while
        # __getitem__ normalizes landmarks against the enlarged crop —
        # confirm consistency when bbox_enlarge_scale != 1. (The original
        # also computed box_expansion here and discarded the result.)
        box = XYWH_2_XYXY(label.bbox)
        image = image[int(box[1]):int(box[3]), int(box[0]):int(box[2])]
        # De-normalize landmarks back to pixel units of the bbox.
        lm = lm * torch.tensor(label.bbox[2:])

        image = drawPoints(image, lm.tolist(), color=(0, 255, 0))
        savepath = os.path.join(savePath, os.path.basename(imagePath))
        cv2.imwrite(savepath, image)