import torch
import torch.utils.data
from src.const import base_path
import numpy as np
import cv2
from torchvision import transforms
import matplotlib.pyplot as plt
import pandas as pd
from skimage import io, transform
import skimage
from src import const
import json
import os
import nltk
from src.utils import load_json, build_vocab, Vocab
import random


class RandomFlip(object):
    """Flip an image left-right with probability 0.5.

    Expects an H x W x C (or H x W) numpy array; returns the image unchanged
    or horizontally mirrored. Note: ``np.fliplr`` returns a view — callers in
    this file ``.copy()`` before converting to a tensor.
    """

    def __call__(self, image):
        # The original also computed h, w here but never used them.
        if np.random.rand() > 0.5:
            image = np.fliplr(image)
        return image


class CenterCrop(object):
    """Crop the central region of an image.

    Args:
        output_size (tuple or int): target (height, width); an int yields a
            square crop.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        assert len(output_size) == 2
        self.output_size = output_size

    def __call__(self, image):
        new_h, new_w = self.output_size
        h, w = image.shape[:2]
        # int(.../2) matches the original truncation behaviour exactly
        top = int((h - new_h) / 2)
        left = int((w - new_w) / 2)
        return image[top:top + new_h, left:left + new_w]


class RandomCrop(object):
    """Crop a randomly positioned window out of an image.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop
            is made.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        assert len(output_size) == 2
        self.output_size = output_size

    def __call__(self, image):
        new_h, new_w = self.output_size
        h, w = image.shape[:2]
        # sample the top-left corner uniformly; randint bounds are inclusive
        top = random.randint(0, h - new_h)
        left = random.randint(0, w - new_w)
        return image[top:top + new_h, left:left + new_w]


class RandomRescale(object):
    """Rescale an image so its shorter edge equals a size drawn uniformly
    from ``output_size_range`` (both ends inclusive), keeping aspect ratio."""

    def __init__(self, output_size_range):
        assert isinstance(output_size_range, tuple)
        # output_size_range is the range the SHORT edge is scaled into
        self.lower_size = int(output_size_range[0])
        self.upper_size = int(output_size_range[1])

    def gen_output_size(self):
        # randint is inclusive on both ends
        return random.randint(self.lower_size, self.upper_size)

    def __call__(self, image):
        h, w = image.shape[:2]
        short_side = self.gen_output_size()
        if h > w:
            target = (short_side * h / w, short_side)
        else:
            target = (short_side, short_side * w / h)
        new_shape = (int(target[0]), int(target[1]))
        return transform.resize(image, new_shape, mode='constant')


class Rescale(object):
    """Rescale an image to a given size.

    Args:
        output_size (tuple or int): Desired output size. If tuple, output is
            matched to output_size. If int, smaller of image edges is matched
            to output_size keeping aspect ratio the same.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, image):
        h, w = image.shape[:2]
        if not isinstance(self.output_size, int):
            # explicit (height, width) — may be floats, truncated below
            new_h, new_w = self.output_size
        elif h > w:
            new_h, new_w = self.output_size * h / w, self.output_size
        else:
            new_h, new_w = self.output_size, self.output_size * w / h
        return transform.resize(image, (int(new_h), int(new_w)), mode='constant')


class RandomGamma(object):
    """Apply gamma correction with an exponent drawn uniformly from a range."""

    def __init__(self, gamma_range):
        self.lower_gamma, self.upper_gamma = gamma_range[0], gamma_range[1]

    def __call__(self, image):
        gamma = random.uniform(self.lower_gamma, self.upper_gamma)
        return skimage.exposure.adjust_gamma(image, gamma)


class PolyvoreDataset(torch.utils.data.Dataset):
    # Training dataset over Polyvore outfit JSON entries: each sample yields
    # padded item images plus vocab-encoded item-name ids and the masks the
    # model needs to ignore padding.

    def __init__(self, js, mode, vocab):
        '''
        Args:
            js: list of outfit dicts, each with 'set_id' and 'items'
                (each item has 'index' and 'name').
            mode:
                Rescale224: resize directly to 224x224
                Random224: RandomRescale 240~280 -> RANDOM_CROP 224 -> RANDOM_FLIP
                Center224: middle_of_random_rescale -> CENTER_CROP 224
            vocab: vocabulary exposing `word2id` and `to_id` used to encode
                item names.
        '''
        self.js = js
        self.rescale = Rescale((224, 224))
        self.rescale_lower_size = 240
        self.rescale_upper_size = 280
        self.recale_middle_size = (self.rescale_lower_size + self.rescale_upper_size) / 2
        self.random_rescale = RandomRescale((self.rescale_lower_size, self.rescale_upper_size))
        self.random_flip = RandomFlip()
        self.random_crop = RandomCrop((224, 224))
        self.middle_of_random_rescale = Rescale((self.recale_middle_size, self.recale_middle_size))
        self.center_crop = CenterCrop((224, 224))
        self.to_tensor = transforms.ToTensor()
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])
        self.mode = mode
        self.vocab = vocab

    def __len__(self):
        return len(self.js)

    def plot_sample(self, i):
        # Debug helper: show every (padded) item image of sample i.
        import matplotlib.pyplot as plt
        sample = self[i]
        raw_images = sample['raw_images']
        for raw_image in raw_images:
            plt.figure(dpi=72)
            plt.imshow(raw_image)

    def __getitem__(self, i):
        sample = self.js[i]

        # real item count, clamped to the pad length
        if len(sample['items']) >= const.OUTFIT_ITEM_PAD_NUM:
            real_item_nums = const.OUTFIT_ITEM_PAD_NUM
        else:
            real_item_nums = len(sample['items'])
        real_item_nums = torch.tensor(real_item_nums)

        raw_images = []
        images = []
        set_id = sample['set_id']
        file_pattern = os.path.join(const.base_path, 'images/' + str(set_id) + '/{}.jpg')
        image_mask = []
        for idx in range(const.OUTFIT_ITEM_PAD_NUM):
            if idx >= len(sample['items']):
                item = sample['items'][-1]  # pad images by repeating the last real item
                image_mask.append(0)
            else:
                item = sample['items'][idx]
                image_mask.append(1)
            image = io.imread(file_pattern.format(item['index']))
            if len(image.shape) == 2:
                # print('Warning! Gray Image: {}'.format(file_pattern.format(item['index'])))
                image = skimage.color.gray2rgb(image)
            elif image.shape[2] == 4:
                print('Warning! rgba Image: {}'.format(file_pattern.format(item['index'])))
                image = skimage.color.rgba2rgb(image)
            if self.mode == 'Rescale224':
                image = self.rescale(image)
            elif self.mode == 'Random224':
                image = self.random_rescale(image)
                image = self.random_crop(image)
                image = self.random_flip(image)
            elif self.mode == 'Center224':
                image = self.middle_of_random_rescale(image)
                image = self.center_crop(image)
            else:
                raise Exception("Unknown Mode!")
            # support special numpy type
            image = image.copy()
            image = skimage.img_as_ubyte(image)  # still needed: environments differ, and a 0~255 image is required here
            raw_images.append(image)
            image = image.copy()
            image = self.to_tensor(image)
            image = self.normalize(image)
            image = image.float()
            images.append(torch.unsqueeze(image, dim=0))
        images = torch.cat(images, dim=0)
        image_mask = torch.LongTensor(image_mask)

        # filter out words that do not appear in the vocabulary
        all_ids = []
        all_lengths = []
        word_detail_mask = []
        word_embedding_divider = []  # NOTE(review): never appended to; rebound as a tensor below
        for idx in range(const.OUTFIT_ITEM_PAD_NUM):
            if idx >= len(sample['items']):
                name = ""
            else:
                name = sample['items'][idx]['name']
            name = name.split()
            name_ids = []
            word_detail_mask_tmp = []
            p = 0  # pointer into the name's word list
            cnt = 0  # number of in-vocab words kept
            for j in range(const.OUTFIT_NAME_PAD_NUM):
                # advance past out-of-vocab words
                while p < len(name) and name[p] not in self.vocab.word2id:
                    p += 1
                if p < len(name):
                    name_ids.append(self.vocab.to_id(name[p]))
                    cnt += 1
                    word_detail_mask_tmp.append(1)
                else:
                    # name exhausted: pad this slot with <unk>
                    name_ids.append(self.vocab.to_id("<unk>"))
                    word_detail_mask_tmp.append(0)
                p += 1
            word_detail_mask.append(word_detail_mask_tmp)  # finer-grained validity flag per word slot
            if idx >= len(sample['items']):
                all_lengths.append(-1)
            else:
                all_lengths.append(cnt)
            all_ids.append(name_ids)
        # (OUTFIT_ITEM_PAD_NUM, OUTFIT_NAME_PAD_NUM) word ids
        all_ids = np.array(all_ids)
        # 1 iff the item has at least one meaningful word; if not, this image
        # should not contribute to the image/text embedding loss
        word_mask = torch.LongTensor([1 if length > 0 else 0 for length in all_lengths])
        # (OUTFIT_ITEM_PAD_NUM, OUTFIT_NAME_PAD_NUM) 0/1 validity of each id; finer-grained than word_mask
        word_detail_mask = torch.LongTensor(word_detail_mask)
        # (OUTFIT_ITEM_PAD_NUM, ): 1 when there is no item or no name, otherwise the name length
        word_embedding_divider = torch.LongTensor([length if length > 1 else 1 for length in all_lengths])
        # -1 means no item; 0 means item present but empty name; >0 is the name length
        # shape is always (OUTFIT_ITEM_PAD_NUM, )
        all_lengths = torch.LongTensor(all_lengths)

        ret = {
            'raw_images': raw_images,
            'images': images,
            'image_mask': image_mask.float(),
            'item_nums': real_item_nums,
            'word_ids': all_ids,
            'word_mask': word_mask.float(),
            'word_lengths': all_lengths,
            'word_detail_mask': word_detail_mask.float(),
            'word_embedding_divider': word_embedding_divider,
        }
        return ret


class CompatibilityBenchmarkDataset(torch.utils.data.Dataset):
    """Outfit-compatibility benchmark set.

    Each line of the benchmark file is ``label setid_idx setid_idx ...``;
    labels go to ``self.labels`` and the resolved image paths to
    ``self.files``. Samples return all-zero text placeholders so the dict
    layout matches :class:`PolyvoreDataset`.
    """

    def __init__(self, mode, filepath=None):
        '''
        mode:
            Rescale224: resize directly to 224x224
            Center224: rescale shorter edge to the middle size -> center crop 224
        '''
        self.labels = []
        self.files = []
        if filepath is None:
            filepath = os.path.join(const.base_path, const.VAL_FASHION_COMP_FILE)
        print("Fashion Compatibility Test, Use File: {}".format(filepath))
        with open(filepath) as f:
            for line in f:
                file_tmp = []
                line = line.split()
                self.labels.append(int(line[0]))
                for file in line[1:]:
                    # each entry is "<set_id>_<index>"
                    parts = file.split('_')
                    file_tmp.append(
                        os.path.join(
                            const.base_path,
                            'images/{}/{}.jpg',
                        ).format(parts[0], parts[1])
                    )
                self.files.append(file_tmp)
        self.rescale = Rescale((224, 224))
        self.rescale_lower_size = 240
        self.rescale_upper_size = 280
        self.recale_middle_size = (self.rescale_lower_size + self.rescale_upper_size) / 2
        self.middle_of_random_rescale = Rescale((self.recale_middle_size, self.recale_middle_size))
        self.center_crop = CenterCrop((224, 224))
        self.to_tensor = transforms.ToTensor()
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.mode = mode

    def __getitem__(self, idx):
        sample = self.files[idx]
        raw_images = []
        images = []
        image_mask = []
        # `pos` instead of reusing `idx`: the original shadowed the parameter
        for pos in range(const.OUTFIT_ITEM_PAD_NUM):
            if pos >= len(sample):
                # pad short outfits by repeating the last image, masked out
                path = sample[-1]
                image_mask.append(0)
            else:
                path = sample[pos]
                image_mask.append(1)
            image = io.imread(path)
            if len(image.shape) == 2:
                image = skimage.color.gray2rgb(image)
            elif image.shape[2] == 4:
                # BUG FIX: the original referenced the undefined names
                # `file_pattern` / `item['index']` here, raising NameError
                # whenever an RGBA image was encountered.
                print('Warning! rgba Image: {}'.format(path))
                image = skimage.color.rgba2rgb(image)
            if self.mode == 'Rescale224':
                image = self.rescale(image)
            elif self.mode == 'Center224':
                image = self.middle_of_random_rescale(image)
                image = self.center_crop(image)
            else:
                raise Exception("Unknown Mode!")
            # copy to drop numpy views / exotic dtypes before conversion
            image = image.copy()
            # environments differ; a 0~255 uint8 image is required here
            image = skimage.img_as_ubyte(image)
            raw_images.append(image)
            image = image.copy()
            image = self.to_tensor(image)
            image = self.normalize(image)
            image = image.float()
            images.append(torch.unsqueeze(image, dim=0))
        images = torch.cat(images, dim=0)
        image_mask = torch.LongTensor(image_mask)
        return {
            'raw_images': raw_images,
            'images': images,
            'image_mask': image_mask.float(),
            # fake word fields so the dict layout matches PolyvoreDataset
            'word_ids': torch.zeros((const.OUTFIT_ITEM_PAD_NUM, const.OUTFIT_NAME_PAD_NUM), dtype=torch.long),
            'word_mask': torch.zeros((const.OUTFIT_ITEM_PAD_NUM,)).float(),
            'word_detail_mask': torch.zeros((const.OUTFIT_ITEM_PAD_NUM, const.OUTFIT_NAME_PAD_NUM)).float(),
            'word_embedding_divider': torch.ones((const.OUTFIT_ITEM_PAD_NUM, )).float(),
        }

    def __len__(self):
        return len(self.files)


class FITBDataset(torch.utils.data.Dataset):
    """Fill-in-the-blank benchmark: each JSON entry holds 'question' image
    names and candidate 'answers' names; both sides are returned as stacked
    (N, 3, 224, 224) tensors."""

    def __init__(self, mode, filepath=None):
        '''
        mode:
            Rescale224: resize directly to 224x224
            Center224: rescale shorter edge to the middle size -> center crop 224
        '''
        if filepath is None:
            filepath = os.path.join(const.base_path, const.VAL_FITB_FILE)
        print("FITB Test, Use File: {}".format(filepath))
        self.rescale = Rescale((224, 224))
        self.rescale_lower_size = 240
        self.rescale_upper_size = 280
        self.recale_middle_size = (self.rescale_lower_size + self.rescale_upper_size) / 2
        self.random_rescale = RandomRescale((self.rescale_lower_size, self.rescale_upper_size))
        self.random_flip = RandomFlip()
        self.random_crop = RandomCrop((224, 224))
        self.middle_of_random_rescale = Rescale((self.recale_middle_size, self.recale_middle_size))
        self.center_crop = CenterCrop((224, 224))
        self.to_tensor = transforms.ToTensor()
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])
        self.mode = mode
        self.file_pattern = os.path.join(const.base_path, 'images/{}/{}.jpg')
        with open(filepath) as f:
            self.js = json.load(f)

    def get_file(self, name):
        """Map a '<set_id>_<index>' identifier to its image path."""
        return self.file_pattern.format(name.split('_')[0], name.split('_')[1])

    def __len__(self):
        return len(self.js)

    def get_image(self, name):
        """Load, preprocess and normalize one image; returns a (1, 3, 224, 224) tensor."""
        image_path = self.get_file(name)
        image = io.imread(image_path)
        if len(image.shape) == 2:
            image = skimage.color.gray2rgb(image)
        elif image.shape[2] == 4:
            # BUG FIX: the original referenced the undefined names
            # `file_pattern` / `item` here, raising NameError on RGBA images.
            print('Warning! rgba Image: {}'.format(image_path))
            image = skimage.color.rgba2rgb(image)
        if self.mode == 'Rescale224':
            image = self.rescale(image)
        elif self.mode == 'Center224':
            image = self.middle_of_random_rescale(image)
            image = self.center_crop(image)
        else:
            raise Exception("Unknown Mode!")
        # copy to drop numpy views / exotic dtypes before conversion
        image = image.copy()
        # environments differ; a 0~255 uint8 image is required here
        image = skimage.img_as_ubyte(image)
        image = self.to_tensor(image)
        image = self.normalize(image)
        image = image.float()
        image = torch.unsqueeze(image, dim=0)
        return image

    def __getitem__(self, idx):
        item = self.js[idx]
        images = [self.get_image(image_name) for image_name in item['question']]
        answer_images = [self.get_image(image_name) for image_name in item['answers']]
        images = torch.cat(images, dim=0)
        answer_images = torch.cat(answer_images, dim=0)
        return {
            'images': images,
            'answer_images': answer_images,
        }

class SimpleImageDataset(torch.utils.data.Dataset):
    """Plain image dataset over a list of file paths; each sample yields the
    raw uint8 image alongside its normalized (1, 3, 224, 224) tensor."""

    def __init__(self, mode, paths):
        '''
        mode:
            Rescale224: resize directly to 224x224
            Center224: rescale shorter edge to the middle size -> center crop 224
        '''
        self.rescale = Rescale((224, 224))
        self.rescale_lower_size = 240
        self.rescale_upper_size = 280
        self.recale_middle_size = (self.rescale_lower_size + self.rescale_upper_size) / 2
        self.middle_of_random_rescale = Rescale((self.recale_middle_size, self.recale_middle_size))
        self.center_crop = CenterCrop((224, 224))
        self.to_tensor = transforms.ToTensor()
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])
        self.mode = mode
        self.paths = paths

    def get_image(self, image_path):
        """Load one image; returns (raw uint8 array, normalized (1, 3, 224, 224) tensor)."""
        image = io.imread(image_path)
        if len(image.shape) == 2:
            image = skimage.color.gray2rgb(image)
        elif image.shape[2] == 4:
            # BUG FIX: the original referenced the undefined names
            # `file_pattern` / `item` here, raising NameError on RGBA images.
            print('Warning! rgba Image: {}'.format(image_path))
            image = skimage.color.rgba2rgb(image)
        if self.mode == 'Rescale224':
            image = self.rescale(image)
        elif self.mode == 'Center224':
            image = self.middle_of_random_rescale(image)
            image = self.center_crop(image)
        else:
            raise Exception("Unknown Mode!")
        # copy to drop numpy views / exotic dtypes before conversion
        image = image.copy()
        # environments differ; a 0~255 uint8 image is required here
        image = skimage.img_as_ubyte(image)
        raw_image = image.copy()
        image = self.to_tensor(image)
        image = self.normalize(image)
        image = image.float()
        image = image.unsqueeze(0)
        return raw_image, image

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        path = self.paths[idx]
        raw_image, image = self.get_image(path)
        return {
            'raw_image': raw_image,
            'images': image,
        }