import torch
import torch.utils.data
from src.const import base_path
import numpy as np
import cv2
from torchvision import transforms
import matplotlib.pyplot as plt
import pandas as pd
from skimage import io, transform
import skimage
from src import const
import json
import os
import nltk
from src.utils import load_json, build_vocab, Vocab
import random


class RandomFlip(object):
    """Horizontally flip an image with probability 0.5."""

    def __call__(self, image):
        """Return *image* mirrored left-right half of the time.

        Args:
            image: numpy array of shape (H, W, ...).

        Returns:
            The (possibly flipped) image; np.fliplr returns a view, and
            callers in this file .copy() the result before use.
        """
        # Fixed: the original computed `h, w = image.shape[:2]` and never
        # used either value.
        if np.random.rand() > 0.5:
            image = np.fliplr(image)

        return image


class CenterCrop(object):
    """Crop the central region of an image.

    Args:
        output_size (int or tuple): target (height, width); an int makes a
            square crop.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
        self.output_size = output_size

    def __call__(self, image):
        """Return the centered (new_h, new_w) window of *image*."""
        new_h, new_w = self.output_size
        h, w = image.shape[:2]
        # top-left corner of the centered window
        top = int((h - new_h) / 2)
        left = int((w - new_w) / 2)
        return image[top:top + new_h, left:left + new_w]


class RandomCrop(object):
    """Crop randomly the image in a sample.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop
            is made.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
        self.output_size = output_size

    def __call__(self, image):
        """Return a uniformly random (new_h, new_w) window of *image*."""
        new_h, new_w = self.output_size
        h, w = image.shape[:2]
        # pick a random top-left corner that keeps the crop inside the image
        # (assumes the image is at least output_size in both dimensions)
        top = random.randint(0, h - new_h)
        left = random.randint(0, w - new_w)
        return image[top:top + new_h, left:left + new_w]


class RandomRescale(object):
    """Rescale an image so its short edge hits a randomly drawn size.

    Args:
        output_size_range (tuple): (low, high) range the short edge is
            resized into; the aspect ratio is preserved.
    """

    def __init__(self, output_size_range):
        assert isinstance(output_size_range, tuple)
        self.lower_size = int(output_size_range[0])
        self.upper_size = int(output_size_range[1])

    def gen_output_size(self):
        """Draw a short-edge size uniformly from [lower_size, upper_size]."""
        return random.randint(self.lower_size, self.upper_size)

    def __call__(self, image):
        h, w = image.shape[:2]
        short = self.gen_output_size()
        # scale the short edge to `short`; the long edge scales proportionally
        if h > w:
            shape = (int(short * h / w), short)
        else:
            shape = (short, int(short * w / h))
        return transform.resize(image, shape, mode='constant')


class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (tuple or int): Desired output size. If tuple, output is
            matched to output_size. If int, smaller of image edges is matched
            to output_size keeping aspect ratio the same.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, image):
        h, w = image.shape[:2]
        size = self.output_size
        if isinstance(size, int):
            # int: match the short edge to `size` and keep the aspect ratio
            if h > w:
                target = (int(size * h / w), size)
            else:
                target = (size, int(size * w / h))
        else:
            # tuple: resize to exactly (height, width); entries may be
            # floats (e.g. the averaged middle size), hence the int() casts
            target = (int(size[0]), int(size[1]))
        return transform.resize(image, target, mode='constant')


class RandomGamma(object):
    """Apply gamma correction with a gamma drawn uniformly from a range."""

    def __init__(self, gamma_range):
        # gamma_range is an indexable (low, high) pair
        self.lower_gamma, self.upper_gamma = gamma_range[0], gamma_range[1]

    def __call__(self, image):
        gamma = random.uniform(self.lower_gamma, self.upper_gamma)
        return skimage.exposure.adjust_gamma(image, gamma)

class MyDataset(torch.utils.data.Dataset):
    """Base dataset: builds the image pre-processing pipeline and the
    mapping from raw Polyvore category ids to compact "real" class ids.

    Args:
        mode: one of 'Rescale', 'Random', 'Center' — selects the
            augmentation pipeline used by parse_single_image.
        lower_size / upper_size: range the short edge is rescaled into
            before cropping (ignored when an inception embedder is set).
        target_size: final square crop size fed to the network.
    """

    def __init__(self, mode, lower_size=240, upper_size=300, target_size=224):
        # Inception-family embedders take 299x299 inputs, so use larger
        # rescale bounds and a 299 crop in that case.
        if hasattr(const, "IMAGE_EMBEDDER") and const.IMAGE_EMBEDDER.lower().startswith("inception"):
            print("use inception size....")
            self.lower_size = 300
            self.upper_size = 400
            self.target_size = 299
        else:
            self.lower_size = lower_size
            self.upper_size = upper_size
            self.target_size = target_size
        self.build_image_transform()
        self.mode = mode
        self.build_category()

    def build_image_transform(self):
        """Instantiate every transform used by parse_single_image."""
        self.rescale = Rescale((self.target_size, self.target_size))
        self.rescale_lower_size = self.lower_size
        self.rescale_upper_size = self.upper_size
        # NOTE: attribute keeps the historical 'recale' spelling since other
        # code may reference it by name.
        self.recale_middle_size = (self.rescale_lower_size + self.rescale_upper_size) / 2
        self.random_rescale = RandomRescale((self.rescale_lower_size, self.rescale_upper_size))
        self.random_flip = RandomFlip()
        self.random_crop = RandomCrop((self.target_size, self.target_size))
        self.middle_of_random_rescale = Rescale((self.recale_middle_size, self.recale_middle_size))
        self.center_crop = CenterCrop((self.target_size, self.target_size))
        self.to_tensor = transforms.ToTensor()
        # ImageNet channel statistics.
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])

    def parse_single_image(self, image_path):
        """Load one image and run the augmentation pipeline for self.mode.

        Returns:
            (raw_image, image): the uint8 HxWx3 numpy array after the
            geometric transforms, and the normalized float tensor (C, H, W).

        Raises:
            Exception: if self.mode is not a recognized pipeline name.
        """
        image = skimage.io.imread(image_path)
        if len(image.shape) == 2:
            # grayscale -> replicate to 3 channels
            image = skimage.color.gray2rgb(image)
        elif image.shape[2] == 4:
            print('Warning! rgba Image: {}'.format(image_path))
            image = skimage.color.rgba2rgb(image)
        if self.mode == 'Rescale':
            image = self.rescale(image)
        elif self.mode == 'Random':
            image = self.random_rescale(image)
            image = self.random_crop(image)
            image = self.random_flip(image)
        elif self.mode == 'Center':
            image = self.middle_of_random_rescale(image)
            image = self.center_crop(image)
        else:
            raise Exception("Unknown Mode!")
        # support special numpy type
        image = image.copy()
        # Force a 0~255 uint8 image here; skimage transforms may return
        # floats and behavior differed across environments otherwise.
        image = skimage.img_as_ubyte(image)
        raw_image = image.copy()
        image = self.to_tensor(image)
        if hasattr(const, "IMAGE_EMBEDDER") and const.IMAGE_EMBEDDER.lower().startswith("inception"):
            # inception preprocessing skips ImageNet normalization here
            pass
        else:
            image = self.normalize(image)
        image = image.float()
        return raw_image, image

    def build_category(self):
        """Build self.cagetoryid2real: raw categoryid -> compact class id.

        Two sources:
          * const.CATEGORY_CLASS_FILE, when set: lines of
            "<class_name> <categoryid>", grouping category ids by class
            name; also updates const.MAX_CATEGORY_NUM.
          * otherwise count_cate.csv: the first MAX_CATEGORY_NUM - 1 rows
            keep their own compact id, the rest collapse into the final
            'other' class.
        """
        if hasattr(const, 'CATEGORY_CLASS_FILE'):
            filepath = os.path.join(const.base_path, const.CATEGORY_CLASS_FILE)
            print("USE Category: {}".format(filepath))
            with open(filepath) as f:
                name2real_id = {}
                cate2real_id = {}
                cnt = 0
                for line in f:
                    line = line.strip()
                    if line == "":
                        continue
                    line = line.split(' ')
                    real_name = line[0].strip()
                    cate_id = int(line[1].strip())
                    if real_name not in name2real_id:
                        real_id = cnt
                        name2real_id[real_name] = real_id
                        cnt += 1
                    else:
                        real_id = name2real_id[real_name]
                    cate2real_id[cate_id] = real_id
                print("All categories:", name2real_id)
                print("Number of categories", len(name2real_id))
            # expose the discovered class count globally
            const.MAX_CATEGORY_NUM = len(name2real_id)
            self.cagetoryid2real = cate2real_id
        else:
            cate_df = pd.read_csv(os.path.join(const.base_path, 'count_cate.csv'))
            # ids 0 .. MAX_CATEGORY_NUM - 2 are kept as-is;
            # MAX_CATEGORY_NUM - 1 is the catch-all 'other' class
            self.cagetoryid2real = {}
            for i, row in enumerate(cate_df.itertuples()):
                if i < const.MAX_CATEGORY_NUM - 1:
                    self.cagetoryid2real[row.categoryid] = i
                else:
                    # (removed an unreachable hasattr(const,
                    # 'CATEGORY_CLASS_FILE') guard that raised here: this
                    # branch only runs when that attribute is absent)
                    self.cagetoryid2real[row.categoryid] = const.MAX_CATEGORY_NUM - 1

    def to_real_cateid(self, categoryid):
        """Map a raw category id to its compact id; unknown ids fall into
        the 'other' class (MAX_CATEGORY_NUM - 1)."""
        if categoryid in self.cagetoryid2real:
            return self.cagetoryid2real[categoryid]
        else:
            # print("Warning Unknown Cate id {}".format(categoryid))
            return const.MAX_CATEGORY_NUM - 1

class PolyvoreDataset(MyDataset):
    """Training dataset over Polyvore outfit JSON.

    Each sample is one outfit: padded image tensors and padded word-id
    sequences for the item names, plus reversed ("b_"-prefixed) variants
    and the corresponding masks.
    """

    def __init__(self, js, mode, vocab):
        # js: parsed outfit JSON (list of outfits with 'set_id' and 'items')
        # mode: image pipeline mode forwarded to MyDataset
        # vocab: Vocab object with word2id / to_id used to encode item names
        super(PolyvoreDataset, self).__init__(mode)
        self.js = js
        self.vocab = vocab
        im2type = {}
        im2idx = {}
        idx2im = []
        cnt = 0
        # index every item image as "<set_id>_<index>" and record its
        # compact category id plus a global running index
        for outfit in self.js:
            for item in outfit['items']:
                im = '%s_%i' % (outfit['set_id'], item['index'])
                im2type[im] = self.to_real_cateid(item['categoryid'])
                im2idx[im] = cnt
                idx2im.append(im)
                cnt += 1
        self.im2type = im2type
        self.im2idx  = im2idx
        self.idx2im = idx2im

    def __len__(self):
        return len(self.js)

    def plot_sample(self, i):
        """Show every (preprocessed) image of outfit i with its type id."""
        import matplotlib.pyplot as plt
        sample = self[i]
        raw_images = sample['raw_images']
        for j, raw_image in enumerate(raw_images):
            print('real_type:', sample['types'][j])
            plt.figure(dpi=72)
            plt.imshow(raw_image)

    def __getitem__(self, i):
        """Return one outfit as a dict of padded tensors (see `ret` below)."""
        sample = self.js[i]

        # number of real (non-padding) items, capped at OUTFIT_ITEM_PAD_NUM
        if len(sample['items']) >= const.OUTFIT_ITEM_PAD_NUM:
            real_item_nums = const.OUTFIT_ITEM_PAD_NUM
        else:
            real_item_nums = len(sample['items'])
        real_item_nums = torch.tensor(real_item_nums)

        raw_images = []
        images = []
        set_id = sample['set_id']
        file_pattern = os.path.join(const.base_path, 'images/' + str(set_id) + '/{}.jpg')
        image_mask = []
        types = []
        for idx in range(const.OUTFIT_ITEM_PAD_NUM):
            if idx >= len(sample['items']):
                item = sample['items'][-1]  # pad image slots with the last item
                image_mask.append(0)
            else:
                item = sample['items'][idx]
                image_mask.append(1)        
            raw_image, image = self.parse_single_image(file_pattern.format(item['index']))
            types.append(self.to_real_cateid(item['categoryid']))
            raw_images.append(raw_image)
            images.append(torch.unsqueeze(image, dim=0))
        
        # reversed-order ("backward") variants of the padded sequences
        b_images = []
        b_image_mask = []
        b_types = []
        num_images = len(sample['items'])
        for idx in range(const.OUTFIT_ITEM_PAD_NUM):
            if idx < num_images:
                b_images.append(images[num_images - idx - 1])
                b_image_mask.append(image_mask[num_images - idx - 1])
                b_types.append(types[num_images - idx - 1])
            else:
                b_images.append(images[0])
                b_image_mask.append(0)
                b_types.append(types[0])
        # convert lists to tensors
        images = torch.cat(images, dim=0)
        image_mask = torch.LongTensor(image_mask)
        types = torch.LongTensor(types)
        b_images = torch.cat(b_images, dim=0)
        b_image_mask = torch.LongTensor(b_image_mask)
        b_types = torch.LongTensor(b_types)

        # filter out name words that do not appear in the vocabulary
        all_ids = []
        all_lengths = []
        word_detail_mask = []
        word_embedding_divider = []
        for idx in range(const.OUTFIT_ITEM_PAD_NUM):
            if idx >= len(sample['items']):
                name = ""
            else:
                name = sample['items'][idx]['name']
            name = name.split()
            name_ids = []
            word_detail_mask_tmp = []
            p = 0  # index of the word currently pointed at
            cnt = 0  # number of selected (in-vocab) words
            for j in range(const.OUTFIT_NAME_PAD_NUM):
                # skip words missing from the vocabulary
                while p < len(name) and name[p] not in self.vocab.word2id:
                    p += 1
                if p < len(name):
                    name_ids.append(self.vocab.to_id(name[p]))
                    cnt += 1
                    word_detail_mask_tmp.append(1)
                else:
                    name_ids.append(self.vocab.to_id("<unk>"))
                    word_detail_mask_tmp.append(0)
                p += 1
            word_detail_mask.append(word_detail_mask_tmp)  # finer-grained per-word validity flags
            if idx >= len(sample['items']):
                all_lengths.append(-1)
            else:
                all_lengths.append(cnt)
            all_ids.append(name_ids)

        # word ids, shape (OUTFIT_ITEM_PAD_NUM, OUTFIT_NAME_PAD_NUM)
        all_ids = np.array(all_ids)
        '''
        注意，此处我们和原论文保持一致，length >= 2才算作有效
        '''
        # (translation of the note above: consistent with the original
        # paper, a name counts as valid only when its length >= 2)
        word_mask = torch.LongTensor([1 if length >= 2 else 0 for length in all_lengths])
        word_detail_mask = torch.LongTensor(word_detail_mask)
        word_embedding_divider = torch.LongTensor([length if length > 1 else 1 for length in all_lengths])
        # rarely used: -1 means no item, 0 means an item with an empty name,
        # >0 is the name length; shape is always (OUTFIT_ITEM_PAD_NUM, )
        all_lengths = torch.LongTensor(all_lengths)

        ret = {
            'raw_images': raw_images,
            'images': images,
            'image_mask': image_mask.float(),
            'item_nums': real_item_nums,
            'word_ids': all_ids,
            'word_mask': word_mask.float(),
            'word_lengths': all_lengths,
            'word_detail_mask': word_detail_mask.float(),
            'word_embedding_divider': word_embedding_divider,
            'types': types,
            'b_images': b_images,
            'b_image_mask': b_image_mask.float(),
            'b_types': b_types,
        }
        return ret


class CompatibilityBenchmarkDataset(MyDataset):
    """Benchmark dataset for the outfit-compatibility task.

    Each line of the benchmark file is a binary label followed by item
    image names ("<set_id>_<index>"); samples are padded like in training.
    """

    def __init__(self, mode, js, filepath=None):
        # mode: image pipeline mode forwarded to MyDataset
        # js: outfit JSON used to resolve each image's category id
        # filepath: benchmark file; defaults to const.VAL_FASHION_COMP_FILE
        super(CompatibilityBenchmarkDataset, self).__init__(mode)
        # load test json
        self.js = js
        im2type = {}
        im2idx = {}
        idx2im = []
        cnt = 0
        # index every item image as "<set_id>_<index>"
        for outfit in self.js:
            for item in outfit['items']:
                im = '%s_%i' % (outfit['set_id'], item['index'])
                im2type[im] = self.to_real_cateid(item['categoryid'])
                im2idx[im] = cnt
                idx2im.append(im)
                cnt += 1
        self.im2type = im2type
        self.im2idx  = im2idx
        self.idx2im = idx2im
        
        # build labels/files from the benchmark file;
        # each line is "<label> <image_name> <image_name> ..."
        self.labels = []
        self.files = []
        if filepath is None:
            filepath = os.path.join(const.base_path, const.VAL_FASHION_COMP_FILE)
        print("Fashion Compatibility Test, Use File: {}".format(filepath))
        with open(filepath) as f:
            for line in f:
                file_tmp = []
                line = line.split()
                self.labels.append(int(line[0]))
                for file in line[1:]:
                    file_tmp.append({
                        'image_name': file,
                        'real_type': im2type[file],
                        'file_path': os.path.join(
                            const.base_path,
                            'images/{}/{}.jpg',
                        ).format(file.split('_')[0], file.split('_')[1])
                    }
                        
                    )
                self.files.append(file_tmp)

    def __getitem__(self, idx):
        """Return one benchmark outfit as a dict of padded tensors.

        Word-related entries are zero/one placeholders since the benchmark
        provides no item names.
        """
        sample = self.files[idx]
        # construct im_idx (global image indices of this outfit's items)
        im_idx = []
        for item in sample:
            im_idx.append(self.im2idx[item['image_name']])
        # construct images, padded to OUTFIT_ITEM_PAD_NUM with the last item
        raw_images = []
        images = []
        image_mask = []
        types = []
        for idx in range(const.OUTFIT_ITEM_PAD_NUM):
            if idx >= len(sample):
                item = sample[-1]
                image_mask.append(0)
            else:
                item = sample[idx]
                image_mask.append(1)
            raw_image, image = self.parse_single_image(item['file_path'])
            types.append(item['real_type'])
            image = image.unsqueeze(0)
            images.append(image)
            raw_images.append(raw_image)
        # reversed-order ("backward") variants of the padded sequences
        b_images = []
        b_image_mask = []
        b_types = []
        num_images = len(sample)
        for idx in range(const.OUTFIT_ITEM_PAD_NUM):
            if idx < num_images:
                b_images.append(images[num_images - idx - 1])
                b_image_mask.append(image_mask[num_images - idx - 1])
                b_types.append(types[num_images - idx - 1])
            else:
                b_images.append(images[0])
                b_image_mask.append(0)
                b_types.append(types[0])
        
        # convert lists to tensors
        types = torch.LongTensor(types)
        images = torch.cat(images, dim=0)
        image_mask = torch.LongTensor(image_mask)
        b_types = torch.LongTensor(b_types)
        b_images = torch.cat(b_images, dim=0)
        b_image_mask = torch.LongTensor(b_image_mask)
        return {
            'im_idx': im_idx,
            'raw_images': raw_images,
            'images': images,
            'image_mask': image_mask.float(),
            'word_ids': torch.zeros((const.OUTFIT_ITEM_PAD_NUM, const.OUTFIT_NAME_PAD_NUM), dtype=torch.long),
            'word_mask': torch.zeros((const.OUTFIT_ITEM_PAD_NUM,)).float(),
            'word_detail_mask': torch.zeros((const.OUTFIT_ITEM_PAD_NUM, const.OUTFIT_NAME_PAD_NUM)).float(),
            'word_embedding_divider': torch.ones((const.OUTFIT_ITEM_PAD_NUM, )).float(),
            'types': types,
            'b_images': b_images,
            'b_image_mask': b_image_mask.float(),
            'b_types': b_types,
        }

    def __len__(self):
        return len(self.files)
    
    def get_image(self, image_name):
        """Load one preprocessed image tensor (1, C, H, W) by image name."""
        file_path =  os.path.join(const.base_path, 'images/{}/{}.jpg'.format(
                            image_name.split('_')[0],
                            image_name.split('_')[1],
                        ))
        return self.parse_single_image(file_path)[1].unsqueeze(0)
    
    def plot_sample(self, i):
        """Show every (preprocessed) image of benchmark outfit i."""
        import matplotlib.pyplot as plt
        sample = self[i]
        raw_images = sample['raw_images']
        for j, raw_image in enumerate(raw_images):
            print('real_type:', sample['types'][j])
            plt.figure(dpi=72)
            plt.imshow(raw_image)


class FITBBenchmarkDataset(MyDataset):
    """Benchmark dataset for the fill-in-the-blank (FITB) task: each sample
    is a partial outfit ('question'), candidate 'answers', and the blank
    position."""

    def __init__(self, mode, js, filepath=None):
        # mode: image pipeline mode forwarded to MyDataset
        # js: outfit JSON used to resolve each image's category id
        # filepath: FITB question file; defaults to const.VAL_FITB_FILE
        super(FITBBenchmarkDataset, self).__init__(mode)
        # load test json
        self.js = js
        im2type = {}
        im2idx = {}
        idx2im = []
        cnt = 0
        # index every item image as "<set_id>_<index>"
        for outfit in self.js:
            for item in outfit['items']:
                im = '%s_%i' % (outfit['set_id'], item['index'])
                im2type[im] = self.to_real_cateid(item['categoryid'])
                im2idx[im] = cnt
                idx2im.append(im)
                cnt += 1
        self.im2type = im2type
        self.im2idx  = im2idx
        self.idx2im = idx2im
        
        # build the question list
        if filepath is None:
            filepath = os.path.join(const.base_path, const.VAL_FITB_FILE)
        print("FITB Test, Use File: {}".format(filepath))
        self.file_pattern = os.path.join(const.base_path, 'images/{}/{}.jpg')
        # NOTE: self.js is re-assigned here to the FITB question list,
        # overwriting the outfit JSON stored above
        with open(filepath) as f:
            self.js = json.load(f)

    def __len__(self):
        return len(self.js)

    def get_image(self, image_name):
        """Load one preprocessed image tensor (1, C, H, W) by image name."""
        file_path =  os.path.join(const.base_path, 'images/{}/{}.jpg'.format(
                            image_name.split('_')[0],
                            image_name.split('_')[1],
                        ))
        return self.parse_single_image(file_path)[1].unsqueeze(0)
    
    def get_raw_image(self, image_name):
        """Load one preprocessed raw (uint8 numpy) image by image name."""
        file_path =  os.path.join(const.base_path, 'images/{}/{}.jpg'.format(
                            image_name.split('_')[0],
                            image_name.split('_')[1],
                        ))
        return self.parse_single_image(file_path)[0]

    def __getitem__(self, idx):
        """Return one FITB question: question/answer image stacks, their
        category ids, and the blank position."""
        item = self.js[idx]
        images = [self.get_image(image_name) for image_name in item['question']]
        image_types = [self.im2type[image_name] for image_name in item['question']]
        answer_images = [self.get_image(image_name) for image_name in item['answers']]
        answer_types = [self.im2type[image_name] for image_name in item['answers']]
        images = torch.cat(images, dim=0)
        image_types = torch.LongTensor(image_types)
        answer_images = torch.cat(answer_images, dim=0)
        answer_types = torch.LongTensor(answer_types)
        return {
            'images': images,
            'image_types': image_types,
            'answer_images': answer_images,
            'answer_types': answer_types,
            'blank_position': item['blank_position'],
        }
class SimpleImageDataset(torch.utils.data.Dataset):
    """Stand-alone dataset that loads images from an explicit list of paths.

    Args:
        mode: 'Rescale224' or 'Center224' — selects the preprocessing
            pipeline.
        paths: list of image file paths.
    """

    def __init__(self, mode, paths):
        self.rescale = Rescale((224, 224))
        self.rescale_lower_size = 240
        self.rescale_upper_size = 280
        self.recale_middle_size = (self.rescale_lower_size + self.rescale_upper_size) / 2
        self.middle_of_random_rescale = Rescale((self.recale_middle_size, self.recale_middle_size))
        self.center_crop = CenterCrop((224, 224))
        self.to_tensor = transforms.ToTensor()
        # ImageNet channel statistics.
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])
        self.mode = mode
        self.paths = paths

    def get_image(self, image_path):
        """Load and preprocess one image.

        Returns:
            (raw_image, image): the uint8 numpy image after the geometric
            transforms, and the normalized float tensor (1, C, H, W).

        Raises:
            Exception: if self.mode is not a recognized pipeline name.
        """
        image = io.imread(image_path)
        if len(image.shape) == 2:
            # print('Warning! Gray Image: {}'.format(image_path))
            image = skimage.color.gray2rgb(image)
        elif image.shape[2] == 4:
            # BUG FIX: this warning previously referenced the undefined
            # names `file_pattern` and `item` (copied from another class),
            # raising NameError whenever an RGBA image was loaded.
            print('Warning! rgba Image: {}'.format(image_path))
            image = skimage.color.rgba2rgb(image)
        if self.mode == 'Rescale224':
            image = self.rescale(image)
        elif self.mode == 'Center224':
            image = self.middle_of_random_rescale(image)
            image = self.center_crop(image)
        else:
            raise Exception("Unknown Mode!")
        # support special numpy type 
        image = image.copy()
        # Force a 0~255 uint8 image; behavior differed across environments
        # otherwise.
        image = skimage.img_as_ubyte(image)
        raw_image = image.copy()
        image = self.to_tensor(image)
        image = self.normalize(image)
        image = image.float()
        image = image.unsqueeze(0)
        return raw_image, image
    
    def __len__(self):
        return len(self.paths)
    
    def __getitem__(self, idx):
        path = self.paths[idx]
        raw_image, image = self.get_image(path)
        return {
            'raw_image': raw_image,
            'images': image,
        }