import torch
import torch.utils.data
from src.const import base_path
import numpy as np
import cv2
from torchvision import transforms
import matplotlib.pyplot as plt
import pandas as pd
from skimage import io, transform
import skimage
from src import const
import json
import os
import nltk
from src.utils import load_json, build_vocab, Vocab
import random
from sklearn.metrics import roc_auc_score

class RandomFlip(object):
    """Horizontally flip an image with probability 0.5.

    Expects an ndarray of shape (H, W) or (H, W, C) and returns either the
    original array or a left-right flipped view of it.
    """

    def __call__(self, image):
        # np.fliplr returns a view (negative stride); the pipeline calls
        # .copy() downstream, so no copy is needed here.
        if np.random.rand() > 0.5:
            image = np.fliplr(image)

        return image


class CenterCrop(object):
    """Crop the central region of an image.

    Args:
        output_size (tuple or int): Desired (h, w) output size; an int
            yields a square crop.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        assert len(output_size) == 2
        self.output_size = output_size

    def __call__(self, image):
        crop_h, crop_w = self.output_size
        height, width = image.shape[:2]
        row0 = int((height - crop_h) / 2)
        col0 = int((width - crop_w) / 2)
        return image[row0:row0 + crop_h, col0:col0 + crop_w]


class RandomCrop(object):
    """Crop the image at a uniformly random location.

    Args:
        output_size (tuple or int): Desired output size. If int, a square
            crop is made.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        assert len(output_size) == 2
        self.output_size = output_size

    def __call__(self, image):
        crop_h, crop_w = self.output_size
        height, width = image.shape[:2]
        # random.randint is inclusive on both ends, so every valid offset
        # (including flush against the bottom/right edge) can be drawn.
        row0 = random.randint(0, height - crop_h)
        col0 = random.randint(0, width - crop_w)
        return image[row0:row0 + crop_h, col0:col0 + crop_w]


class RandomRescale(object):
    """Rescale an image so its shorter edge becomes a random size.

    Args:
        output_size_range (tuple): (low, high) inclusive range for the
            target length of the shorter edge.
    """

    def __init__(self, output_size_range):
        assert isinstance(output_size_range, tuple)
        self.lower_size = int(output_size_range[0])
        self.upper_size = int(output_size_range[1])

    def gen_output_size(self):
        """Draw a short-edge length uniformly from [lower, upper]."""
        return random.randint(self.lower_size, self.upper_size)

    def __call__(self, image):
        height, width = image.shape[:2]
        short_edge = self.gen_output_size()
        # Scale the shorter edge to short_edge, keeping the aspect ratio;
        # the longer edge is truncated to an integer.
        if height > width:
            new_shape = (int(short_edge * height / width), short_edge)
        else:
            new_shape = (short_edge, int(short_edge * width / height))

        return transform.resize(image, new_shape, mode='constant')


class Rescale(object):
    """Rescale an image to a given size.

    Args:
        output_size (tuple or int): If tuple, the exact (h, w) output. If
            int, the shorter image edge is scaled to this value, keeping
            the aspect ratio.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, image):
        height, width = image.shape[:2]
        if not isinstance(self.output_size, int):
            new_h, new_w = self.output_size
        elif height > width:
            new_h, new_w = self.output_size * height / width, self.output_size
        else:
            new_h, new_w = self.output_size, self.output_size * width / height

        # Truncate any fractional dimension from the aspect-ratio math.
        return transform.resize(image, (int(new_h), int(new_w)), mode='constant')


class RandomGamma(object):
    """Apply gamma correction with gamma drawn uniformly from a range."""

    def __init__(self, gamma_range):
        self.lower_gamma = gamma_range[0]
        self.upper_gamma = gamma_range[1]

    def __call__(self, image):
        gamma = random.uniform(self.lower_gamma, self.upper_gamma)
        return skimage.exposure.adjust_gamma(image, gamma)


class MyDataset(torch.utils.data.Dataset):
    """Base dataset: image loading/augmentation plus category-id mapping.

    Args:
        mode: 'Rescale' (plain resize to target_size x target_size),
            'Random' (training augmentation: random short-edge rescale ->
            random crop -> random flip) or 'Center' (evaluation: rescale to
            the middle of the random range -> center crop).
        lower_size / upper_size: short-edge range for the random rescale.
        target_size: final square side length fed to the network.
    """

    def __init__(self, mode, lower_size=125, upper_size=150, target_size=112):
        self.lower_size = lower_size
        self.upper_size = upper_size
        self.target_size = target_size
        self.build_image_transform()
        self.mode = mode
        self.build_category()

    def build_image_transform(self):
        """Instantiate the transform objects used by parse_single_image."""
        self.rescale = Rescale((self.target_size, self.target_size))
        self.rescale_lower_size = self.lower_size
        self.rescale_upper_size = self.upper_size
        # Midpoint of the random-rescale range, used for deterministic eval.
        # (Attribute keeps the historical 'recale' spelling in case external
        # code reads it.)
        self.recale_middle_size = (self.rescale_lower_size + self.rescale_upper_size) / 2
        self.random_rescale = RandomRescale((self.rescale_lower_size, self.rescale_upper_size))
        self.random_flip = RandomFlip()
        self.random_crop = RandomCrop((self.target_size, self.target_size))
        self.middle_of_random_rescale = Rescale((self.recale_middle_size, self.recale_middle_size))
        self.center_crop = CenterCrop((self.target_size, self.target_size))
        self.to_tensor = transforms.ToTensor()
        # ImageNet statistics, matching torchvision pretrained backbones.
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])

    def parse_single_image(self, image_path):
        """Load one image and apply this dataset's mode transforms.

        Returns:
            (raw_image, image): the uint8 ndarray after the geometric
            transforms, and the normalized float tensor for the network.

        Raises:
            Exception: if self.mode is not one of the supported modes.
        """
        image = skimage.io.imread(image_path)
        if len(image.shape) == 2:
            # Grayscale -> replicate to 3 channels.
            image = skimage.color.gray2rgb(image)
        elif image.shape[2] == 4:
            print('Warning! rgba Image: {}'.format(image_path))
            image = skimage.color.rgba2rgb(image)
        if self.mode == 'Rescale':
            image = self.rescale(image)
        elif self.mode == 'Random':
            image = self.random_rescale(image)
            image = self.random_crop(image)
            image = self.random_flip(image)
        elif self.mode == 'Center':
            image = self.middle_of_random_rescale(image)
            image = self.center_crop(image)
        else:
            raise Exception("Unknown Mode!")
        # Materialize any view (e.g. from fliplr) into a contiguous buffer.
        image = image.copy()
        # Force a 0-255 uint8 image; skimage transforms may return floats
        # in [0, 1] depending on the environment.
        image = skimage.img_as_ubyte(image)
        raw_image = image.copy()
        image = self.to_tensor(image)
        image = self.normalize(image)
        image = image.float()
        return raw_image, image

    def build_category(self):
        """Build self.cagetoryid2real: raw category id -> compact class index.

        If const.CATEGORY_CLASS_FILE is defined, read "name cate_id" lines
        and merge category ids that share a name into a single class (also
        updating const.MAX_CATEGORY_NUM). Otherwise keep the first
        MAX_CATEGORY_NUM - 1 categories from count_cate.csv and map every
        remaining id to the last ("other") class.
        """
        if hasattr(const, 'CATEGORY_CLASS_FILE'):
            filepath = os.path.join(const.base_path, const.CATEGORY_CLASS_FILE)
            print("USE Category: {}".format(filepath))
            name2real_id = {}
            cate2real_id = {}
            with open(filepath) as f:
                for line in f:
                    line = line.strip()
                    if line == "":
                        continue
                    parts = line.split(' ')
                    real_name = parts[0].strip()
                    cate_id = int(parts[1].strip())
                    if real_name not in name2real_id:
                        # First occurrence of this name: assign next index.
                        name2real_id[real_name] = len(name2real_id)
                    cate2real_id[cate_id] = name2real_id[real_name]
            print("All categories:", name2real_id)
            print("Number of categories", len(name2real_id))
            const.MAX_CATEGORY_NUM = len(name2real_id)
            self.cagetoryid2real = cate2real_id
        else:
            cate_df = pd.read_csv(os.path.join(const.base_path, 'count_cate.csv'))
            # Indices 0 .. MAX_CATEGORY_NUM - 2 are real classes;
            # MAX_CATEGORY_NUM - 1 is the catch-all "other" class.
            # (A re-check of CATEGORY_CLASS_FILE used to live here, but it
            # was unreachable inside this else branch and has been removed.)
            self.cagetoryid2real = {}
            for i, row in enumerate(cate_df.itertuples()):
                if i < const.MAX_CATEGORY_NUM - 1:
                    self.cagetoryid2real[row.categoryid] = i
                else:
                    self.cagetoryid2real[row.categoryid] = const.MAX_CATEGORY_NUM - 1

    def to_real_cateid(self, categoryid):
        """Map a raw category id to its compact index.

        Unknown ids fall into the last ("other") class.
        """
        if categoryid in self.cagetoryid2real:
            return self.cagetoryid2real[categoryid]
        return const.MAX_CATEGORY_NUM - 1


class PolyvoreDataset(MyDataset):
    """Dataset over a Polyvore outfit json plus a text vocabulary.

    mode:
        Rescale: resize directly to the target square size.
        Random:  random rescale -> random crop -> random flip.
        Center:  rescale to the middle size -> center crop.
    """

    def __init__(self, js, mode, vocab):
        super(PolyvoreDataset, self).__init__(mode)
        self.js = js
        self.vocab = vocab

    def parse_text(self, text):
        """Encode an item name as a fixed-length word-id sequence.

        Keeps the first const.OUTFIT_NAME_PAD_NUM in-vocabulary words and
        pads the tail with <unk> ids (mask 0).

        Returns:
            (text_ids, text_mask, embedding_divider): LongTensor of ids,
            FloatTensor mask, and a scalar tensor holding the number of
            kept words (minimum 1) for averaging word embeddings.
        """
        kept = [w for w in text.split() if w in self.vocab.word2id]
        kept = kept[:const.OUTFIT_NAME_PAD_NUM]
        pad_len = const.OUTFIT_NAME_PAD_NUM - len(kept)
        unk_id = self.vocab.to_id("<unk>")
        text_ids = torch.LongTensor(
            [self.vocab.to_id(w) for w in kept] + [unk_id] * pad_len
        )
        text_mask = torch.FloatTensor([1] * len(kept) + [0] * pad_len)
        if len(kept) > 1:
            embedding_divider = torch.tensor(float(len(kept)))
        else:
            embedding_divider = torch.tensor(1.)
        return text_ids, text_mask, embedding_divider


class PolyvoreTripletDataset(PolyvoreDataset):
    """Triplet dataset yielding (anchor, positive, negative) outfit items."""

    def __init__(self, js, mode, vocab):
        """mode is forwarded to MyDataset: 'Rescale', 'Random' or 'Center'."""
        super(PolyvoreTripletDataset, self).__init__(js, mode, vocab)
        self.build_pos_pairs()

    def build_pos_pairs(self):
        """Enumerate all ordered in-outfit item pairs and sampling indexes.

        Populates:
            pos_pairs: [outfit_idx, anchor_item_idx, pos_item_idx] triples.
            type_outfit2im / outfit2im: lookup tables for negative sampling.
            type2outfit_list / outfit_list: precomputed key lists so
                sampling does not rebuild them per call.
        """
        print('building positive pairs...')
        im2type = {}
        imnames = set()
        for outfit in self.js:
            for item in outfit['items']:
                im = '%s_%i' % (outfit['set_id'], item['index'])
                im2type[im] = self.to_real_cateid(item['categoryid'])
                imnames.add(im)
        self.imnames = list(imnames)
        self.im2type = im2type

        pos_pairs = []
        type_outfit2im = {}
        outfit2im = {}
        for i, outfit in enumerate(self.js):
            items = outfit['items']
            cnt = len(items)
            outfit_id = outfit['set_id']
            for j in range(cnt):
                item_type = self.to_real_cateid(items[j]['categoryid'])
                if item_type not in type_outfit2im:
                    type_outfit2im[item_type] = {}
                if outfit_id not in type_outfit2im[item_type]:
                    type_outfit2im[item_type][outfit_id] = []
                type_outfit2im[item_type][outfit_id].append((i, j))
                if outfit_id not in outfit2im:
                    outfit2im[outfit_id] = []
                outfit2im[outfit_id].append((i, j))
                # NOTE: unlike the original paper, pairs are ordered here,
                # so both (j, k) and (k, j) are emitted; any j != k is fine.
                for k in range(cnt):
                    if (j != k):
                        pos_pairs.append([i, j, k])

        self.pos_pairs = pos_pairs
        self.type_outfit2im = type_outfit2im
        self.outfit2im = outfit2im
        # Precompute key lists so negative sampling avoids rebuilding them.
        self.type2outfit_list = {}
        for item_type in self.type_outfit2im:
            self.type2outfit_list[item_type] = list(self.type_outfit2im[item_type].keys())
        self.outfit_list = list(self.outfit2im.keys())

    def parse_item(self, data_index, index):
        """Load one item: image tensors, encoded name and compact type."""
        outfit_id = self.js[data_index]['set_id']
        item = self.js[data_index]['items'][index]
        image_path = os.path.join(const.base_path, 'images/{}/{}.jpg'.format(outfit_id, item['index']))
        raw_image, image = self.parse_single_image(image_path)
        text_ids, text_mask, embedding_divider = self.parse_text(item['name'])
        item_type = self.to_real_cateid(item['categoryid'])
        return raw_image, image, text_ids, text_mask, embedding_divider, item_type

    def sample_negative(self, data_index, item_type):
        """Sample a (data_index, item_index) from a different outfit.

        With const.NEGATIVE_SAMPLE_WITH_TYPE, the negative comes from an
        outfit containing an item of the same type; otherwise from any
        other outfit. Assumes at least two candidate outfits exist, else
        the rejection loop would not terminate.
        """
        outfit_id = self.js[data_index]['set_id']
        if const.NEGATIVE_SAMPLE_WITH_TYPE:
            # Draw an outfit (other than the anchor's) containing this type.
            choice = np.random.choice(self.type2outfit_list[item_type])
            while choice == outfit_id:
                choice = np.random.choice(self.type2outfit_list[item_type])
            items = self.type_outfit2im[item_type][choice]
        else:
            # Draw any outfit other than the anchor's.
            choice = np.random.choice(self.outfit_list)
            while choice == outfit_id:
                choice = np.random.choice(self.outfit_list)
            items = self.outfit2im[choice]
        item_index = np.random.choice(range(len(items)))
        return items[item_index]

    def __getitem__(self, index):
        """Return an (anchor, positive, negative) triple of parsed items.

        When NEGATIVE_SAMPLE_WITH_TYPE is set, items 2 and 3 share the same
        type; other type relations are not guaranteed.
        """
        data_index, anchor_index, pos_index = self.pos_pairs[index]
        raw_image1, image1, text_ids1, text_mask1, embedding_divider1, item_type1 = self.parse_item(data_index, anchor_index)
        raw_image2, image2, text_ids2, text_mask2, embedding_divider2, item_type2 = self.parse_item(data_index, pos_index)
        neg_data_index, neg_index = self.sample_negative(data_index, item_type2)
        raw_image3, image3, text_ids3, text_mask3, embedding_divider3, item_type3 = self.parse_item(neg_data_index, neg_index)
        return (raw_image1, image1, text_ids1, text_mask1, embedding_divider1, item_type1,
                raw_image2, image2, text_ids2, text_mask2, embedding_divider2, item_type2,
                raw_image3, image3, text_ids3, text_mask3, embedding_divider3, item_type3,
               )

    def __len__(self):
        return len(self.pos_pairs)

    def plot_sample(self, index):
        """Visual sanity check: show the triple's raw images with types."""
        (raw_image1, image1, text_ids1, text_mask1, embedding_divider1, item_type1,
         raw_image2, image2, text_ids2, text_mask2, embedding_divider2, item_type2,
         raw_image3, image3, text_ids3, text_mask3, embedding_divider3, item_type3,
        ) = self[index]
        print(item_type1)
        plt.imshow(raw_image1)
        plt.show()
        print(item_type2)
        plt.imshow(raw_image2)
        plt.show()
        print(item_type3)
        plt.imshow(raw_image3)
        plt.show()

class CompatibilityBenchmarkDataset(MyDataset):
    """Outfit-compatibility benchmark: scores outfits and reports ROC AUC.

    Each line of the benchmark file is "<label> <im_1> <im_2> ..." where
    image names look like "<set_id>_<index>".
    """

    def __init__(self, mode, js, filepath=None):
        super(CompatibilityBenchmarkDataset, self).__init__(mode)
        # Map every image name in the test json to its compact category.
        self.js = js
        im2type = {}
        imnames = set()
        for outfit in self.js:
            for item in outfit['items']:
                im = '%s_%i' % (outfit['set_id'], item['index'])
                im2type[im] = self.to_real_cateid(item['categoryid'])
                imnames.add(im)
        self.imnames = list(imnames)
        self.im2type = im2type

        # Parse the benchmark file: label plus the outfit's image names.
        self.labels = []
        self.files = []
        self.cates = []
        if filepath is None:
            filepath = os.path.join(const.base_path, const.VAL_FASHION_COMP_FILE)
        print("Fashion Compatibility Test, Use File: {}".format(filepath))
        with open(filepath) as f:
            for line in f:
                parts = line.split()
                if not parts:
                    # Tolerate blank lines (previously raised IndexError).
                    continue
                self.labels.append(int(parts[0]))
                file_tmp = list(parts[1:])
                cate_tmp = [im2type[name] for name in file_tmp]
                self.files.append(file_tmp)
                self.cates.append(cate_tmp)

    def calculate(self, net):
        """Score every outfit with net and return the ROC AUC.

        Scores are treated as distances: smaller means more compatible,
        hence 1 - scores before the AUC against the positive labels.
        """
        with torch.no_grad():
            net.eval()
            scores = []
            for i in range(len(self.files)):
                if (i + 1) % 10 == 0:
                    print("Val Comp Step [{} / {}]".format(i + 1, len(self.files)))
                images = [
                    self.parse_single_image(
                        os.path.join(const.base_path, 'images/{}/{}.jpg'.format(
                            image_name.split('_')[0],
                            image_name.split('_')[1],
                        ))
                    )[1].unsqueeze(0)
                    for image_name in self.files[i]
                ]
                types = self.cates[i]
                images = torch.cat(images).to(const.device)
                score = net.cal_compatibility_score(images, types)
                scores.append(score)
            # NOTE(review): assumes each score is a CPU scalar (or 0-d
            # tensor convertible by np.array) — confirm against the net.
            scores = np.array(scores)
            return roc_auc_score(self.labels, 1 - scores)

    def __len__(self):
        return len(self.files)


class FITBBenchmarkDataset(MyDataset):
    """Fill-in-the-blank benchmark: pick the right item among candidates."""

    def __init__(self, mode, test_js, filepath=None):
        super(FITBBenchmarkDataset, self).__init__(mode)
        # Map every image name in the test json to its compact category.
        im2type = {}
        imnames = set()
        for outfit in test_js:
            for item in outfit['items']:
                im = '%s_%i' % (outfit['set_id'], item['index'])
                im2type[im] = self.to_real_cateid(item['categoryid'])
                imnames.add(im)
        self.imnames = list(imnames)
        self.im2type = im2type

        # BUG FIX: filepath used to be unconditionally overwritten with
        # const.VAL_FITB_FILE; an explicit argument is now honored.
        if filepath is None:
            filepath = os.path.join(const.base_path, const.VAL_FITB_FILE)
        with open(filepath) as f:
            print("FITB use file: {}".format(filepath))
            self.js = json.load(f)

    def _image_path(self, image_name):
        # "set_id_index" -> images/<set_id>/<index>.jpg
        parts = image_name.split('_')
        return os.path.join(const.base_path,
                            'images/{}/{}.jpg'.format(parts[0], parts[1]))

    def get_image(self, image_name):
        """Return the normalized image tensor with a leading batch dim."""
        return self.parse_single_image(self._image_path(image_name))[1].unsqueeze(0)

    def get_raw_image(self, image_name):
        """Return the raw uint8 image after the mode's geometric transforms."""
        return self.parse_single_image(self._image_path(image_name))[0]

    def calculate(self, net):
        """Return FITB accuracy: the fraction of questions where the net
        ranks the ground-truth answer (index 0) first."""
        with torch.no_grad():
            net.eval()
            correct = 0.
            cnt = 0.
            for i, item in enumerate(self.js):
                if (i + 1) % 10 == 0:
                    print("Val FITB Step [{} / {}]".format(i + 1, len(self.js)))
                images = torch.cat([self.get_image(image_name) for image_name in item['question']]).to(const.device)
                answer_images = torch.cat([self.get_image(image_name) for image_name in item['answers']]).to(const.device)
                types = [self.im2type[image_name] for image_name in item['question']]
                answer_types = [self.im2type[image_name] for image_name in item['answers']]
                index, dists = net.cal_ans(images, types, answer_images, answer_types)
                if index == 0:
                    # The ground-truth answer is always listed first.
                    correct += 1.
                cnt += 1
            return correct / cnt
        