# coding=utf8
from __future__ import division
import os
import torch
import torch.utils.data as data
import numpy as np
import pandas as pd
import cv2
from collections import OrderedDict
from itertools import chain
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import DataLoaderIter
from torch.utils.data.dataloader import default_collate
# from FSdataset import attr2idx_map, idx2attr_map, attr2catidx_map, label_map
cv2.setNumThreads(0)

class multiDataLoader(object):
    """Hold one DataLoader per dataset and pull batches from any of them.

    Each dataset gets its own DataLoader/iterator; ``next(i)`` fetches a
    batch from loader ``i`` and transparently rebuilds that loader when
    it is exhausted.  Global progress is tracked against a "virtual"
    epoch defined by ``vir_bs`` / ``vir_datalen`` (defaults taken from
    loader 0), so heterogeneous loaders share one epoch counter.
    """

    def __init__(self, *args, **kwargs):
        '''
        :param args: datasets, one per loader.
        :param kwargs:
        {
            'bs': batch size, default 1,
            'num_workers': workers per loader, default 1,
            'shuffle': whether to shuffle, default False,
            'pin_memory': pin memory for each loader, default False,
            'collate_fn': collate_fn for each loader, default default_collate,
            'vir_start_epochs': starting virtual epoch, default 0,
            'start_step': starting global step, default -1,
            'vir_bs': virtual batch size, default bs of loader 0,
            'vir_datalen': virtual dataset length, default len(datasets[0]),
        }
        Each per-loader option may be a single object (shared by all
        loaders) or a list with one entry per dataset.
        '''
        self.num_datasets = len(args)
        self.data_sets = args
        self.start_step = kwargs.get('start_step', -1)

        # Normalize every per-loader option to a list of length num_datasets.
        self.bs = self._expand(kwargs.get('bs', 1),
                               lambda e: isinstance(e, int), 'bs', 'int')
        self.num_workers = self._expand(kwargs.get('num_workers', 1),
                                        lambda e: isinstance(e, int), 'num_workers', 'int')
        self.shuffle = self._expand(kwargs.get('shuffle', False),
                                    lambda e: isinstance(e, bool), 'shuffle', 'bool')
        self.pin_memory = self._expand(kwargs.get('pin_memory', False),
                                       lambda e: isinstance(e, bool), 'pin_memory', 'bool')
        self.collate_fn = self._expand(kwargs.get('collate_fn', default_collate),
                                       callable, 'collate_fn', 'callable')

        self.vir_start_epochs = kwargs.get('vir_start_epochs', 0)

        # Delegate loader construction to build() so __init__ and the
        # rebuild-on-exhaustion path cannot drift apart.
        self.data_loaders = {}
        self.data_iters = {}
        for i in range(self.num_datasets):
            self.build(i)

        # Virtual-epoch bookkeeping: one global step counter, advanced on
        # every fetch regardless of which loader served the batch.
        self.vir_bs = kwargs.get('vir_bs', self.bs[0])
        self.vir_datalen = kwargs.get('vir_datalen', len(self.data_sets[0]))
        self.vir_bc_per_epoch = self.vir_datalen // self.vir_bs

        self.Iterations = {i: 0 for i in range(self.num_datasets)}
        self.Step = self.start_step
        self.VirEpoch = self.vir_start_epochs

    def _expand(self, value, is_elem, name, type_name):
        # Broadcast a scalar option to all loaders, or validate that a
        # list option has one well-typed entry per dataset.
        if not isinstance(value, list):
            assert is_elem(value), \
                '%s should be a %s or a list of %s' % (name, type_name, type_name)
            return [value] * self.num_datasets
        assert all(is_elem(elem) for elem in value), \
            '%s should be a %s or a list of %s' % (name, type_name, type_name)
        assert len(value) == self.num_datasets, \
            'list of %s should have length of num of datasets' % name
        return value

    def build(self, i):
        """(Re)create the DataLoader and its iterator for dataset ``i``."""
        # Bug fix: num_workers was validated in __init__ but never
        # forwarded to the DataLoader, so every loader silently ran
        # single-process.  It is now passed through.
        self.data_loaders[i] = DataLoader(
            dataset=self.data_sets[i],
            batch_size=self.bs[i],
            shuffle=self.shuffle[i],
            num_workers=self.num_workers[i],
            pin_memory=self.pin_memory[i],
            collate_fn=self.collate_fn[i],
        )
        # iter(DataLoader) returns the loader's iterator on both old and
        # current torch; the DataLoaderIter class was removed upstream.
        self.data_iters[i] = iter(self.data_loaders[i])

    def next(self, i):
        """Return the next batch from loader ``i``.

        On exhaustion the loader is rebuilt (a new pass over dataset
        ``i``) and its per-loader counter resets to 1.  The global
        ``Step`` always advances and drives ``VirEpoch``.
        """
        try:
            batch = next(self.data_iters[i])
            self.Iterations[i] += 1
        except StopIteration:
            self.build(i)
            self.Iterations[i] = 1
            batch = next(self.data_iters[i])
        self.Step += 1
        if self.Step % self.vir_bc_per_epoch == 0:
            self.VirEpoch += 1
        return batch


if __name__ == '__main__':
    from sklearn.model_selection import train_test_split
    from LMdataset import LMdata
    from LMaug import *
    from utils.preprocessing import *

    resize_rate = 1

    class trainAug(object):
        """Training-time augmentation: rotate, resize, flip, gaussian mask, normalize."""
        def __init__(self, input_size):
            self.augment = Compose([
                RandomRotate(angles=[-20., 20.], bound=False),
                ResizeImg(size=input_size),
                RandomHflip(),
                GenGauMask(r=resize_rate),
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

        def __call__(self, *args):
            return self.augment(*args)

    class valAug(object):
        """Validation-time augmentation: resize + normalize only."""
        def __init__(self, input_size):
            self.augment = Compose([
                ResizeImg(size=input_size),
                Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ])

        def __call__(self, *args):
            return self.augment(*args)

    os.environ["CUDA_VISIBLE_DEVICES"] = "2"

    # Prefix every annotation row's relative image path with its split root.
    img_root = "/media/gserver/data/landmark/rawdata"
    annotation = pd.read_csv(os.path.join(img_root, "train/Annotations/train.csv"))
    annotation['image_id'] = annotation['image_id'].apply(lambda x: os.path.join(img_root, 'train', x))
    warmup = pd.read_csv(os.path.join(img_root, "warm/Annotations/annotations.csv"))
    warmup['image_id'] = warmup['image_id'].apply(lambda x: os.path.join(img_root, 'warm', x))

    # Stratified split keeps every image_category represented in val.
    train_pd, val_pd = train_test_split(annotation, test_size=0.1, random_state=42,
                                        stratify=annotation['image_category'])

    val_pd = drop_shitdata(val_pd.copy())
    # Fold the warm-up annotations into the training split only.
    train_pd = pd.concat([train_pd, warmup], axis=0, ignore_index=True)
    train_pd.index = range(train_pd.shape[0])

    # Two training datasets over the same table at different input resolutions.
    data_set = {'train': [
        LMdata(train_pd, trainAug(input_size=(336, 336))),
        LMdata(train_pd, trainAug(input_size=(512, 512))),
    ]}

    multi_loader = multiDataLoader(*data_set['train'], bs=8, num_workers=1)
    print(multi_loader.vir_bc_per_epoch)
    print(multi_loader.vir_start_epochs)

    batch = multi_loader.next(0)
    print(batch[0].size())

    batch = multi_loader.next(0)
    print(batch[0].size())

    batch = multi_loader.next(1)
    print(batch[0].size())

    print(multi_loader.Iterations)
    print(multi_loader.VirEpoch)
    print(multi_loader.Step)