# coding=utf8
from __future__ import division
import os
import torch
import torch.utils.data as data
import numpy as np
import pandas as pd
import cv2
from collections import OrderedDict
from itertools import chain
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import DataLoaderIter

# from FSdataset import attr2idx_map, idx2attr_map, attr2catidx_map, label_map
# from LMdataset import cate2idx, idx2cate, Skdict
from FSdataset import FSdata
from FSdataset import collate_fn as FScollfn
from LMdataset import LMdata
from LMdataset import collate_fn as LMcollfn

class DataProvider:
    """Serve training batches from two datasets (FS: fashion attribute,
    LM: landmark), transparently rebuilding each training loader when it
    is exhausted so callers can keep pulling batches across epochs.

    Validation loaders are built once in ``__init__``; training loaders
    (and their iterators) are built lazily on first ``FSnext``/``LMnext``.

    Parameters
    ----------
    FSroot_path : str
        Root directory of the FS image data.
    FSanno_pd, LManno_pd : dict
        ``{'train': DataFrame, 'val': DataFrame}`` annotation tables.
    FSAug, LMAug : dict
        ``{'train': callable, 'val': callable}`` augmentation factories;
        each is called with no arguments to build the transform.
    FSselect_AttrIdx : sequence
        Attribute indices forwarded to ``FSdata``'s ``select`` argument.
    FSbatch_size, LMbatch_size : dict
        ``{'train': int, 'val': int}`` batch sizes.
    """

    def __init__(self, FSroot_path, FSanno_pd, FSAug, FSselect_AttrIdx,
                 LManno_pd, LMAug,
                 FSbatch_size, LMbatch_size):
        self.FSbatch_size = FSbatch_size
        self.LMbatch_size = LMbatch_size

        self.FSdataset = {
            'train': FSdata(root_path=FSroot_path,
                            anno_pd=FSanno_pd['train'],
                            transforms=FSAug['train'](),
                            select=FSselect_AttrIdx),
            'val': FSdata(root_path=FSroot_path,
                          anno_pd=FSanno_pd['val'],
                          transforms=FSAug['val'](),
                          select=FSselect_AttrIdx),
        }

        self.LMdataset = {
            'train': LMdata(LManno_pd['train'], LMAug['train']()),
            'val': LMdata(LManno_pd['val'], LMAug['val']()),
        }

        # Training loaders/iterators are created lazily by FSbuild/LMbuild.
        self.FSdataloader = {'train': None, 'val': None}
        self.LMdataloader = {'train': None, 'val': None}
        self.FSdataiter = {'train': None, 'val': None}
        self.LMdataiter = {'train': None, 'val': None}

        # Progress counters; iteration is reset to 1 at each epoch rollover.
        self.FSiteration = 0
        self.LMiteration = 0
        self.FSepoch = 0
        self.LMepoch = 0

        # Validation data never reshuffles, so build these loaders once.
        self.FSdataloader['val'] = DataLoader(
                self.FSdataset['val'], self.FSbatch_size['val'],
                num_workers=4, shuffle=False, pin_memory=False, collate_fn=FScollfn)

        self.LMdataloader['val'] = DataLoader(
                self.LMdataset['val'], self.LMbatch_size['val'],
                num_workers=4, shuffle=False, pin_memory=False, collate_fn=LMcollfn)

    def FSbuild(self):
        """(Re)create the FS training loader and a fresh iterator over it."""
        self.FSdataloader['train'] = DataLoader(
                self.FSdataset['train'], self.FSbatch_size['train'],
                num_workers=4, shuffle=True, pin_memory=False, collate_fn=FScollfn)

        # iter(loader) replaces direct use of the private DataLoaderIter
        # class (removed in modern torch); behavior is identical.
        self.FSdataiter['train'] = iter(self.FSdataloader['train'])

    def LMbuild(self):
        """(Re)create the LM training loader and a fresh iterator over it."""
        self.LMdataloader['train'] = DataLoader(
                self.LMdataset['train'], self.LMbatch_size['train'],
                num_workers=4, shuffle=True, pin_memory=False, collate_fn=LMcollfn)

        self.LMdataiter['train'] = iter(self.LMdataloader['train'])

    def FSnext(self):
        """Return the next FS training batch.

        When the current iterator is exhausted, bump ``FSepoch``, rebuild
        a reshuffled loader, and return that epoch's first batch.
        """
        if self.FSdataiter['train'] is None:
            self.FSbuild()

        try:
            # next() builtin instead of .next(): works on Python 2 and 3.
            batch = next(self.FSdataiter['train'])
            self.FSiteration += 1
            return batch
        except StopIteration:
            self.FSepoch += 1
            self.FSbuild()
            self.FSiteration = 1
            return next(self.FSdataiter['train'])

    def LMnext(self):
        """Return the next LM training batch.

        When the current iterator is exhausted, bump ``LMepoch``, rebuild
        a reshuffled loader, and return that epoch's first batch.
        """
        if self.LMdataiter['train'] is None:
            self.LMbuild()

        try:
            batch = next(self.LMdataiter['train'])
            self.LMiteration += 1
            return batch
        except StopIteration:
            self.LMepoch += 1
            self.LMbuild()
            self.LMiteration = 1
            return next(self.LMdataiter['train'])


if __name__ == '__main__':
    # Smoke test: build both dataset splits, then alternate pulling batches
    # to exercise DataProvider's epoch-rollover logic.
    # NOTE(review): this script uses Python 2 print statements and
    # hard-coded local paths; it will not run as-is elsewhere.
    from FSaugClass import FSAugTrain, FSAugVal
    from LMaugClass import LMAugTrain, LMAugVal
    from LMaugClass import drop_shitdata
    from sklearn.model_selection import train_test_split
    from FSdataset import idx2attr_map

    # prepare FS dataset
    FSroot_path = '/media/gserver/data/FashionAI'
    all_pd = pd.read_csv(os.path.join(FSroot_path, 'base/Annotations/label.csv'),
                         header=None, names=['ImageName', 'AttrKey', 'AttrValues'])
    all_pd['ImageName'] = all_pd['ImageName'].apply(lambda x: os.path.join('base', x))

    # Stratify on AttrKey so train/val keep the same attribute-category mix.
    train_pd, val_pd = train_test_split(all_pd, test_size=0.1, random_state=37,
                                        stratify=all_pd['AttrKey'])

    # select part
    # Keep only rows whose attribute key falls in the first 8 categories.
    select_AttrIdx = range(8)
    select_AttrKey = [idx2attr_map[x] for x in select_AttrIdx]
    train_pd = train_pd[train_pd['AttrKey'].apply(lambda x: True if x in select_AttrKey else False)]
    val_pd = val_pd[val_pd['AttrKey'].apply(lambda x: True if x in select_AttrKey else False)]

    # Extra web-crawled skirt-length annotations are appended to train only
    # (note the 'legth' typo is in the actual filename on disk).
    add_skirt = pd.read_csv(os.path.join(FSroot_path, 'base/Annotations/data_add_skirt_legth.csv'),
                            header=None, names=['ImageName', 'AttrKey', 'AttrValues'])
    add_skirt['ImageName'] = add_skirt['ImageName'].apply(lambda x: os.path.join('web', x))
    train_pd = pd.concat([train_pd, add_skirt], axis=0, ignore_index=True)

    FStrain_pd = train_pd
    FSval_pd = val_pd

    print FStrain_pd.shape, FSval_pd.shape


    #  prepare LM dataset
    img_root = "/media/gserver/data/landmark/rawdata/"
    annotation = pd.read_csv(os.path.join(img_root, "train/Annotations/train.csv"))
    annotation['image_id'] = annotation['image_id'].apply(lambda x: os.path.join(img_root,'train', x))
    warmup = pd.read_csv(os.path.join(img_root, "warm/Annotations/annotations.csv"))
    warmup['image_id'] = warmup['image_id'].apply(lambda x: os.path.join(img_root, 'warm', x))


    # Stratify on garment category; warm-up data is folded into train only.
    train_pd, val_pd = train_test_split(annotation, test_size=0.1, random_state=42,
                                        stratify=annotation['image_category'])

    val_pd = drop_shitdata(val_pd.copy())
    train_pd = pd.concat([train_pd, warmup],axis=0,ignore_index=True)
    train_pd.index = range(train_pd.shape[0])

    # NOTE(review): drop_shitdata was already applied to val_pd above; this
    # second call looks redundant — harmless only if the filter is
    # idempotent. Confirm against LMaugClass.drop_shitdata.
    val_pd = drop_shitdata(val_pd.copy())
    LMtrain_pd = train_pd
    LMval_pd = val_pd

    print LMtrain_pd.shape, LMval_pd.shape


    FSanno_pd = {'train': FStrain_pd, 'val':FSval_pd}
    LManno_pd = {'train': LMtrain_pd, 'val': LMval_pd}
    FSAug = {'train': FSAugTrain, 'val': FSAugVal}
    LMAug = {'train': LMAugTrain, 'val': LMAugVal}
    FSselect_AttrIdx = range(8)
    FSbatch_size = {'train': 8, 'val': 2}
    LMbatch_size = {'train': 8, 'val': 2}

    data_provider = DataProvider(
        FSroot_path, FSanno_pd, FSAug, FSselect_AttrIdx,
        LManno_pd, LMAug,
        FSbatch_size, LMbatch_size
    )
    epoch_num = 2
    import time

    # Pull an FS batch every step and an LM batch every other step, until
    # the FS dataset has been consumed epoch_num times.
    step = -1
    while data_provider.FSepoch < epoch_num:
        step +=1

        t0 = time.time()
        batch_data = data_provider.FSnext()
        t1 = time.time()

        if step % 2 == 0:
            batch_data = data_provider.LMnext()

        print data_provider.FSepoch, data_provider.FSiteration, len(batch_data)