from __future__ import print_function
from __future__ import division

import random

import torch
import numpy as np
from ..set import VehicleID, InShop, SOProducts, Car, Cub
from ..set import transform
from .sampler import ClassBalancedSampler

# Maps the config's 'dataset_selected' key to the corresponding dataset class.
datasets = {
    'sop': SOProducts,   # Stanford Online Products
    'inshop': InShop,    # In-Shop Clothes Retrieval
    'vid': VehicleID,
    'car': Car,          # presumably Cars196 — TODO confirm
    'cub': Cub           # presumably CUB-200-2011 — TODO confirm
}

import sys
# https://www.jb51.net/article/171637.htm
def print_ng(*args, sep=' ', end='\n', file=None):
    """Drop-in replacement for print() that prefixes the caller's
    "file:line" location and colors the message bright blue.

    Fix: the original accepted `sep`, `end` and `file` but ignored all
    three — it joined args with '' instead of `sep`, always appended
    '\\n' instead of `end`, and always wrote to stdout.

    Args:
        *args: objects to print; each is converted with str().
        sep: separator placed between args (default: single space).
        end: string appended after the message (default: newline).
        file: writable text stream; defaults to sys.stdout when None.
    """
    # Frame of the caller: gives the line number and source file of the
    # call site, so the log line reads like a clickable location.
    caller = sys._getframe(1)
    line = caller.f_lineno
    file_name = caller.f_code.co_filename
    out = sys.stdout if file is None else file
    # str() each arg so non-string values can be joined.
    text = sep.join(str(arg) for arg in args)
    # \033[0;94m = bright blue; other candidates were 36, 93, 96.
    out.write(f'"{file_name}:{line}" \033[0;94m{text}\033[0m{end}')
# NOTE: intentionally shadows the builtin print for this module.
print = print_ng

def make(config, model, type, subset_indices=None, inshop_type=None):
    """Build a DataLoader for the dataset selected in `config`.

    Args:
        config: experiment config dict; reads 'dataset_selected',
            'transform_parameters', 'dataset', 'dataloader' and
            'num_samples_per_class'.
        model: unused here; kept for call-site compatibility.
        type: 'init', 'eval' or 'train'. (NOTE: shadows the builtin
            `type`; name kept so keyword callers keep working.)
        subset_indices: indices selecting a subset of the dataset, used
            when creating clustered dataloaders (train only).
        inshop_type: 'train', 'query' or 'gallery' — In-Shop is
            partitioned by split name instead of class ranges.

    Returns:
        torch.utils.data.DataLoader over the constructed dataset.
    """
    ds_name = config['dataset_selected']
    print_ng(f'make {type} dataloader for {ds_name} dataset')
    # Training gets augmentation transforms; init/eval get deterministic ones.
    tf = transform.make(**config['transform_parameters'],
                        is_train=(type == 'train'))
    if ds_name == 'inshop':
        ds = datasets[ds_name](
            root=config['dataset'][ds_name]['root'],
            dset_type=inshop_type,
            transform=tf)
    else:
        ds = datasets[ds_name](
            root=config['dataset'][ds_name]['root'],
            classes=config['dataset'][ds_name]['classes'][type],
            transform=tf)
    if type == 'train':
        ds.set_subset(subset_indices)
        _c = config['dataloader']
        # DataLoader treats batch_sampler as mutually exclusive with
        # batch_size, shuffle, sampler and drop_last — passing any of
        # them (the original passed batch_size=-1 explicitly) raises
        # ValueError.  Filter them all out before forwarding the config.
        incompatible = {'batch_size', 'shuffle', 'sampler', 'drop_last'}
        dl = torch.utils.data.DataLoader(
            ds,
            **{k: v for k, v in _c.items() if k not in incompatible},
            batch_sampler=ClassBalancedSampler(
                ds,
                batch_size=_c['batch_size'],
                num_samples_per_class=config['num_samples_per_class']))
    else:
        # init or eval loader: plain config-driven DataLoader.
        dl = torch.utils.data.DataLoader(ds, **config['dataloader'])
    return dl


def make_from_clusters(sub_indices, model, config):
    """Create one training DataLoader per cluster.

    Args:
        sub_indices: per-cluster index lists, indexed by cluster id.
        model: forwarded to make() (unused there; kept for compatibility).
        config: experiment config; reads 'nb_clusters'.

    Returns:
        List of DataLoaders; each loader's dataset is tagged with its
        cluster id via the `.id` attribute.
    """
    # (removed the original's unused function-local `numpy` and
    # `math.ceil` imports)
    dataloaders = []
    for c in range(config['nb_clusters']):
        dl = make(config=config, model=model, type='train',
                  subset_indices=sub_indices[c], inshop_type='train')
        dl.dataset.id = c  # tag the dataset with its cluster id
        dataloaders.append(dl)
    return dataloaders


def merge(dls_non_iter):
    """Round-robin generator over several dataloaders.

    Yields (batch, dataset) pairs, one batch from each loader per round,
    for max(len(dl)) rounds.  Loaders shorter than the longest one are
    restarted when exhausted, so every round produces a batch from every
    loader.

    Args:
        dls_non_iter: sequence of DataLoader-like objects supporting
            len(), iter() and exposing a `.dataset` attribute.
    """
    nb_batches_per_dl = [len(dl) for dl in dls_non_iter]
    nb_batches = max(nb_batches_per_dl)
    I = range(len(dls_non_iter))
    dls = [iter(dl) for dl in dls_non_iter]

    for _ in range(nb_batches):
        for i in I:
            b = next(dls[i], None)
            # Fix: `is None`, not `== None` — equality on tensor batches
            # is elementwise/ambiguous; identity is the correct check.
            if b is None:
                # Loader exhausted: restart it for the remaining rounds.
                dls[i] = iter(dls_non_iter[i])
                b = next(dls[i])
            # Fix: read `.dataset` off the DataLoader itself, not the
            # iterator — newer torch iterator classes don't expose it.
            yield b, dls_non_iter[i].dataset
