import os
from os import path
import logging
import numpy as np
import pandas as pd
from multiprocessing.pool import Pool
from threading import Thread, Lock 
from queue import Queue
from collections import deque, namedtuple
import shutil
from scipy import misc
from config import cfg
import app_logger

# module-scoped logger so log configuration can filter this module's output
logger = logging.getLogger("utils")
# one buffered batch: `img` and `attr` are parallel arrays of equal length
Img_Attr = namedtuple("Img_Attr", ["img", "attr"])
def resize_square_img(jpg, new_size):
    '''
        Center-crop an image to a square and resize to (new_size, new_size).

        If height > width, chop the extra area evenly from top & bottom;
        otherwise chop the extra area evenly from left & right.
        jpg:      np array of shape (height, width[, channels])
        new_size: integer edge length of the resulting square image
    '''
    height = jpg.shape[0]
    width = jpg.shape[1]

    def __chop_height(img):
        # keep the vertically centered width x width region
        bound = (height - width) // 2
        top = bound
        bottom = top + width
        new_img = img[top:bottom, :]
        return misc.imresize(new_img, (new_size, new_size))

    def __chop_width(img):
        # keep the horizontally centered height x height region
        bound = (width - height) // 2
        left = bound
        # BUG FIX: original read `right = top + height`, but `top` is only
        # defined inside __chop_height -> NameError for any landscape image.
        right = left + height
        new_img = img[:, left:right]
        return misc.imresize(new_img, (new_size, new_size))

    if height > width:
        return __chop_height(jpg)
    else:
        return __chop_width(jpg)


def read_attr_list(total_img = 0):
    '''
       Read up to `total_img` attribute rows from the CelebA attr list file;
       if total_img is less than 1 (or exceeds the file's row count), read
       all rows.  The original {-1, 1} labels are remapped to {0.0, 1.0}.

       Returns a float32 DataFrame indexed by image file name, one column
       per attribute.
    '''
    list_attr_file = cfg.origin_data_dir + "/list_attr_celeba.txt"

    # FIX: use the module logger; `logging.info` logged via the root logger,
    # inconsistent with the rest of this file
    logger.info("read attr list from `%s`" % list_attr_file)
    with open(list_attr_file) as f:
        # first line of the file is the total image count
        total_img_cnt = int(f.readline())
        if total_img < 1 or total_img > total_img_cnt:
            total_img = total_img_cnt
        # second line lists the attribute column names
        attr_name = f.readline().split()
        index_name = [""] * total_img
        attr_list = [None] * total_img
        logger.info("total img count from attr list = %d" % total_img_cnt)
        for i in range(total_img):
            seg = f.readline().strip().split()
            index_name[i] = seg[0]
            # remap labels -1/1 -> 0.0/1.0
            attr_list[i] = [(int(x) + 1)/2. for x in seg[1:]]

    return pd.DataFrame(attr_list, index = index_name, columns=attr_name, dtype = np.float32)

def save_attr(dir_name, attr_df, attr_list = None):
    if attr_list is not None:
        attr_df = attr_df.loc[:, attr_list]
    attr_np = np.asarray(attr_df, dtype = np.float32)
    attr_col = np.asarray(attr_df.columns)
    img_names = np.asarray(attr_df.index)
    if not path.exists(dir_name):
        os.mkdir(dir_name)
    np.save(dir_name + "/attr_array", attr_np, allow_pickle=False)
    np.save(dir_name + "/attr_names", attr_col)
    np.save(dir_name + "/img_name_list", img_names)

def load_attr(dir_name, is_attr_df = False):
    '''
    Load the attribute data previously written by `save_attr`.

    Returns a DataFrame (values as data, attr names as columns, image names
    as index) when `is_attr_df` is True; otherwise returns the raw float32
    array together with a (attr_names, img_names) tuple.
    '''
    values = np.load(dir_name + "/attr_array.npy", allow_pickle=False)
    columns = np.load(dir_name + "/attr_names.npy")
    rows = np.load(dir_name + "/img_name_list.npy")
    if not is_attr_df:
        return values, (columns, rows)
    return pd.DataFrame(values, columns = columns, index = rows)
    
def load_img_and_save2partition(attr_df):
    '''
    Load, square-crop/resize and normalize every image listed in `attr_df`,
    then save the images together with their attributes as a single .npz
    partition file named after the first and last image of the block.

    attr_df: DataFrame slice indexed by image file names ("000001.jpg", ...)
    Returns the path of the saved .npz file.
    '''
    img_cnt = len(attr_df)
    img_index = attr_df.index
    new_size = cfg.img_size
    img_list = [None] * img_cnt
    start_img = img_index[0].split('.')[0]
    end_img = img_index[-1].split('.')[0]

    for i, img_file in enumerate(img_index):
        img_file = cfg.origin_data_dir + "/img_align_celeba/" + img_file
        with open(img_file, 'rb') as f:
            img_list[i] = resize_square_img(misc.imread(f), new_size)
            # FIX: normalize uint8 [0, 255] to [-1, 1] with 127.5 (was 127.,
            # which mapped 255 to ~1.0079) so the round trip matches
            # save_grid_imgs' `* 127.5 + 127.5` denormalization
            img_list[i] = np.asarray(img_list[i], dtype=np.float32)/127.5 - 1.

    save_path = "%s/%s-%s.npz" % (cfg.dataset, start_img, end_img)
    np.savez(save_path, img = img_list, attr = np.asarray(attr_df))
    return save_path
    
def repartition_images_disk(img_num_one_block = 10000, attr_list = None):
    '''
    Split the CelebA attribute table into blocks of `img_num_one_block`
    images and save each block (resized images + attributes) as a separate
    .npz file on disk, processing blocks in a worker pool.

    Any existing dataset directory is wiped first.  When `attr_list` is
    given, only those attribute columns are kept.  Returns the list of
    saved partition file paths.
    '''
    if path.exists(cfg.dataset):
        shutil.rmtree(cfg.dataset)
    os.mkdir(cfg.dataset)

    attr_df = read_attr_list()
    if attr_list is not None:
        attr_df = attr_df.loc[:, attr_list]

    # slice the table into consecutive chunks; the last one may be shorter
    total = len(attr_df)
    img_blocks = [attr_df.iloc[pos:pos + img_num_one_block, :]
                  for pos in range(0, total, img_num_one_block)]

    with Pool(cfg.thread_num) as pool:
        saved_paths = pool.map(load_img_and_save2partition, img_blocks)
    logger.debug("img partitions:\n%r" % saved_paths)
    return saved_paths

class CelebSample():
    '''
    Batch provider over the partition .npz files in `cfg.dataset`.

    Keeps a bounded deque of ready-made (img, attr) batches and refills it
    from disk on a daemon thread once the buffer drops below 30% of its
    capacity.  `sample[i]` pops one batch; the integer index itself is
    ignored — batches come in buffer order.
    '''
    def __init__(self, start=0., end=0.8, batch_size = 128, shuffle = True, is_train = True):
        '''
        start/end:  fractional range in [0, 1] selecting which partition
                    files to use (e.g. 0.0-0.8 for a training split)
        batch_size: samples per popped batch
        shuffle:    shuffle partition order and samples within a partition
        is_train:   when True, __getitem__ randomly mirrors images
        Raises Exception when the range covers less than one partition.
        '''
        img_partions = os.listdir(cfg.dataset)
        num = len(img_partions)
        start = int(start * num)
        end = int(end * num)
        if end - start <= 0:
            raise Exception("data set too small, less than one partition")
        self.img_partition_files = ["%s/%s" %(cfg.dataset, x) for x in img_partions[start : end]] 
        self.batch_size = batch_size
        self.is_train = is_train
        # bounded FIFO buffer sized to ~2 partitions' worth of batches
        # (appendleft in reload_queue, pop in __getitem__)
        self.__data_queue = deque(maxlen = (cfg.img_part_num//batch_size)* 2)
        logger.debug("buffer queue max size = %d" % self.__data_queue.maxlen)
        # left data that can't feed into one batch fully when load from disk
        self.__margin_data = None
        self.__shuffle_idx = np.arange(end - start)
        self.__reloading = False
        self.__cur_part_id = 0
        self.__part_num = len(self.img_partition_files)
        # number of batches one pass over the selected partitions yields
        # (assumes each partition holds cfg.img_part_num images)
        self.__len = (end - start) * cfg.img_part_num // batch_size
        self.__shuffle = shuffle
        if shuffle:
            np.random.shuffle(self.__shuffle_idx)
        
        # lock guards __data_queue and the __reloading flag
        self.__lock = Lock()
        self.__signal_queue = Queue(maxsize=10)
        # NOTE(review): Thread.setDaemon() is deprecated; the modern form is
        # Thread(..., daemon=True) — behavior is the same
        reload_thread = Thread(target=self.__reload_backend)
        reload_thread.setDaemon(True)
        reload_thread.start()
        # pre-fill the buffer synchronously so the first __getitem__ succeeds
        self.reload_queue()
        
    def get_all_partition(self):
        '''
        Concatenate every selected partition (in current shuffled order)
        into a single in-memory (img_all, attr_all) pair.
        '''
        part_file = self.img_partition_files[self.__shuffle_idx[0]]
        data = np.load(part_file)
        img_all = data["img"]
        attr_all = data["attr"]
        for idx in self.__shuffle_idx[1:]:
            part_file = self.img_partition_files[idx]
            data = np.load(part_file)
            img = data["img"]
            attr = data["attr"]
            img_all = np.concatenate((img_all, img))
            attr_all = np.concatenate((attr_all, attr))
        return img_all, attr_all
    
    def reload_queue(self):
        '''
        Load the next partition from disk, prepend any leftover margin
        samples, optionally shuffle, slice into batches and push them onto
        the buffer.  Samples that do not fill a whole batch are kept as
        margin for the next reload.  Runs on both the caller's thread
        (init / empty-buffer fallback) and the daemon reload thread.
        '''
        shaffled_idx = self.__shuffle_idx[self.__cur_part_id]
        self.__cur_part_id += 1
        if self.__cur_part_id >= self.__part_num:
            # one full pass over the partitions finished; wrap around
            self.__cur_part_id = 0
            if self.__shuffle:
            # reshuffle partition idx
                logger.debug("reshuffle index")
                np.random.shuffle(self.__shuffle_idx)
                
        part_file = self.img_partition_files[shaffled_idx]
        logger.debug("reload from %s" % part_file)
        data = np.load(part_file)
        img = data["img"]
        attr = data["attr"]
        if self.__margin_data is not None:
            img = np.concatenate((self.__margin_data.img, img))
            attr = np.concatenate((self.__margin_data.attr, attr))
        data_num = len(img)
        logger.debug("data num of current part[with previous margin data] = %d" % data_num)
        if self.__shuffle:
            # shuffle sample order within this (margin + partition) pool
            sidx = np.arange(data_num)
            np.random.shuffle(sidx)
            img = img[sidx]
            attr = attr[sidx]
        batch_cnt = data_num//self.batch_size
        i = 0
        for _ in range(batch_cnt):
            img_attr = Img_Attr(img = img[i : i+self.batch_size],
                                attr = attr[i: i+self.batch_size])
            i += self.batch_size
            with self.__lock:
                self.__data_queue.appendleft(img_attr)
        if i < data_num:
            # keep the incomplete tail; it is prepended on the next reload
            self.__margin_data = Img_Attr(img = img[i:], attr = attr[i:])
        logger.debug("reload into %d baches, left margin = %d" % (batch_cnt, data_num - i))
        
    def __reload_backend(self):
        # daemon loop: block until __getitem__ signals, then refill the buffer
        while True:
            self.__signal_queue.get()
            self.reload_queue()
            self.__reloading = False
            
    def __len__(self):
        # total batch count computed in __init__
        return self.__len
    
    def __getitem__(self, idx0):
        '''
        Pop one (img, attr) batch; `idx0` is ignored.  Triggers an async
        refill when the buffer falls under 30% of capacity.  In training
        mode each image is mirrored horizontally with probability 0.5
        (mutates the buffered array in place).
        '''
        try:
            with self.__lock:
                img, attr = self.__data_queue.pop()
        except IndexError:
            # buffer drained faster than the daemon refilled it; reload inline
            logger.warning("buffer queue used up!")
            self.reload_queue()
            # NOTE(review): this fallback pop happens without holding __lock
            img, attr = self.__data_queue.pop()
            
        with self.__lock:
            if (not self.__reloading) and \
                len(self.__data_queue) < 0.3 * self.__data_queue.maxlen:
                self.__signal_queue.put(1)
                self.__reloading = True
                logger.debug("start reload buffer queue, current buffer size = %d"%len(self.__data_queue))

        # random horizontal-flip augmentation for training
        for i in range(self.batch_size):
            if self.is_train and np.random.uniform() > 0.5:
                img[i] = img[i][:, ::-1]
        return img, attr
        
    
def expand_attr2mat(attr_array):
    '''
        expand each binary attr to a 2x2 matrix: the first row repeats the
        attribute value, the second row repeats its complement (1 - value)
        input dim:   batch_size, attr_num
        output dim:  batch_size, 2, 2, attr_num
    '''
    widened = np.tile(attr_array[:, np.newaxis, np.newaxis, :], [1, 1, 2, 1])
    return np.concatenate((widened, 1. - widened), axis=1)

def save_grid_imgs(imgs, w, step = 0, name = "grid"):
    '''
    Tile the first w**2 images into a (w*img_size)-pixel square grid and
    write it to cfg.imgdir as a JPEG named "<name>_<step>.jpg".

    imgs: sequence of images normalized to roughly [-1, 1], each of shape
          (cfg.img_size, cfg.img_size, 3)
    w:    grid width/height in images; w**2 images are consumed
    step: training step embedded in the output file name
    name: base name of the output file
    Raises ValueError when fewer than w**2 images are supplied.
    '''
    n = w**2
    img_name = "%s/%s_%05d.jpg" % (cfg.imgdir, name, step)
    # FIX: validate with an explicit raise instead of `assert`, which is
    # stripped when Python runs with -O
    if len(imgs) < n:
        raise ValueError("need %d images for a %dx%d grid, got %d"
                         % (n, w, w, len(imgs)))
    imgs = imgs[:n]
    img_w = cfg.img_size * w
    black_imgs = np.zeros((img_w, img_w, 3), np.uint8)
    for i in range(w):
        for j in range(w):
            ii = i*w + j
            # de-normalize from [-1, 1] back to the uint8 pixel range
            black_imgs[i*cfg.img_size : (i+1)*cfg.img_size,
                       j*cfg.img_size : (j+1)*cfg.img_size, :] = \
                       np.clip(imgs[ii] * 127.5 + 127.5, 0, 255)

    misc.imsave(img_name, black_imgs)
    
import unittest, time
class TestSample(unittest.TestCase):
    '''
    Integration tests for the partitioning and sampling pipeline.

    NOTE(review): these tests require an on-disk fixture at
    `../DataSets/CelebA_test` and mutate global `cfg` values; the exact
    expected counts (6 blocks, 120/60/40 batches) are tied to that fixture.
    The `time.sleep(0.05)` calls pace iteration so the sampler's background
    reload thread can keep up — confirm before changing the timing.
    '''
    def setUp(self):
        # shrink batch/partition sizes so the test fixture exercises
        # multiple reloads and a non-empty margin
        cfg.batch_size = 16
        # 100 baches(aka. 1618 img), 18 margin samples
        cfg.origin_data_dir = "../DataSets/CelebA_test"
        # result in 6 blocks
        cfg.img_part_num = 16*20
        if not path.exists(cfg.imgdir):
            os.mkdir(cfg.imgdir)
            
    def test_repartition_images_disk(self):
        # repartition with a reduced attribute subset; expect 6 .npz blocks
        saved_paths = repartition_images_disk(img_num_one_block = cfg.img_part_num, 
            attr_list = (
            "Arched_Eyebrows","Bags_Under_Eyes","Bald","Bangs",
            "Big_Lips","Black_Hair","Blond_Hair","Brown_Hair","Bushy_Eyebrows","Chubby","Eyeglasses",
            "Goatee","Gray_Hair","High_Cheekbones","Male","Mouth_Slightly_Open","Mustache","Narrow_Eyes",
            "No_Beard","Pale_Skin","Pointy_Nose","Receding_Hairline","Smiling","Straight_Hair","Wavy_Hair","Young"))
        # second block should span images img_part_num+1 .. img_part_num*2
        self.assertEqual(saved_paths[1], "%s/%06d-%06d.npz"%(cfg.dataset, cfg.img_part_num+1, cfg.img_part_num*2), "partition wrong!")
        self.assertEqual(len(saved_paths), 6, 'partition num wrong!')
        
    def test_celeb_sample_len(self):
        # __len__ should scale with the selected partition fraction
        sample = CelebSample(start=0., end=1., batch_size = cfg.batch_size, shuffle = False)
        logger.debug("len(sample) = %d" % len(sample))
        self.assertEqual(len(sample), 120, "length error")
        sample = CelebSample(start=0.2, end=.8, batch_size = cfg.batch_size, shuffle = False)
        logger.debug("len(sample) = %d" % len(sample))
        self.assertEqual(len(sample), 60, "length error")
    
        sample = CelebSample(start=0.8, end=1., batch_size = cfg.batch_size, shuffle = False)
        logger.debug("len(sample) = %d" % len(sample))
        self.assertEqual(len(sample), 40, "length error")
        
    def test_celeb_sample(self):
        # unshuffled epoch wrap-around: with 1618 samples and batch 16,
        # sample 0 of batch 0 should reappear in the last fetched batch
        sample = CelebSample(start=0., end=1., batch_size = cfg.batch_size,
                is_train=False, shuffle = False)
        N = 102
        img_list = [None] * N
        attr_list = [None] * N
        for i in range(N):
            logger.debug("iter: %d" % i)
            img_list[i], attr_list[i]  = sample[i]
            # give the background reload thread time to refill the buffer
            time.sleep(0.05)
            self.assertEqual(len(img_list[i]), cfg.batch_size, "img batch size wrong")
            self.assertEqual(len(attr_list[i]), cfg.batch_size, "attr batch size wrong")
        
        self.assertTrue(np.all(img_list[0][0] == img_list[-1][2]), "img content wrong")
        
    def test_celeb_sample2(self):
        # shuffled sampling over two epochs; only checks batch shapes
        sample = CelebSample(start=0., end=1., batch_size = cfg.batch_size)
        N = 204
        img_list = [None] * N
        attr_list = [None] * N
        for i in range(N):
            logger.debug("iter: %d" % i)
            img_list[i], attr_list[i]  = sample[i]
            # give the background reload thread time to refill the buffer
            time.sleep(0.05)
            self.assertEqual(len(img_list[i]), cfg.batch_size, "img batch size wrong")
            self.assertEqual(len(attr_list[i]), cfg.batch_size, "attr batch size wrong")
        
        # dump first/last batches for manual inspection
        np.savez("test.npz", img_list[0], img_list[-1])
        
    def test_grid_save(self):
        # smoke test: render an 8x8 grid from the first partitions to disk
        sample = CelebSample(start=0., end=.7, batch_size = cfg.batch_size, shuffle = False)
        imgs, _ = sample.get_all_partition()
        logger.debug("shape of batch " + str(imgs.shape))
        imgs = save_grid_imgs(imgs, 8)
        
if __name__ == "__main__":
    # run the embedded unittest cases when executed as a script
    unittest.main()
