# -*- coding: utf-8 -*-#

# -------------------------------------------------------------------------------
# Name:         dataset.dataloader
# Description:
# Author:       yhm
# Date:         2020/9/29
# -------------------------------------------------------------------------------
import os.path as osp
import sys
import pathlib
__dir__ = pathlib.Path(osp.abspath(__file__))
sys.path.append(str(__dir__.parent.parent))

import cv2
import math
import numpy as np
import imgaug.augmenters as iaa
from shapely.geometry import Polygon

from db_config import cfg
from dataset.label_maker import make_score_map, make_border_map
from dataset.transform import transform, crop, resize
from dataset.utils import show_polys


def load_all_ans(gt_paths):
    '''
    Parse ground-truth annotation files into polygon/label records.

    Each line of a gt file is "x1,y1,x2,y2,...,label" (comma separated).
    A label of '1' is treated as an ignore marker and rewritten to '###'.

    :param gt_paths: list of paths to ground-truth txt files
    :return: list with one entry per file; each entry is a list of dicts
             {'poly': [[x, y], ...], 'text': label}
    '''
    res = []
    for gt in gt_paths:
        lines = []
        # Use a context manager so the file handle is always closed
        # (the original leaked an open file per gt path).
        with open(gt, 'r') as reader:
            for line in reader:
                item = {}
                parts = line.strip().split(',')
                text = parts[-1]
                # '1' marks an ignored region in this dataset format
                if text == '1':
                    text = '###'
                # strip a possible UTF-8 BOM left by Windows editors
                parts = [p.strip('\ufeff').strip('\xef\xbb\xbf') for p in parts]
                # keep an even number of coordinates (complete x,y pairs)
                num_points = (len(parts) - 1) // 2 * 2
                # convert coordinate strings to floats, pair them up
                poly = np.array(list(map(float, parts[:num_points]))).reshape((-1, 2)).tolist()
                # a valid polygon needs at least 3 vertices
                if len(poly) < 3:
                    continue
                item['poly'] = poly
                item['text'] = text
                lines.append(item)
        res.append(lines)
    return res


def mean_image_subtraction(image, means=None):
    '''
    Subtract per-channel mean values from an H x W x 3 image, in place.

    :param image: float ndarray of shape (H, W, 3); mutated in place
    :param means: optional sequence of 3 per-channel means; defaults to
                  cfg.MEANS (backward-compatible with the old signature)
    :return: the same (mutated) image array
    '''
    if means is None:
        means = cfg.MEANS
    for channel in range(3):
        image[..., channel] -= means[channel]
    return image


def generate(batch_size= cfg.TRAIN.BATCH_SIZE, data_dir= cfg.TRAIN.DATA_DIR , is_training = True):
    '''
    Infinite batch generator for DB training/evaluation.

    Reads '{split}_list.txt' under data_dir, loads images from
    '{split}_images' and annotations from '{split}_gts', applies
    augmentation (train only), and builds score/border label maps.

    :param batch_size: number of samples per yielded batch
    :param data_dir: dataset root directory
    :param is_training: True -> train split with augmentation + shuffling
    :yield: (images, gts, masks, thresh_maps, thresh_masks), each of shape
            [batch_size, image_size, image_size, C] with C = 3 or 1
    '''
    image_size = cfg.TRAIN.IMG_SIZE

    split = 'train' if is_training else 'test'
    with open(osp.join(data_dir, f'{split}_list.txt')) as f:
        image_fnames = f.readlines()
        image_paths = [osp.join(data_dir, f'{split}_images', image_fname.strip()) for image_fname in image_fnames]
        gt_paths = [osp.join(data_dir, f'{split}_gts', image_fname.strip() + '.txt') for image_fname in image_fnames]
        all_ans = load_all_ans(gt_paths)
    transform_aug = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Affine(rotate=(-10, 10)),
        iaa.Resize((0.5, 3.0))])
    dataset_size = len(image_paths)
    indices = np.arange(dataset_size)
    if is_training:
        np.random.shuffle(indices)
    current_idx = 0
    b = 0
    while True:
        if current_idx >= dataset_size:
            # epoch boundary: reshuffle for training, then restart
            if is_training:
                np.random.shuffle(indices)
            current_idx = 0
        if b == 0:
            # start a fresh batch of label buffers
            batch_images = np.zeros([batch_size, image_size, image_size, 3], dtype=np.float32)
            batch_gts = np.zeros([batch_size, image_size, image_size, 1], dtype=np.float32)
            batch_masks = np.zeros([batch_size, image_size, image_size, 1], dtype=np.float32)
            batch_thresh_maps = np.zeros([batch_size, image_size, image_size, 1], dtype=np.float32)
            batch_thresh_masks = np.zeros([batch_size, image_size, image_size, 1], dtype=np.float32)
        i = indices[current_idx]
        image_path = image_paths[i]
        anns = all_ans[i]
        image = cv2.imread(image_path)
        if is_training:
            # Freeze the augmentation sequence for this sample so the image
            # and its keypoints receive exactly the same transform. Must be
            # called once per sample, otherwise every sample would share the
            # same random transform.
            transform_aug = transform_aug.to_deterministic()
            image, anns = transform(transform_aug, image, anns)
            image, anns = crop(image, anns)
        image, anns = resize(image_size, image, anns)

        # drop degenerate polygons produced by augmentation/cropping
        anns = [ann for ann in anns if Polygon(ann['poly']).is_valid]
        gt = np.zeros((image_size, image_size), dtype=np.float32)
        mask = np.ones((image_size, image_size), dtype=np.float32)
        thresh_map = np.zeros((image_size, image_size), dtype=np.float32)
        thresh_mask = np.zeros((image_size, image_size), dtype=np.float32)

        gt, mask = make_score_map(anns, gt, mask)

        try:
            for ann in anns:
                thresh_map, thresh_mask = make_border_map(ann, thresh_map, thresh_mask)
        except IndexError:
            # Skip this sample, but advance the cursor first — the original
            # `continue` retried the same index forever, which deadlocks the
            # generator when augmentation is deterministic (eval mode).
            current_idx += 1
            continue

        # Rescale the normalized border map into [THRESH_MIN, THRESH_MAX].
        # Bug fix: the original used (THRESH_MIN - THRESH_MAX), which
        # inverts and negates the intended range.
        thresh_map = thresh_map * (cfg.THRESH_MAX - cfg.THRESH_MIN) + cfg.THRESH_MIN

        image = image.astype(np.float32)
        image = mean_image_subtraction(image)/255.0
        batch_images[b] = image
        batch_gts[b] = gt[:,:,np.newaxis]
        batch_masks[b] = mask[:,:,np.newaxis]
        batch_thresh_maps[b] = thresh_map[:,:,np.newaxis]
        batch_thresh_masks[b] = thresh_mask[:,:,np.newaxis]
        b+=1
        current_idx +=1
        if b == batch_size:
            yield batch_images, batch_gts, batch_masks, batch_thresh_maps, batch_thresh_masks
            b = 0


if __name__ == '__main__':
    # Smoke test: pull a single batch from the training generator and
    # print the shape of each of the five returned tensors.
    train_data_generator = generate(batch_size=cfg.TRAIN.BATCH_SIZE,
                                    data_dir=cfg.TRAIN.DATA_DIR,
                                    is_training=True)
    for _ in range(1):
        train_data = next(train_data_generator)
        for tensor_idx in range(5):
            print(train_data[tensor_idx].shape)
