#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 1/3/18 4:25 PM
@desc: create record file for tensorflow
"""
import argparse
import os

import cv2
import tensorflow as tf
import numpy as np

from utils.io import read_text_file


def parse_args(argv=None):
    """Parse command-line options for TFRecord creation.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse; ``None`` means ``sys.argv[1:]``. Exposed as
        a parameter (backward-compatible) so the parser can be driven
        programmatically and unit-tested.

    Returns
    -------
    argparse.Namespace
        The parsed options; bound to the module-level ``args`` by the
        ``__main__`` guard and read by the other functions in this file.
    """
    parser = argparse.ArgumentParser(description='Prepare lists txt file for dataset')
    parser.add_argument('--root', dest='root', help='程序工作根目录', default='', type=str)
    parser.add_argument('--dataset', dest='dataset', help='将要创建的数据集类型，train 或者 val',
                        choices=['train', 'val'], default='', type=str)
    # Fix: the help texts of --postfix and --prefix were swapped in the
    # original (postfix was described as a prefix and vice versa); the file
    # name is built as '{prefix}_{dataset}_{postfix}', so prefix leads.
    parser.add_argument('--postfix', dest='postfix', help='读取的文件和生成的文件后缀', default='', type=str)
    parser.add_argument('--prefix', dest='prefix', help='读取的文件和生成的文件前缀', default='', type=str)
    parser.add_argument('--split', dest='split', help='分割不同条目的分割符', default='&!&', type=str)
    parser.add_argument('--max-number', dest='max_number', help='单张图像的最大人数', default=88, type=int)
    parser.add_argument('--min-number', dest='min_number', help='单张图像的最小人数', default=24, type=int)
    parser.add_argument('--class-num', dest='class_num', help='分类数量', default=10, type=int)
    # Fix: typo in the original help text ('需要理由' -> '需要使用').
    parser.add_argument('--max-instance', dest='max_instance', help='需要使用的样本数', default=1000000, type=int)
    return parser.parse_args(argv)


def get_crowd_counting_tf_example(img_path, txt_path):
    """Build a tf.train.Example for crowd counting from one image/annotation pair.

    Parameters
    ----------
    img_path : str
        Path to the encoded image file; its raw bytes are stored as-is.
    txt_path : str
        Path to the annotation file, one head position per line as
        space-separated "x y" coordinates.

    Returns
    -------
    tf.train.Example
        Example with features: 'label' (int64 bin index), 'image' (encoded
        image bytes), 'number' (head count), 'points' (raw float64 bytes of
        the (N, 2) coordinate array).
    """
    # Keep the image encoded; no decoding happens here.
    with tf.gfile.GFile(img_path, 'rb') as image_file:
        image_bytes = image_file.read()

    # Parse the annotation lines into an (N, 2) array of (x, y) floats.
    # Relies on `read_text_file` returning one annotation per element.
    coords = []
    for line in read_text_file(txt_path):
        tokens = line.split(' ')
        coords.append([float(tokens[0]), float(tokens[1])])
    coords_array = np.asarray(coords)

    # Discretize the head count into one of `class_num` bins, clipped to the
    # last bin. NOTE(review): the bin width uses (max_number - min_number),
    # but the count is binned WITHOUT subtracting min_number — confirm this
    # matches the label definition used at training time.
    bin_width = (args.max_number - args.min_number) / args.class_num
    label = int(min(round(len(coords) / bin_width), args.class_num - 1))

    feature_map = {
        'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
        'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])),
        'number': tf.train.Feature(int64_list=tf.train.Int64List(value=[len(coords)])),
        'points': tf.train.Feature(bytes_list=tf.train.BytesList(value=[coords_array.tobytes()])),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature_map))


def run_main():
    """Write the '{prefix}_{dataset}_{postfix}.record' TFRecord file.

    Reads the matching '.txt' file list (one 'txt_path<split>img_path' entry
    per line), writes one Example per entry, and — for the training split —
    also saves the average per-channel image mean (BGR order, since images
    are loaded with cv2.imread) to '{prefix}_mean_{postfix}.txt'.
    """
    file_name = '{}_{}_{}'.format(args.prefix, args.dataset, args.postfix)

    # Read the file list first so a missing list does not leave behind an
    # empty .record file (the original opened the writer before reading).
    file_list = read_text_file(os.path.join(args.root, file_name + '.txt'))
    if args.dataset == 'train' and args.max_instance < len(file_list):
        np.random.shuffle(file_list)
        file_list = file_list[:args.max_instance]

    # Number of examples that will actually be written to the record.
    total = min(args.max_instance, len(file_list))

    # Running per-channel mean accumulators. Fix: np.longdouble instead of
    # np.float128 — identical where float128 exists, but also available on
    # platforms (e.g. Windows, some ARM) that have no float128 alias.
    mean = np.zeros(3, np.longdouble)
    buffer_mean = np.zeros(3, np.longdouble)

    writer = tf.python_io.TFRecordWriter(os.path.join(args.root, file_name + '.record'))
    try:
        for idx, example in enumerate(file_list):
            # Fix: the original tested `idx > args.max_instance` AFTER the
            # write, which emitted max_instance + 1 records for the val
            # split; stop before processing example number `total`.
            if idx >= total:
                break
            if idx % 500 == 0:
                print('On image {} of {}'.format(idx, total))
            txt_path, img_path = example.split(args.split)

            # Accumulate this image's per-channel mean; flush the buffer
            # into `mean` periodically to keep the accumulator small.
            img = cv2.imread(img_path.strip())
            buffer_mean += np.mean(img, axis=(0, 1))
            if (idx + 1) % 10000 == 0:
                mean += (buffer_mean / total)
                buffer_mean = np.zeros(3, np.longdouble)

            tf_example = get_crowd_counting_tf_example(img_path.strip(), txt_path)
            writer.write(tf_example.SerializeToString())

        # Fold in the not-yet-flushed remainder (guard against an empty
        # list, which would otherwise divide by zero) and persist the mean
        # for the training split.
        if total > 0:
            mean += (buffer_mean / total)
        if args.dataset == 'train':
            np.savetxt(os.path.join(args.root, '{}_mean_{}.txt'.format(args.prefix, args.postfix)),
                       mean, fmt='%.5f')
    finally:
        # Fix: always close the record file, even if an image or annotation
        # raises mid-loop (the original leaked the writer on any exception).
        writer.close()


if __name__ == '__main__':
    # `args` is deliberately bound as a module-level global here: both
    # run_main() and get_crowd_counting_tf_example() read it directly.
    args = parse_args()
    run_main()
