#!/usr/bin/env python 
# -*- coding: utf-8 -*-
# @Time    : 2019/1/14 8:40
# @Author  : Tang Yang
# @Desc    : 
# @File    : auto_export_and_train.py
import argparse
import logging
import os
import shutil
import signal
import time
import traceback

import itchat
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection import exporter
from object_detection.protos import pipeline_pb2

from predict import predict_image

LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(filename='info.log', level=logging.INFO, format=LOG_FORMAT)


def send_message(name, message, wechat_message):
    """
    发送消息给name
    :param wechat_message: 是否发送微信消息
    :param name: 消息要发送给谁
    :param message: 要发送的消息
    """
    # nickname = input('please input your firends\' nickname : ' )
    #   想给谁发信息，先查找到这个朋友,name后填微信备注即可,deepin测试成功
    # users = itchat.search_friends(name=nickname)
    print(message)
    logging.info(message[8:])
    if wechat_message:
        try:
            users = itchat.search_friends(name=name)  # 使用备注名来查找实际用户名
            # 获取好友全部信息,返回一个列表,列表内是一个字典
            # 获取`UserName`,用于发送消息
            user_name = users[0]['UserName']
            itchat.send(message, toUserName=user_name)
        except BaseException as _:
            logging.warning(traceback.format_exc())
            return


def export(pipeline_config_path, trained_checkpoint_prefix, output_directory,
           input_type='image_tensor', input_shape=None, config_override=''):
    """
    导出模型
    :param pipeline_config_path:
    :param trained_checkpoint_prefix:
    :param output_directory:
    :param input_type:
    :param input_shape:
    :param config_override:
    :return:
    """
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.gfile.GFile(pipeline_config_path, 'r') as f:
        text_format.Merge(f.read(), pipeline_config)
    text_format.Merge(config_override, pipeline_config)
    if input_shape:
        input_shape = [
            int(dim) if dim != '-1' else None
            for dim in input_shape.split(',')
        ]
    else:
        input_shape = None
    exporter.export_inference_graph(input_type, pipeline_config,
                                    trained_checkpoint_prefix,
                                    output_directory, input_shape)


def find_models(train_dir, exported_list, min_iter, iter_gap):
    """
    找到合适测试的模型，返回迭代次数
    :param iter_gap: 导出模型之间的最小间隔
    :param min_iter: 导出模型的最小迭代次数，低于这个次数的模型不导出
    :param exported_list: 已经导出过的模型的列表
    :param train_dir: 训练模型的ckpt文件所在目录
    :return: 合适的模型的迭代次数，如果没有合适的模型，返回None
    """
    latest_checkpoint = tf.train.latest_checkpoint(train_dir)
    if latest_checkpoint is not None:
        iter_num = int(latest_checkpoint.split('.')[1].split('-')[1])
        if iter_num not in exported_list and iter_num >= min_iter:
            exported_list = sorted(exported_list)
            if iter_num - exported_list[-1] > iter_gap:
                return iter_num
    else:
        return None


def kill_train(pid_file_path):
    """
    杀掉训练程序
    :param pid_file_path:
    :return:
    """
    with open(pid_file_path) as f:
        pid = f.readline().strip()
        os.system('kill -9 ' + pid)
    if os.path.exists(pid_file_path):
        os.remove(pid_file_path)


def parse_args():
    parser = argparse.ArgumentParser()
    # 第一个是选项，第二个是数据类型，第三个默认值，第四个是help命令时的说明
    parser.add_argument(
        '--train_dir', type=str, default='', dest='train_dir',
        help='训练模型时ckpt文件保存的目录')
    parser.add_argument(
        '--pipeline_config_path', type=str, default='', dest='pipeline_config_path',
        help='配置(pipeline.config)文件所在路径')
    parser.add_argument(
        '--output_dir', type=str, default='', dest='output_dir',
        help='保存导出的pb模型的目录')
    parser.add_argument(
        '--label_map_path', type=str, default='', dest='label_map_path',
        help='pbtxt文件所在目录')
    parser.add_argument(
        '--image_dir', type=str, default='', dest='image_dir',
        help='测试图片所在目录')
    parser.add_argument(
        '--pid_file_path', type=str, default='../pid.txt', dest='pid_file_path',
        help='保存训练程序pid的文件路径')
    parser.add_argument(
        '--user_name', type=str, default='', dest='user_name',
        help='把所有的消息都发送给user_name这个微信用户')
    parser.add_argument(
        '--pb_file_postfix', type=str, default='', dest='pb_file_postfix',
        help='导出的pb模型的后缀，建议使用这个来区分不同品类的模型')
    parser.add_argument(
        '--min_iter', type=int, default=100000, dest='min_iter',
        help='导出模型的最小迭代次数，低于指定次数时的训练模型不导出')
    parser.add_argument(
        '--iter_gap', type=int, default=10000, dest='iter_gap',
        help='导出模型的迭代次数间隔')
    parser.add_argument(
        '--irsd', type=str, default='', dest='image_result_save_dir',
        help='检测结果文件保存路径')
    parser.add_argument(
        '--hotReload', type=bool, default=True, dest='hotReload',
        help='登录微信是否采用hotReload')
    parser.add_argument(
        '--checktime_gap', type=int, default=120, dest='checktime_gap',
        help='每两次检查是否有合适的模型之间的时间间隔，单位秒')

    return parser.parse_args()


def get_exported(file_path):
    exported = [0]
    previous_precision = [0]
    previous_recall = [0]
    if not os.path.exists(file_path):
        return exported, [0], [0]
    with open(file_path, 'r') as f:
        for line in f.readlines():
            nums = line.strip().split('\t')
            assert len(nums) == 3, "exported.txt file corrupt"
            iter_num = nums[0]
            if iter_num not in exported:
                exported.append(int(iter_num))
                previous_precision.append(float(nums[1]))
                previous_recall.append(float(nums[2]))

    return exported, previous_precision, previous_recall


def set_exported(num, file_path):
    with open(file_path, 'a+') as f:
        f.write(str(num))


def write_precision_and_recall(file_path, precision, recall):
    with open(file_path, 'a+') as f:
        f.write('\t' + str(precision) + '\t' + str(recall) + '\n')


def save_pr_line_chart(save_path, precision, recall, exported_list):
    p_data = np.array(precision[1:])
    r_data = np.array(recall[1:])
    exported_list = np.array(exported_list[1:])
    plt.plot(exported_list, p_data, label="Precision", color='r', marker='*')
    plt.plot(exported_list, r_data, label='Recall', color='b', marker='o')
    plt.xlabel('Iteration numbers')
    plt.ylabel('Precision & Recall')
    plt.legend()
    plt.savefig(save_path)


def init_wechat_message(user_name):
    itchat.auto_login(hotReload=True)
    users = itchat.search_friends(name=user_name)  # 使用备注名来查找实际用户名
    # 获取好友全部信息,返回一个列表,列表内是一个字典
    # 获取`UserName`,用于发送消息
    user_name = users[0]['UserName']
    itchat.send('[INFO]: 初始化', toUserName=user_name)


def main():
    args = parse_args()
    train_dir = args.train_dir
    min_iter = args.min_iter
    iter_gap = args.iter_gap
    pipeline_config_path = args.pipeline_config_path
    output_dir = args.output_dir
    pb_file_postfix = args.pb_file_postfix
    user_name = args.user_name
    image_dir = args.image_dir
    label_map_path = args.label_map_path
    image_result_save_dir = args.image_result_save_dir
    pid_file_path = args.pid_file_path
    chekcktime_gap = args.checktime_gap

    if user_name == '':
        print("[INFO]: 未指定user_name，不使用微信消息")
        wechat_message = False
    else:
        try:
            init_wechat_message(user_name)
            wechat_message = True
        except Exception as _:
            logging.warning(traceback.format_exc())
            print('[INFO]: 微信消息发送初始化失败，将不使用微信消息')
            wechat_message = False

    if train_dir == '' or pipeline_config_path == '' or output_dir == '' or label_map_path == '' or image_dir == '':
        print("[INFO]: 必须指定train_dir，pipeline_config_path，output_dir，label_map_path和image_dir")
        exit(1)

    if not os.path.exists(pid_file_path):
        send_message(user_name, "[INFO]: " + str(pid_file_path) + " not exist. Maybe training process not started?",
                     wechat_message)
        exit(1)

    exported_file_path = os.path.join(train_dir, "exported.txt")
    exported_list, previous_precision, previous_recall = get_exported(exported_file_path)

    count = 0
    send_message(user_name, "[INFO]: 查找模型中", wechat_message)
    while True:
        iter_nums = find_models(train_dir, exported_list, min_iter, iter_gap)
        if iter_nums is None:
            count += 1
            if count == 420:  # 420 * checktime_gap 秒之后如果还没有一个合适的模型则自动停止
                send_message(user_name, "[INFO]: 结束！", wechat_message)
                exit(1)
            print(
                time.strftime('%Y.%m.%d.%H:%M:%S', time.localtime(time.time())) + " - [INFO]: 暂时没有发现满足要求的模型. Sleeping.")
            time.sleep(chekcktime_gap)  # 为了不频繁占用CPU，检查一次之后挂起
            print(time.strftime('%Y.%m.%d.%H:%M:%S', time.localtime(time.time())) + " - [INFO]: 检查模型中")
            continue
        send_message(user_name, "[INFO]: 找到模型,迭代次数为 " + str(iter_nums), wechat_message)
        count = 0
        model_name = 'model.ckpt-' + str(iter_nums)
        set_exported(iter_nums, exported_file_path)
        trained_checkpoint_prefix = os.path.join(train_dir, model_name)
        kill_train(pid_file_path)
        temp_output_dir = os.path.join(output_dir, "temp")
        if not os.path.exists(temp_output_dir):
            os.mkdir(temp_output_dir)
        try:
            export(pipeline_config_path, trained_checkpoint_prefix, temp_output_dir)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            shutil.move(os.path.join(temp_output_dir, 'frozen_inference_graph.pb'),
                        os.path.join(output_dir, 'frozen_inference_graph.pb'))
            shutil.rmtree(temp_output_dir)
        except Exception as _:
            logging.warning(traceback.format_exc())
            send_message(user_name, "[INFO]: 导出第" + str(iter_nums) + "次训练模型失败，将尝试导出下次模型", True)
            exit(0)

        src_pb_path = os.path.join(output_dir, 'frozen_inference_graph.pb')
        pb_new_name = 'frozen_inference_graph_' + str(iter_nums) + '_' + pb_file_postfix + '.pb'

        rename_flag = True  # 标识rename成功或者失败
        try:
            os.rename(src_pb_path, os.path.join(output_dir, pb_new_name))
        except Exception:
            send_message(user_name, '[INFO]: 重命名pb文件失败. ' + pb_new_name +
                         " 已经存在或者 frozen_inference_graph.pb 不存在", wechat_message)
            rename_flag = False
        if rename_flag:
            pb_path = os.path.join(output_dir, pb_new_name)
        else:
            pb_path = os.path.join(output_dir, 'frozen_inference_graph.pb')
        send_message(user_name, "[INFO]: 导出模型成功，开始测试", wechat_message)
        precision, recall = 0, 0
        try:
            # 可以使用自己的预测程序，只要接口满足下面调用的要求，多余参数都有默认值即可
            precision, recall = predict_image(image_dir, label_map_path, pb_path,
                                              image_result_save_dir=image_result_save_dir)
            send_message(user_name, "[INFO]: \n" + str(iter_nums) +
                         ":\n\tprecision: " + str(precision) + "  recall: " + str(recall), wechat_message)
        except Exception as _:
            logging.warning(traceback.format_exc())
            send_message(user_name, "[INFO]: 测试过程中出现异常,将跳过测试.", wechat_message)
        write_precision_and_recall(exported_file_path, precision, recall)
        previous_precision.append(precision)
        previous_recall.append(recall)
        exported_list.append(iter_nums)
        save_pr_line_chart(os.path.join(train_dir, "PR.jpg"), previous_precision, previous_recall, exported_list)
        exit(0)


if __name__ == '__main__':
    main()
