import os
import sys
import tarfile
import urllib
import urllib.request

import matplotlib.pyplot as plt
import numpy as np
import PIL.Image as Image
import tensorflow as tf


def download_file(url, dest_dir):
    """
    Download ``url`` into ``dest_dir`` with a console progress indicator.

    The download is skipped when the target file already exists.
        :param url: source URL; its last path segment becomes the file name
        :param dest_dir: destination directory (created if missing)
        :return: None
    """
    # The last path segment of the URL is used as the local file name.
    file_name = str(url).split("/")[-1]
    filepath = os.path.join(dest_dir, file_name)
    # makedirs handles nested paths and exist_ok avoids a check-then-create race
    # (the original os.mkdir failed for nested directories).
    os.makedirs(dest_dir, exist_ok=True)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # Rewrite the same console line with the current percentage.
            sys.stdout.write(
                '\r>> Downloading %s %.1f%%' % (file_name, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()

        # NOTE: on Python 3 this requires ``import urllib.request``; a bare
        # ``import urllib`` does not expose the ``request`` submodule.
        filepath, _ = urllib.request.urlretrieve(url, filepath, _progress)
        statinfo = os.stat(filepath)
        print('Successfully downloaded', file_name, statinfo.st_size, 'bytes.')


def extract(file_dir, dest_dir):
    """
    Extract a gzip-compressed tar archive into ``dest_dir``.
        :param file_dir: path to the ``.tar.gz`` / ``.tgz`` archive
        :param dest_dir: directory the archive members are extracted into
        :return: None
    """
    # Context manager closes the archive handle (the original leaked it).
    # NOTE(security): extractall trusts member paths — only use on trusted
    # archives such as the official TensorFlow model tarball.
    with tarfile.open(file_dir, 'r:gz') as archive:
        archive.extractall(dest_dir)


def load_graph(file_path):
    """
    Load the frozen Inception graph definition into the default TF graph.
        :param file_path: directory containing ``classify_image_graph_def.pb``
        :return: None
    """
    pb_path = os.path.join(file_path, 'classify_image_graph_def.pb')
    with tf.gfile.FastGFile(pb_path, "rb") as pb_file:
        serialized = pb_file.read()
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(serialized)
        _ = tf.import_graph_def(graph_def, name='')


def load_resource(file_dir):
    """
    Build the mapping from ImageNet class id to human-readable label.

    Reads the two metadata files shipped with the Inception model:
    - ``imagenet_synset_to_human_label_map.txt``: tab-separated
      "<synset id>\\t<human name>" rows
    - ``imagenet_2012_challenge_label_map_proto.pbtxt``: consecutive
      "  target_class: <id>" / "  target_class_string: \"<synset id>\"" lines

        :param file_dir: directory containing both metadata files
        :return: dict mapping class id (str) to human-readable name (str)
    """
    synset_to_name = {}
    map_path = os.path.join(file_dir, "imagenet_synset_to_human_label_map.txt")
    pbtxt_path = os.path.join(file_dir, "imagenet_2012_challenge_label_map_proto.pbtxt")

    with open(map_path) as file:
        for line in file:
            parts = line.split("\t")
            # Guard malformed/blank rows: the original indexed parts[1]
            # unconditionally and raised IndexError on lines without a tab.
            if len(parts) >= 2:
                # strip() drops the trailing newline the original kept in
                # the label, which then leaked into printed output.
                synset_to_name[parts[0]] = parts[1].strip()

    id_to_name = {}
    with open(pbtxt_path) as file:
        lines = file.readlines()
        for i in range(1, len(lines)):
            pre_content = lines[i - 1]
            content = lines[i]
            # A class entry is a "target_class" line immediately followed by
            # its "target_class_string" line.
            if pre_content.startswith("  target_class:") and content.startswith("  target_class_string:"):
                target_class_string = content.split(":")[1].replace('"', "").replace('\n', '').replace(' ', '')
                target_class = pre_content.split(":")[1].replace('"', "").replace('\n', '').replace(' ', '')
                # Skip synsets absent from the human-label map instead of
                # raising KeyError like the original.
                if target_class_string in synset_to_name:
                    id_to_name[target_class] = synset_to_name[target_class_string]

    return id_to_name


def train(images_dir, id_name_map):
    """
    Run Inception inference on every image under ``images_dir`` and print
    the top-5 predicted classes for each one (despite the name, this does
    inference, not training — kept as ``train`` for caller compatibility).

        :param images_dir: directory walked recursively for JPEG images
        :param id_name_map: dict mapping class id (str) to human-readable name
        :return: None
    """
    with tf.Session() as session:
        # Output tensor of the pre-trained Inception graph.
        softmax_tensor = session.graph.get_tensor_by_name('softmax:0')

        # BUG FIX: the walk/inference loop must run INSIDE the ``with``
        # block — the original dedented it, so the session was already
        # closed when session.run() was called.
        for root, dirs, files in os.walk(images_dir):
            for file in files:
                image_path = os.path.join(root, file)

                # Feed raw JPEG bytes; the graph's DecodeJpeg node decodes them.
                image_data = tf.gfile.FastGFile(image_path, 'rb').read()
                predictions = session.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})

                # Flatten the (1, num_classes) output to a 1-D score vector.
                predictions = np.squeeze(predictions)

                # Print the image path, then display the image itself.
                print(image_path)
                img = Image.open(image_path)
                plt.imshow(img)
                plt.axis('off')
                plt.show()

                # argsort is ascending, so take the last five indices and
                # reverse them: the five highest scores, best first.
                top_k = predictions.argsort()[-5:][::-1]
                print(top_k)
                for node_id in top_k:
                    # Human-readable class name for this id.
                    human_string = id_name_map[str(node_id)]
                    # Confidence score for this class.
                    score = predictions[node_id]
                    print('%s (score = %.5f)' % (human_string, score))
                print()


if __name__ == '__main__':
    DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
    # Raw string + os.path.join: the original mixed escaped ("\\t") and
    # unescaped ("\I") backslashes and only worked because none of the
    # unescaped ones happened to form an escape sequence.
    base_dir = r"D:\IdeaProjects\project-mate\mywork\tensorflow-learning\lession"
    model_tmp_path = os.path.join(base_dir, "tmp")
    model_path = os.path.join(base_dir, "model")

    # download_file(DATA_URL, model_tmp_path)

    extract(os.path.join(model_tmp_path, "inception-2015-12-05.tgz"), model_path)
    id_name_map = load_resource(model_path)
    load_graph(model_path)
    train(os.path.join(base_dir, "images"), id_name_map)
    # len() of the dict directly; .keys() added nothing.
    print(len(id_name_map))
