import os
import sys
import tensorflow as tf
from tensorflow import gfile
from tensorflow import logging
import pprint
import pickle
import numpy as np



# Path to the frozen Inception-v3 computation graph (serialized GraphDef .pb)
# model_file = "./data/checkpoint_inception_v3/inception_v3_graph_def.pb"
model_file = "Demo03/flickr30k/checkpoint_inception_v3/inception_v3_graph_def.pb"
# Image caption/token file: one "imgname#idx<TAB>description" per line
input_description_file = "Demo03/flickr30k/flickr30k.token"
# Directory containing the raw image files
# NOTE(review): empty string means image names are joined onto the current
# working directory — confirm this is intended before running.
input_img_dir = ""
# Output directory for the extracted image-feature pickle files
output_folder = "Demo03/flickr30k/download_inception_v3_features"


# Number of images whose features are written into each pickle file
batch_size = 1000
if not gfile.Exists(output_folder):
    gfile.MakeDirs(output_folder)

def parse_token_file(token_file):
    """Parse the caption token file into {image_name: [description, ...]}.

    Each line has the form "imgname#idx<TAB>description"; the "#idx"
    suffix distinguishes the multiple captions of one image and is
    discarded here.

    Args:
        token_file: path to the flickr30k .token file.

    Returns:
        Dict mapping an image file name to the list of its descriptions.
    """
    img_name_to_tokens = {}
    with gfile.GFile(token_file, 'r') as f:
        # BUG FIX: file objects expose readlines(), not readLines() —
        # the original raised AttributeError at runtime.
        lines = f.readlines()

    for line in lines:
        img_id, description = line.strip('\r\n').split('\t')
        img_name, _ = img_id.split('#')
        img_name_to_tokens.setdefault(img_name, [])
        img_name_to_tokens[img_name].append(description)
    return img_name_to_tokens

# Build the image -> captions mapping from the token file.
img_name_to_tokens = parse_token_file(input_description_file)
# All image file names (the keys of the mapping).
all_img_names = list(img_name_to_tokens.keys())
# Log the total number of images found in the token file.
logging.info('num i of all images: %d' %len(all_img_names))

def load_pretrained_inception_v3(model_file):
    """Import a frozen Inception-v3 GraphDef into the default TF graph.

    Args:
        model_file: path to the serialized GraphDef (.pb) file.

    Returns:
        None; the graph is imported as a side effect.
    """
    with gfile.FastGFile(model_file, 'rb') as f:
        # Create an empty GraphDef and parse the serialized protobuf into it.
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # BUG FIX: the keyword was misspelled "nanme", which raised a
        # TypeError. name='' keeps the original node names (no prefix).
        _ = tf.import_graph_def(graph_def, name='')

# Import the pretrained Inception-v3 graph into the default graph.
load_pretrained_inception_v3(model_file)

# Number of full batches needed to cover all images.
num_batches= int(len(all_img_names) / batch_size)
# Add one extra batch for the remainder, if the count does not divide evenly.
if(len(all_img_names) % batch_size) != 0:
    num_batches += 1

# Run every image through the pretrained graph, collect the penultimate
# ("pool_3") feature vectors, and write one pickle file per batch.
# BUG FIX: tf.Session must be *called* to create a session instance;
# "with tf.Session as sess" used the class object itself.
with tf.Session() as sess:
    # Fetch the tensor by name; the ":0" suffix selects the op's first
    # output (most ops have exactly one output).
    second_to_last_tensor = sess.graph.get_tensor_by_name("pool_3:0")
    for i in range(num_batches):
        batch_img_names = all_img_names[i * batch_size:(i + 1) * batch_size]
        batch_features = []
        for img_name in batch_img_names:
            img_path = os.path.join(input_img_dir, img_name)
            if not gfile.Exists(img_path):
                raise Exception("%s doesn't exists" % img_path)
            logging.info('processing img % s' % img_name)

            # BUG FIX: the extraction below sat *outside* this inner loop,
            # so only the last image of each batch was ever processed (and
            # the existence check was duplicated).
            # Read the raw JPEG bytes and feed them to the decode node.
            img_data = gfile.FastGFile(img_path, 'rb').read()
            feature_vector = sess.run(second_to_last_tensor,
                                      feed_dict = {'DecodeJpeg/contents:0': img_data})
            # feature_vector: shape (1, 1, 1, 2048), dtype float32.
            batch_features.append(feature_vector)

        # BUG FIX: the save step sat outside the batch loop, so only the
        # final batch was ever written. Stack this batch's per-image
        # feature arrays vertically (all have matching trailing dims).
        batch_features = np.vstack(batch_features)
        output_filename = os.path.join(output_folder,
                                       'img_features-%d.pickle' % i)
        logging.info('writing to file %s' % output_filename)
        # BUG FIX: pickle is a binary format — open with 'wb', not 'w'.
        with gfile.GFile(output_filename, 'wb') as f:
            # Serialize (names, features) together so rows stay aligned.
            pickle.dump((batch_img_names, batch_features), f)

