import re
import sys

import numpy as np
import tensorflow as tf
import base64
# import simhash_float
from math import ceil

MODEL_PATH = 'resource/models/classification/inception-2015-12-05/classify_image_graph_def.pb'
LABEL_PATH = 'resource/models/classification/inception-2015-12-05/imagenet_2012_challenge_label_map_proto.pbtxt'
UID_PATH = 'resource/models/classification/inception-2015-12-05/imagenet_synset_to_human_label_map.txt'
NUM_TOP_PREDICTION = 5


# def hamming_dist(hash1, hash2, hash_bits):
#     x = (hash1 ^ hash2) & ((1 << hash_bits) - 1)
#     tot = 0
#     while x:
#         tot += 1
#         x &= x - 1
#     return tot
#
#
# def similarity(hash1, hash2):
#     a = float(hash1)
#     b = float(hash2)
#     if a > b:
#         return b / a
#     else:
#         return a / b
#

def get_feat(d, threshold=0.5):
    """Binarize activation vector *d*: 1 where a value exceeds *threshold*, else 0."""
    above = d > threshold
    return np.where(above, 1, 0)


def hamming_dist(h1, h2):
    """Hamming distance between two equal-length binary (0/1) numpy vectors."""
    mismatches = np.bitwise_xor(h1, h2)
    return sum(mismatches)


def load_model(model_file):
    """Read the frozen GraphDef at *model_file* and import it into the default graph."""
    with tf.gfile.FastGFile(model_file, 'rb') as f:
        serialized_graph = f.read()
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(serialized_graph)
    # name='' keeps the original node names (e.g. 'softmax', 'pool_3') unprefixed.
    tf.import_graph_def(graph_def, name='')


class NodeLookup:
    """Converts integer node ID's to human readable labels."""

    def __init__(self,
                 label_lookup_path=None,
                 uid_lookup_path=None):
        # Build the id -> name table once; lookups afterwards are plain dict reads.
        self.node_lookup = self.load(label_lookup_path, uid_lookup_path)

    def load(self, label_lookup_path, uid_lookup_path):
        """Loads a human readable English name for each softmax node.

        Args:
          label_lookup_path: string UID to integer node ID.
          uid_lookup_path: string UID to human-readable string.

        Returns:
          dict from integer node ID to human-readable string.
        """
        # NOTE(review): tf.logging.fatal only logs; execution continues, so a
        # missing file actually fails on the GFile reads below.
        if not tf.gfile.Exists(uid_lookup_path):
            tf.logging.fatal('File does not exist %s', uid_lookup_path)
        if not tf.gfile.Exists(label_lookup_path):
            tf.logging.fatal('File does not exist %s', label_lookup_path)

        # Loads mapping from string UID to human-readable string
        proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
        uid_to_human = {}
        # findall() is expected to split each line so that item 0 is the synset
        # UID (e.g. "n00004475") and item 2 is the human-readable name -- TODO
        # confirm against the synset map file format.
        p = re.compile(r'[n\d]*[ \S,]*')
        for line in proto_as_ascii_lines:
            parsed_items = p.findall(line)
            uid = parsed_items[0]
            human_string = parsed_items[2]
            uid_to_human[uid] = human_string

        # Loads mapping from string UID to integer node ID.
        node_id_to_uid = {}
        proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
        for line in proto_as_ascii:
            if line.startswith('  target_class:'):
                target_class = int(line.split(': ')[1])
            if line.startswith('  target_class_string:'):
                # Relies on 'target_class:' preceding 'target_class_string:'
                # within each entry of the pbtxt; otherwise target_class would
                # be unbound here.
                target_class_string = line.split(': ')[1]
                # [1:-2] strips the surrounding quotes and trailing newline,
                # e.g. '"n01440764"\n' -> 'n01440764'.
                node_id_to_uid[target_class] = target_class_string[1:-2]

        # Loads the final mapping of integer node ID to human-readable string
        node_id_to_name = {}
        for key, val in node_id_to_uid.items():
            if val not in uid_to_human:
                tf.logging.fatal('Failed to locate: %s', val)
            name = uid_to_human[val]
            node_id_to_name[key] = name

        return node_id_to_name

    def id_to_string(self, node_id):
        """Return the human-readable name for *node_id*, or '' if unknown."""
        if node_id not in self.node_lookup:
            return ''
        return self.node_lookup[node_id]


def _binarize_fea(x, thresh):
    '''binary and pack feature vector'''
    binary_vec = np.where(x >= thresh, 1, 0)
    f_len = binary_vec.shape[0]
    if f_len % 32 != 0:
        new_size = int(ceil(f_len / 32.) * 32)
        num_pad = new_size - f_len
        binary_vec = np.pad(binary_vec, (num_pad, 0), 'constant')

    return np.packbits(binary_vec).view('uint32').tolist()


def tokenize(b64_str, str_len=4):
    """Split *b64_str* into consecutive chunks of *str_len* characters.

    A trailing remainder shorter than *str_len* is discarded (callers pass
    base64 strings whose length is a multiple of 4).
    """
    count = int(len(b64_str) / str_len)
    return [b64_str[int(k * str_len): int((k + 1) * str_len)]
            for k in range(count)]


def run_inference_on_image(model_file, label_path, uid_path, image):
    """Runs inference on an image.

    Loads the graph, evaluates the softmax and pool_3 tensors on *image*,
    and prints the base64-encoded binary feature plus the top predictions.

    Args:
      model_file: path to the frozen GraphDef (.pb).
      label_path: node-id -> UID map (pbtxt), forwarded to NodeLookup.
      uid_path: UID -> human-readable-name map, forwarded to NodeLookup.
      image: Image file name.

    Returns:
      (binary feature vector, top-k node ids, top-k prediction scores)
    """
    if not tf.gfile.Exists(image):
        # NOTE(review): tf.logging.fatal only logs; it does not abort, so a
        # missing file actually fails on the read below.
        tf.logging.fatal('File does not exist %s', image)
    image_data = tf.gfile.FastGFile(image, 'rb').read()

    # Creates graph from saved GraphDef.
    load_model(model_file)

    with tf.Session() as sess:
        # Some useful tensors:
        # 'softmax:0': A tensor containing the normalized prediction across
        #   1000 labels.
        # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
        #   float description of the image.
        # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
        #   encoding of the image.
        # Runs the softmax tensor by feeding the image_data as input to the graph.
        softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
        pool3_tensor = sess.graph.get_tensor_by_name('pool_3:0')

        # for op in tf.get_default_graph().get_operations():
        #     print(str(op.name))

        predictions, feat = sess.run([softmax_tensor, pool3_tensor],
                               {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)
        feat = np.squeeze(feat)
        # print([int(f * 256) for f in feat]) # simhash
        # print(feat.shape)
        # Binarize the pool_3 activations at 0.5 into a 0/1 vector.
        feat = get_feat(feat, 0.5)
        # Pack the 0/1 vector into bytes (MSB first) and base64-encode it.
        # Assumes len(feat) is a multiple of 8 (pool_3 is 2048-d -- TODO confirm).
        feat2 = base64.b64encode(int(''.join([str(k) for k in feat.tolist()]), base=2).to_bytes(int(len(feat) / 8),
                                                                                               byteorder='big')).decode()

        # Print the feature as space-separated 4-character base64 tokens.
        print(' '.join(tokenize(feat2)))
        # print(_binarize_fea(feat, 0.5))
        # print(len(feat))

        # Creates node ID --> English string lookup.
        node_lookup = NodeLookup(label_path, uid_path)

        # Indices of the NUM_TOP_PREDICTION highest scores, best first.
        top_k = predictions.argsort()[-NUM_TOP_PREDICTION:][::-1]
        print(top_k)
        # for k, v in node_lookup.node_lookup.items():
        #     print(k, '\t', v)
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            print('%s (score = %.5f)' % (human_string, score))
        print(int(''.join([str(k) for k in feat.tolist()]), base=2).to_bytes(int(len(feat)/8), byteorder='big'))
        print(predictions[top_k])
    return feat, top_k, predictions[top_k]


def extract_feature(model_file, images, outfile):
    """Extract binarized pool_3 features and top-k predictions for a batch of images.

    For each successfully processed image, one tab-separated line is written
    to *outfile*:
        base64(packed binary feature) \\t top-k node ids \\t top-k scores \\t path

    Args:
      model_file: path to the frozen GraphDef (.pb) to load.
      images: iterable of image file paths to process.
      outfile: path of the TSV output file (truncated/overwritten).

    Returns:
      None
    """
    # Creates graph from saved GraphDef.
    load_model(model_file)

    # `with` guarantees the output file and the session are closed even if an
    # unexpected error escapes the per-image handler below (the original code
    # never closed w_fd).
    with open(outfile, 'w+') as w_fd, tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
        pool3_tensor = sess.graph.get_tensor_by_name('pool_3:0')
        for image in images:
            if not tf.gfile.Exists(image):
                # NOTE(review): tf.logging.fatal only logs; execution continues
                # and the read below raises, handled by the except clause.
                tf.logging.fatal('File does not exist %s', image)
            try:
                # Read inside the try so one unreadable file does not abort
                # the whole batch.
                image_data = tf.gfile.FastGFile(image, 'rb').read()
                predictions, feat = sess.run(
                    [softmax_tensor, pool3_tensor],
                    {'DecodeJpeg/contents:0': image_data})
                predictions = np.squeeze(predictions)
                feat = np.squeeze(feat)
                # Binarize the pool_3 activations at 0.5 into a 0/1 vector.
                feat = get_feat(feat, 0.5)
                # Indices of the NUM_TOP_PREDICTION highest scores, best first.
                top_k = predictions.argsort()[-NUM_TOP_PREDICTION:][::-1]
                # Pack the 0/1 vector into bytes (MSB first) and base64-encode
                # it; assumes len(feat) is a multiple of 8 (pool_3 is 2048-d).
                packed = int(''.join(str(k) for k in feat.tolist()), base=2)
                feat_b64 = base64.b64encode(
                    packed.to_bytes(len(feat) // 8, byteorder='big')).decode()
                line = '\t'.join([
                    feat_b64,
                    ' '.join(str(k) for k in top_k),
                    ' '.join(str(k) for k in predictions[top_k]),
                    image,
                ])
                w_fd.write(line + '\n')
            except Exception as e:
                # Best-effort batch processing: report the failing image and
                # continue with the rest.
                print('%s: %s' % (image, str(e)))

if __name__ == '__main__':
    # CLI: classify the single image given as argv[1] and print its binarized
    # feature tokens plus the top predictions.
    run_inference_on_image(MODEL_PATH, LABEL_PATH, UID_PATH, sys.argv[1])
    # Alternative batch usage (feature extraction over a directory):
    #   import glob
    #   imgs = glob.glob('%s/*' % sys.argv[1])
    #   extract_feature(MODEL_PATH, imgs, sys.argv[2])
