import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import os
# targetfolder
import cv2
import multiprocessing as mp
import model_zoo.keras.inception_v3_change_layer as inception_v3
import pandas as pd
class TFProcess():
    """Utilities for an input-quantization accuracy study on InceptionV3.

    Two stages:
      1. ``write_tfrecords`` / ``call_write_tfrecords`` serialize the 100k
         ImageNet-2012 *test* JPEGs into ten TFRecord shards.
      2. ``read_tfrecords`` re-reads the shards and counts, per shard, how
         many top-1 predictions change when the model input is quantized to
         int8 and dequantized again; results go to an .xlsx file.

    NOTE(review): dataset/output paths are hard-coded to one machine
    (including the ``sotfware`` typo in the image directory) — confirm
    before running elsewhere.
    """

    def qunat(self, value):
        """Quantize ``value`` to int8 over its own dynamic range.

        :param value: float tensor to quantize.
        :return: ``QuantizeOutput`` namedtuple ``(output, output_min,
            output_max)`` as produced by ``tf.quantization.quantize``.
        """
        # Use the tensor's actual min/max so no values are clipped.
        min_range = tf.math.reduce_min(value)
        max_range = tf.math.reduce_max(value)
        return tf.quantization.quantize(value, min_range, max_range, tf.qint8)

    # Backward-compatible alias fixing the "qunat" typo; old callers keep working.
    quant = qunat

    def resize_image(self, image_bytes, resize_height=299, resize_width=299, normalization=True):
        """Decode JPEG bytes into a ``(1, H, W, 3)`` float32 RGB batch.

        :param image_bytes: raw encoded image bytes.
        :param resize_height: target height; values <= 0 disable resizing.
        :param resize_width: target width; values <= 0 disable resizing.
        :param normalization: if True, scale pixel values into [0, 1].
        :return: ``np.float32`` array with a leading batch dimension.
        """
        # np.fromstring is deprecated (and rejects bytes in newer NumPy);
        # np.frombuffer is the zero-copy drop-in replacement.
        nparr = np.frombuffer(image_bytes, np.uint8)
        bgr_image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        # OpenCV decodes to BGR; the models expect RGB channel order.
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        if resize_height > 0 and resize_width > 0:
            # cv2.resize takes (width, height), in that order.
            rgb_image = cv2.resize(rgb_image, (resize_width, resize_height))
        rgb_image = np.asanyarray(rgb_image)
        if normalization:
            # Divide by the float literal 255.0 so the scaling is true
            # division, never integer truncation.
            rgb_image = rgb_image / 255.0
        # Add the batch axis and force float32 for the TF models.
        return np.asarray(np.expand_dims(rgb_image, axis=0), dtype=np.float32)

    def call_write_tfrecords(self):
        """Write the 100k test images as ten TFRecord shards in parallel.

        Image ids are 1-based and the per-shard ranges are half-open
        ``[start, end)``, so the last shard ends at 100001 to include
        image 100000.
        """
        bounds = [1] + list(range(10000, 100000, 10000)) + [100001]
        workers = [
            mp.Process(target=self.write_tfrecords, args=(lo, hi))
            for lo, hi in zip(bounds[:-1], bounds[1:])
        ]
        for worker in workers:
            worker.start()
        # Block until every shard is fully written so callers can rely on
        # the files being complete when this method returns.
        for worker in workers:
            worker.join()

    def write_tfrecords(self, start, end):
        """Serialize test images ``[start, end)`` into one TFRecord shard.

        :param start: first 1-based image index, inclusive.
        :param end: last image index, exclusive.
        :return: None; writes
            ``/home/wujing/dataset/imagenet2012_test_<start>_<end>.tfrecord``.
        """
        def _int64_feature(value):
            """Returns an int64_list from a bool / enum / int / uint."""
            return tf.train.Feature(int64_list=tf.train.Int64List(value=value))

        def _bytes_feature(value):
            """Returns a bytes_list from a string / byte."""
            if isinstance(value, type(tf.constant(0))):
                value = value.numpy()
            return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

        filename = "/home/wujing/dataset/imagenet2012_test_" + str(start) + "_" + str(end) + ".tfrecord"
        image_path = "/home/wujing/sotfware/dataset/ILSVRC2012_img_test/test"
        # `with` closes the writer, flushing buffered records to disk
        # (the original never closed it, which can truncate the shard).
        with tf.io.TFRecordWriter(filename) as writer:
            for k in range(start, end):
                image_name = "ILSVRC2012_test_" + f"{k:08}" + ".JPEG"
                # `with` releases the file handle immediately instead of
                # leaking one descriptor per image.
                with open(image_path + "/" + image_name, 'rb') as image_file:
                    image_string = image_file.read()
                # Decode only to record the (H, W, C) shape alongside the bytes.
                image_shape = tf.image.decode_jpeg(image_string).shape
                feature = {
                    'image': _bytes_feature(image_string),
                    'shape': _int64_feature(image_shape)
                }
                record_bytes = tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()
                writer.write(record_bytes)

    def get_dataset(self, start, end):
        """Open one TFRecord shard and return it as a parsed dataset.

        :param start: shard start index (must match how it was written).
        :param end: shard end index (exclusive).
        :return: ``tf.data.Dataset`` of dicts with keys ``'image'``
            (scalar string) and ``'shape'`` (length-3 int64).
        """
        def _parse_image_function(example_proto):
            # Schema mirrors the features written by write_tfrecords.
            image_feature_description = {
                'image': tf.io.FixedLenFeature([], tf.string),
                'shape': tf.io.FixedLenFeature([3], tf.int64)
            }
            return tf.io.parse_single_example(example_proto, image_feature_description)

        filename = "/home/wujing/dataset/imagenet2012_test_" + str(start) + "_" + str(end) + ".tfrecord"
        raw_image_dataset = tf.data.TFRecordDataset(filename)
        return raw_image_dataset.map(_parse_image_function)

    def read_tfrecords(self):
        """Count prediction flips caused by int8 input quantization.

        For each evaluated partition layer, runs every shard through both a
        quantize→dequantize→model path and the unmodified model, counts the
        images whose top-1 class differs, and writes the per-shard counts to
        ``input_quantization_mAP.xlsx``.
        """
        # Candidate partition points of InceptionV3 ...
        layer_names = ["input", "conv2d_1", "conv2d_2", "conv2d_3", "max_pooling2d_1", "conv2d_4", "conv2d_5",
                       "max_pooling2d_2", "mixed0",
                       "mixed1", "mixed2", "mixed3", "mixed4", "mixed5", "mixed6", "mixed7", "mixed8", "mixed9",
                       "mixed10"]
        # ... and the activation shape each point emits (H, W, C).
        layer_shapes = {'input': [299, 299, 3], 'conv2d_1': [149, 149, 32], 'conv2d_2': [147, 147, 32],
                        'conv2d_3': [147, 147, 64],
                        'max_pooling2d_1': [73, 73, 64], 'conv2d_4': [73, 73, 80], 'conv2d_5': [71, 71, 192],
                        'max_pooling2d_2': [35, 35, 192],
                        'mixed0': [35, 35, 256], 'mixed1': [35, 35, 288], 'mixed2': [35, 35, 288],
                        'mixed3': [17, 17, 768],
                        'mixed4': [17, 17, 768], 'mixed5': [17, 17, 768], 'mixed6': [17, 17, 768],
                        'mixed7': [17, 17, 768],
                        'mixed8': [8, 8, 1280], 'mixed9': [8, 8, 2048], 'mixed10': [8, 8, 2048]}

        # Relaxed shapes avoid one retrace per distinct input shape.
        nano_quant = tf.function(self.qunat, experimental_relax_shapes=True)
        result_dict = {}
        # Currently only the raw "input" partition point is evaluated;
        # change to layer_names[1:] to sweep the intermediate layers.
        for layer_name in [layer_names[0]]:
            layer_shape = layer_shapes[layer_name]
            # Edge half of the network starting at the partition layer, plus
            # the full network as the reference predictor.
            edge_model = tf.function(inception_v3.InceptionV3_edge(partition_layer=layer_name, input_shape=layer_shape))
            complete_model = tf.function(inception_v3.InceptionV3_edge(input_shape=[299, 299, 3]))
            result_dict[layer_name] = []
            shard_starts = [1, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000]
            # Shard bounds are half-open [start, end); the final shard ends
            # at 100001 so image 100000 is included (matches the writer).
            for start, end in zip(shard_starts, shard_starts[1:] + [100001]):
                mismatches = 0
                for image_features in self.get_dataset(start, end):
                    image_bytes = image_features['image'].numpy()
                    image = tf.constant(self.resize_image(image_bytes))
                    # Quantize the input to int8 and immediately dequantize,
                    # simulating a lossy mobile→edge transfer.
                    quantized = nano_quant(image)
                    dequantized = tf.quantization.dequantize(
                        quantized.output, quantized.output_min, quantized.output_max)
                    # Compare top-1 class ids with vs. without quantization.
                    dequant_result = np.argmax(edge_model(dequantized).numpy()[0])
                    complete_result = np.argmax(complete_model(image).numpy()[0])
                    if dequant_result != complete_result:
                        mismatches += 1
                result_dict[layer_name].append(mismatches)
        pd.DataFrame(result_dict).to_excel("input_quantization_mAP.xlsx")
            #break


if __name__ == "__main__":
    # Entry point: run the quantization-accuracy evaluation over the
    # pre-generated TFRecord shards. The guard keeps importing this module
    # from kicking off the whole evaluation as a side effect.
    tfp = TFProcess()
    tfp.read_tfrecords()
