import tensorflow as tf
import model_zoo.net.resnet_v2 as resnet_v2
import model_zoo.net.inception_v3 as inception_v3
import model_zoo.net.mobilenet_v1 as mobilenet_v1
import tensorflow.contrib.slim as slim
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
import sys
sys.path.append("./")
import numpy as np
import time
import pandas as pd
import model_zoo.utils as utils
import cv2
import pickle
"""
gpu-partition demo
1. baseline: all models run in sequence
2. gpu-partition: some layers of each model run individually, but some run in batch.
"""
final_endpoints_dict = {"inception_v3": "Predictions",
                        "resnet_v2":"predictions",
                        "mobilenet_v1":"Predictions"}
class GPULatencyMeasure:
    def average_partition_latency(self,model_name,layer_name_list):
        """Average the 2-sigma-filtered latencies of every partition point.

        Reads the per-layer sigma-filtered samples produced by ``sigma`` and
        writes one Excel sheet containing the rounded mean latency for each
        candidate partition layer.

        NOTE: the commented-out paths are the mobile (tx2) variant; the
        active paths read/write the edge (workstation) files.  Also note the
        source file name always escapes '/' to '_' in the layer name, while
        the result column key keeps '/' for non-resnet models.

        :param model_name: short model name used in the latency file paths
            (e.g. "resnet", "inception").
        :param layer_name_list: ordered endpoint names; the final output
            layer is never a partition point and is skipped.
        """
        result = {}
        # set for mobile latency
        #for k in range(1, len(layer_name_list)):

        # set for edge latency
        for k in range(0, len(layer_name_list)-1):
            partition_layer_name = layer_name_list[k]
            # set for mobile latency
            # source_name = "model_partition_latency/" + model_name + "/sigma/tx2/I-X/" + model_name + "_" + partition_layer_name + "_batch=1_sigma.xlsx"
            # dest_name = "model_partition_latency/"+model_name+"/"+model_name+"_partition_latency_tx2.xlsx"

            # set for edge latency
            source_name = "model_partition_latency/" + model_name + "/sigma/workstation/" + model_name + "_" + partition_layer_name.replace("/","_") + "_batch=1_sigma.xlsx"
            if model_name == "resnet":
                partition_layer_name = partition_layer_name.replace("/","_")

            data = pd.read_excel(source_name,index_col=0)["batch=1"].values
            result[partition_layer_name] = np.around(np.average(data),3)
        result_pd = pd.DataFrame(result,index=["batch=1"])
        dest_name = "model_partition_latency/" + model_name + "/" + model_name + "_partition_latency_workstation.xlsx"
        result_pd.to_excel(dest_name)

    def sigma(self,model_name,layer_name_list):
        """Filter raw latency samples with the 2-sigma rule and save them.

        For each partition layer, read the raw "batch=1" timing column,
        drop outliers via ``utils.get_2_sigma`` and write the filtered
        samples next to the originals under a ``sigma`` folder.

        NOTE: the mobile (tx2) ``source_file``/``dest_file`` assignments are
        immediately overwritten by the edge (workstation) pair below, so the
        edge paths are the ones in effect; comment the second pair out to
        process the mobile data instead.

        :param model_name: short model name used in the file paths.
        :param layer_name_list: ordered endpoint names; the final output
            layer is skipped.
        """
        for k in range(0, len(layer_name_list)-1):
        # for k in range(1, len(layer_name_list)):
            partition_layer_name = layer_name_list[k]
            #file_name = "./model_partition_latency/inception/original/tx2/I-X/inception_" + partition_layer_name + "_batch=1.xlsx"
            if model_name == "resnet":
                partition_layer_name = partition_layer_name.replace("/","_")
            # set for mobile latency
            source_file = "./model_partition_latency/"+model_name+"/original/tx2/I-X/"+model_name+"_" + partition_layer_name + "_batch=1.xlsx"
            dest_file = "./model_partition_latency/" + model_name + "/sigma/tx2/I-X/" + model_name + "_" + partition_layer_name + "_batch=1_sigma.xlsx"

            # set for edge latency
            source_file = "./model_partition_latency/" + model_name + "/original/workstation/" + model_name + "_" + partition_layer_name + "_batch=1.xlsx"
            dest_file = "./model_partition_latency/" + model_name + "/sigma/workstation/" + model_name + "_" + partition_layer_name + "_batch=1_sigma.xlsx"
            data = pd.read_excel(source_file,index_col=0)["batch=1"].values
            result = utils.get_2_sigma(data,dims=1)
            result_pd = pd.DataFrame({"batch=1":result})
            result_pd.to_excel(dest_file)

    def generate_layer_data(self,model_name,layer_name_list):
        """Run the model once per image and dump every intermediate endpoint.

        Restores the pretrained checkpoint, feeds a previously saved input
        array and stores each endpoint's activation (first batch element)
        under ``middle_data/<model_name>/`` so the partition-latency
        measurements can later start from any layer.

        :param model_name: "inception_v3" or "resnet_v2".
        :param layer_name_list: ordered endpoint names; index 0 is "input"
            and the final prediction layer is skipped.
        """
        models_path = "model_zoo/weights/" + model_name + ".ckpt"
        with tf.device("/device:GPU:0"):
             with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
                if model_name == "inception_v3":
                    data_format = [None,299,299,3]
                    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
                        input_images = tf.placeholder(dtype=tf.float32, shape=data_format, name='input')
                        # Note that endpoints does not include layer Input.
                        out, endpoints = inception_v3.inception_v3(inputs= input_images,final_endpoint=final_endpoints_dict[model_name])
                        sess.run(tf.global_variables_initializer())
                        saver = tf.train.Saver()
                        saver.restore(sess, models_path)
                        for file_name in ["cat.jpg", "scorpion.jpg","dog.jpg", "pig.jpg"]:
                            layer_data = np.load("middle_data/" + model_name + "/input_"+file_name.split(".")[0] + ".npy")
                            layer_data = np.asarray(layer_data, dtype=np.float32)[np.newaxis, :]
                            for i in range(1,len(layer_name_list)-1):
                                result = sess.run(endpoints[layer_name_list[i]],feed_dict={input_images:layer_data})
                                np.save("middle_data/"+model_name+"/"+layer_name_list[i]+"_"+file_name.split(".")[0]+".npy",result[0])
                elif model_name == "resnet_v2":
                    data_format = [224, 224, 3]
                    input_images = tf.placeholder(dtype=tf.float32,shape=[None, data_format[0], data_format[1], data_format[2]],name='input')
                    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                        out, endpoints = resnet_v2.resnet_v2_50(inputs=input_images)
                        sess.run(tf.global_variables_initializer())
                        saver = tf.train.Saver()
                        print("########models_path",models_path)
                        saver.restore(sess, models_path)
                        # BUGFIX: layer_data was previously undefined in this
                        # branch (NameError at the sess.run feed below); load
                        # the saved "guitar" input array the same way
                        # measure_partition_resnet does.
                        layer_data = np.load("middle_data/" + model_name + "/input_guitar" + ".npy")
                        layer_data = np.asarray(layer_data, dtype=np.float32)[np.newaxis, :]
                        for i in range(1,len(layer_name_list)-1):
                            print("++++++++++++",layer_name_list[i])
                            result = sess.run(endpoints[layer_name_list[i]],feed_dict={input_images:layer_data})
                            np.save("middle_data/"+model_name+"/"+layer_name_list[i].replace("/","_")+"_guitar.npy",result[0])

    def measure_partition_resnet(self,layer_name_list):
        """Measure resnet_v2_50 inference latency from every partition point.

        For each candidate partition layer k the graph is executed from that
        layer's endpoint to the model output, fed with the previously dumped
        'guitar' activation; 201 timed runs are taken (the first is dropped
        as warm-up) and written to one Excel file per partition layer.

        NOTE: the commented-out branches are the mobile variant
        (input -> partition layer, timed on tx2); the active code measures
        the edge side (partition layer -> output, on the workstation).

        :param layer_name_list: ordered endpoint names; the last entry is the
            model output and is not used as a partition point.
        """
        model_name = "resnet_v2"
        models_path = "model_zoo/weights/" + model_name + ".ckpt"
        input_info = {}
        print("=======================================")
        ''''''
        # Preload every partition layer's dumped activation once, up front.
        for k in range(0,len(layer_name_list)-1):
            partition_layer_name = layer_name_list[k]
            input_info[partition_layer_name] = {}
            layer_data = np.load("middle_data/" + model_name + "/" + partition_layer_name.replace("/","_") + "_guitar" + ".npy")
            input_info[partition_layer_name]["layer_data"] = np.asarray(layer_data, dtype=np.float32)[np.newaxis, :]

        data_format = [224,224,3]
        input_data = np.asarray(np.load("middle_data/" + model_name + "/input_guitar" + ".npy"), dtype=np.float32)[np.newaxis, :]
        # input_info["input"]["layer_data"]
        print("============",input_data.shape)
        with tf.device("/device:GPU:0"):
             input_images = tf.placeholder(dtype=tf.float32, shape=[None, data_format[0], data_format[1], data_format[2]], name='input')
             with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
                 with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    out, endpoints = resnet_v2.resnet_v2_50(inputs=input_images)
                    sess.run(tf.global_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(sess, models_path)
                    #  test mobile latency
                    # for k in range(1,len(layer_name_list)):
                    # test edge latency
                    for k in range(0, len(layer_name_list)-1):
                    # k = len(self.layer_name_list)-1
                    #if True:
                        result = []
                        partition_layer_name = layer_name_list[k]
                        # 201 runs; the first (graph warm-up) is dropped below.
                        for i in range(201):
                            if k == 0:
                                # run the complete model
                                start_layer = input_images
                                partition_data = input_data
                            else:
                                start_layer = endpoints[partition_layer_name]
                                partition_data = input_info[partition_layer_name]["layer_data"]
                            a = time.time()
                            # set for mobile latency
                            # data = sess.run(start_layer,feed_dict={input_images: input_data})

                            # set for edge latency
                            data = sess.run(out, feed_dict={start_layer: partition_data})
                            b = time.time()
                            result.append(round(b-a,3))
                        result_pd = pd.DataFrame({"batch=1":result[1:]})
                        # file_name = "model_partition_latency/resnet/original/tx2/I-X/resnet_"+partition_layer_name.replace("/","_")+"_batch=1.xlsx"
                        file_name = "model_partition_latency/resnet/original/workstation/resnet_"+partition_layer_name.replace("/","_")+"_batch=1.xlsx"
                        print(file_name)
                        result_pd.to_excel(file_name)

    def measure_partial_resnet(self,start_point,end_point):
        """Time resnet_v2_50 split into two sequential parts.

        Part 1 runs input -> ``end_point`` at batch size 1; part 2 repeats
        the resulting activation to batch size 2 and runs
        ``end_point`` -> output.  Each part is timed over 201 runs, the
        warm-up run dropped, 2-sigma filtered, and both columns are written
        to ``model_partition_latency/resnet/resnet_seq_batch.xlsx``.

        NOTE(review): the array loaded for ``start_point`` is fed into the
        224x224x3 input placeholder, so this appears to assume
        ``start_point == "input"`` (as in the commented call at the bottom of
        the file) — confirm before reusing with other start points.

        :param start_point: endpoint name whose dumped 'guitar' activation is
            used as the network input.
        :param end_point: endpoint name where the model is split.
        """
        model_name = "resnet_v2"
        models_path = "model_zoo/weights/" + model_name + ".ckpt"
        data_format=[224,224,3]
        start_data = np.load("middle_data/" + model_name + "/" + start_point.replace("/","_") + "_guitar" + ".npy")

        with tf.device("/device:GPU:0"):
             input_images = tf.placeholder(dtype=tf.float32, shape=[None, data_format[0], data_format[1], data_format[2]], name='input')
             start_data = np.asarray(start_data, dtype=np.float32)[np.newaxis, :]
             with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,intra_op_parallelism_threads=4,
                                                   log_device_placement=False)) as sess:
                 with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    out, endpoints = resnet_v2.resnet_v2_50(inputs=input_images)
                    sess.run(tf.global_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(sess, models_path)
                    result_dict = {"part1": None, "part2": None}
                    result = []
                    # Part 1: input -> end_point at batch size 1.
                    for i in range(201):
                        a = time.time()
                        temp = sess.run(endpoints[end_point],feed_dict={input_images:start_data})
                        b = time.time()
                        result.append(round(b-a,3))
                    # Drop the warm-up run, then 2-sigma filter the samples.
                    result = utils.get_2_sigma(result[1:],dims=1)
                    result_dict["part1"] = result
                    # Part 2: duplicate the activation to batch size 2 and
                    # time end_point -> output.
                    temp = np.repeat(temp,2,axis=0)
                    result = []
                    #print(temp.shape)
                    for i in range(201):
                        a = time.time()
                        sess.run(out,feed_dict={endpoints[end_point]:temp})
                        b = time.time()
                        result.append(round(b-a,3))

                    result = utils.get_2_sigma(result[1:],dims=1)
                    result_dict["part2"] = result
                    #print(result_dict)
                    result_pd = pd.DataFrame(result_dict)
                    result_pd.to_excel("model_partition_latency/resnet/resnet_seq_batch.xlsx")

    def dequantization_data(self,model_name,layer_name):
        """Load a quantized 'guitar' activation and restore it to float32.

        Reads the uint8 tensor plus its saved min/max arrays from the
        ``quantization/<model_name>/`` folders and delegates the arithmetic
        to ``dequantization_data_manual`` so the two restore code paths
        cannot drift apart.

        :param model_name: model folder name under ``quantization/``.
        :param layer_name: endpoint name used in the dump file names.
        :return: restored float32 array.
        """
        min_value = np.load("quantization/"+model_name+"/min_value/" + layer_name + "_guitar_quan_min.npy").astype(np.float32)
        max_value = np.load("quantization/"+model_name+"/max_value/" + layer_name + "_guitar_quan_max.npy").astype(np.float32)
        quan_value = np.load("quantization/"+model_name+"/"+layer_name+"_guitar_quan.npy")
        # Same formula as before; now shared with the manual helper.
        return self.dequantization_data_manual(quan_value, min_value, max_value)

    def dequantization_data_manual(self,quan_value,min_value,max_value):
        range_value = np.subtract(max_value, min_value)
        range_value[range_value == 0] = 1
        restore = (quan_value*range_value/255.0+min_value).astype(np.float32)
        return restore

    def dequantization_data_tf14(self,quan_value,min_value,max_value):
        """Build a TF op that dequantizes *quan_value* back to float.

        Thin wrapper around ``tf.quantization.dequantize``; returns the op's
        output tensor (it must still be run in a session to get values).
        """
        result = tf.quantization.dequantize(quan_value,min_value,max_value)
        return result
    def quantization_data_tf14(self,input_data):
        """Build a TF op that quantizes *input_data* to quint8.

        Unlike ``quantization_data_manual`` this uses a single scalar
        min/max over the whole tensor.  Returns the result of
        ``tf.quantization.quantize``, whose ``output``/``output_min``/
        ``output_max`` fields are consumed in ``measure_partial_inception``.
        """
        min_value = np.min(input_data)
        max_value = np.max(input_data)
        result = tf.quantization.quantize(input_data,min_value,max_value,tf.quint8)
        return result

    def quantization_data_manual(self,input_data):
        min_value = np.min(input_data, axis=tuple(range(0, len(input_data.shape) - 1)))
        max_value = np.max(input_data, axis=tuple(range(0, len(input_data.shape) - 1)))
        min_value = np.tile(min_value, input_data.shape[0:len(input_data.shape) - 1] + (1,))
        max_value = np.tile(max_value, input_data.shape[0:len(input_data.shape) - 1] + (1,))
        range_value = np.subtract(max_value, min_value)
        range_value[range_value == 0] = 1
        result = (np.subtract(input_data, min_value) * 255 / range_value).astype(np.uint8)
        return result,min_value,max_value

    def measure_complete_model(self,model_name,file_name_list):
        """Run the full, unpartitioned model on each image and print argmax.

        Restores the pretrained checkpoint for *model_name*, feeds each image
        end-to-end and prints the predicted class index; used as a sanity
        check of the complete models before partitioning.

        :param model_name: "inception_v3" or "resnet_v2".
        :param file_name_list: image file names under ``middle_data/``.
        """
        with tf.device("/device:GPU:0"):
            with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:
                if model_name=="inception_v3":
                    #
                    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
                        input_images = tf.placeholder(dtype=tf.float32, shape=[None,299,299,3], name='input')
                        # Note that endpoints does not include layer Input.
                        out, endpoints = inception_v3.inception_v3(inputs=input_images,
                                                                   final_endpoint=final_endpoints_dict[model_name])
                        sess.run(tf.global_variables_initializer())
                        saver = tf.train.Saver()
                        saver.restore(sess, "model_zoo/weights/" + model_name + ".ckpt")
                        for file_name in file_name_list:
                            input_data = self.read_image(file_name, 299, 299)[np.newaxis, :]
                            result = sess.run(out,feed_dict={input_images:input_data})
                            print(file_name,np.argmax(result[0]))
                elif model_name=="resnet_v2":
                    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                        input_images = tf.placeholder(dtype=tf.float32,shape=[None, 224, 224, 3],name='input')
                        out, endpoints = resnet_v2.resnet_v2_50(inputs=input_images)
                        sess.run(tf.global_variables_initializer())
                        saver = tf.train.Saver()
                        saver.restore(sess, "model_zoo/weights/" + model_name + ".ckpt")
                        for file_name in file_name_list:
                            input_data = self.read_image(file_name, 224, 224)[np.newaxis, :]
                            result = sess.run(out,feed_dict={input_images:input_data})
                            print(file_name,np.argmax(result[0]))

    def measure_partial_inception(self,file_name,start_point="input",end_point="Predictions"):
        """Check inception_v3 accuracy when the hand-over activation is
        quantized at each layer.

        For every intermediate endpoint: run input -> endpoint, quantize the
        activation to quint8 and dequantize it again (via the TF ops), feed
        it back to finish endpoint -> output, and print the resulting argmax
        class — so quantization loss can be inspected per split point.

        NOTE(review): ``start_point`` is shadowed by the sweep loop and
        ``end_point`` is only referenced in the commented-out timing block
        below, so both parameters are effectively unused at present.

        :param file_name: image file name under ``middle_data/``.
        """
        model_name = "inception_v3"
        models_path = "model_zoo/weights/" + model_name + ".ckpt"
        data_format = [None, 299, 299, 3]
        # input_info["input"]["layer_data"]
        #print("============",input_data.shape)
        with tf.device("/device:GPU:0"):
            # start_data = np.asarray(np.load("middle_data/" + model_name + "/input_guitar" + ".npy"), dtype=np.float32)[
            #             np.newaxis, :]
            #np.asarray(self.read_image("middle_data/airplay.jpg",data_format[1],data_format[2]), dtype=np.float32)[np.newaxis, :]
            input_data = self.read_image(file_name,299,299)[np.newaxis,:]
            with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False)) as sess:
                with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
                    input_images = tf.placeholder(dtype=tf.float32, shape=data_format, name='input')
                    # Note that endpoints does not include layer Input.
                    out, endpoints = inception_v3.inception_v3(inputs= input_images,final_endpoint=final_endpoints_dict[model_name])
                    sess.run(tf.global_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(sess, models_path)
                    result_dict = {"part1": None, "part2": None}
                    result = []
                    layer_name_list = ["input", "Conv2d_1a_3x3", "Conv2d_2a_3x3", "Conv2d_2b_3x3",
                                       "MaxPool_3a_3x3", "Conv2d_3b_1x1", "Conv2d_4a_3x3",
                                       "MaxPool_5a_3x3", "Mixed_5b", "Mixed_5c",
                                       "Mixed_5d", "Mixed_6a", "Mixed_6b", "Mixed_6c",
                                       "Mixed_6d", "Mixed_6e", "Mixed_7a",
                                       "Mixed_7b", "Mixed_7c", 'Predictions']
                    # Sweep every intermediate endpoint (skip input/Predictions).
                    for start_point in layer_name_list[1:-1]:
                        temp = sess.run(endpoints[start_point],feed_dict={input_images:input_data})
                        # Quantize the first batch element, then dequantize it
                        # to simulate a quantized hand-over.
                        result = self.quantization_data_tf14(temp[0])
                        start_data = sess.run(self.dequantization_data_tf14(result.output,result.output_min,result.output_max))
                        #print("++++++++++",start_data)
                        temp = sess.run(out,feed_dict={endpoints[start_point]:start_data[np.newaxis,:]})
                        print("========**********==========",file_name,start_point,np.argmax(temp[0]))
                        #b = time.time()
                        #result.append(round(b-a,3))
                    '''
                    result = utils.get_2_sigma(result[1:],dims=1)
                    result_dict["part1"] = result
                    temp = np.repeat(temp,2,axis=0)
                    result = []
                    #print(temp.shape)
                    for i in range(201):
                        a = time.time()
                        sess.run(out,feed_dict={endpoints[end_point]:temp})
                        b = time.time()
                        result.append(round(b-a,3))

                    result = utils.get_2_sigma(result[1:],dims=1)
                    result_dict["part2"] = result
                    #print(result_dict)
                    result_pd = pd.DataFrame(result_dict)
                    result_pd.to_excel("model_partition_latency/inception/inception_seq_batch.xlsx")
                    '''


    def read_image(self, filename, resize_height, resize_width, normalization=True):
        """Read an image from ``middle_data/`` as an RGB float32 array.

        :param filename: file name relative to the ``middle_data`` directory.
        :param resize_height: target height; resizing is skipped unless both
            target sizes are positive.
        :param resize_width: target width.
        :param normalization: when True, scale pixel values to [0.0, 1.0].
        :return: RGB image as a float32 ndarray.
        :raises FileNotFoundError: if the image cannot be read.
        """
        path = "middle_data/" + filename
        print(path)
        bgr_image = cv2.imread(path)
        if bgr_image is None:
            # cv2.imread silently returns None for missing/unreadable files;
            # fail loudly instead of crashing later with an AttributeError.
            raise FileNotFoundError("cannot read image: " + path)
        if len(bgr_image.shape) == 2:  # grayscale image: expand to 3 channels
            print("Warning:gray image", filename)
            bgr_image = cv2.cvtColor(bgr_image, cv2.COLOR_GRAY2BGR)

        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert to RGB
        if resize_height > 0 and resize_width > 0:
            # cv2.resize takes (width, height), not (height, width).
            rgb_image = cv2.resize(rgb_image, (resize_width, resize_height))
        if normalization:
            # Float division: integer division of a uint8 image would zero it out.
            rgb_image = rgb_image / 255.0
        rgb_image = np.asarray(rgb_image, dtype=np.float32)
        return rgb_image

    def get_layer_data_volume(self,model_name,layer_name_list):
        """Record the raw and pickled size of every saved partition activation.

        For each partition layer, load its dumped 'guitar' activation and
        record two sizes: the in-memory array size in bytes and the length of
        its pickle serialization (a proxy for the data volume transferred at
        that split point).  Results are written to
        ``model_partition_latency/<name>/<name>_volume.xlsx``.

        :param model_name: full model name used for the ``middle_data``
            folder (e.g. "resnet_v2"); the output path uses only its first
            "_"-separated token.
        :param layer_name_list: ordered endpoint names; the final prediction
            layer is excluded.
        """
        np_size = []
        bytes_size = []
        for i in range(len(layer_name_list) - 1):
            partition_layer_name = layer_name_list[i]
            if "resnet" in model_name:
                # resnet endpoint names contain '/', which the dump files
                # replace with '_'.
                partition_layer_name = partition_layer_name.replace("/","_")
            arr = np.load("middle_data/" + model_name + "/" + partition_layer_name + "_guitar" + ".npy")
            # nbytes generalizes the previous shape[0]*shape[1]*shape[2]*4,
            # which assumed a rank-3 float32 array (identical value there)
            # and crashed on any other rank.
            np_size.append(arr.nbytes)
            bytes_size.append(len(pickle.dumps(arr)))
        #print(bytes_size)
        volume_pd = pd.DataFrame({"np_size": np_size, "bytes_size": bytes_size})
        model_name = model_name.split("_")[0]
        volume_pd.to_excel("model_partition_latency/"+model_name+"/"+model_name+"_volume.xlsx")

    def measure_partition_inception(self):
        """Debug variant of the inception_v3 partition-latency measurement.

        Currently hard-coded to partition point k=1 (Conv2d_1a_3x3), a single
        timed iteration, CPU placement and verbose prints, with the Excel
        write commented out — i.e. left in a debugging state.  See
        ``measure_partition_resnet`` for the full measurement loop this
        mirrors.
        """
        model_name = "inception_v3"
        layer_name_list = ["input","Conv2d_1a_3x3", "Conv2d_2a_3x3", "Conv2d_2b_3x3",
                   "MaxPool_3a_3x3", "Conv2d_3b_1x1", "Conv2d_4a_3x3",
                   "MaxPool_5a_3x3", "Mixed_5b", "Mixed_5c",
                   "Mixed_5d", "Mixed_6a", "Mixed_6b", "Mixed_6c",
                   "Mixed_6d", "Mixed_6e", "Mixed_7a",
                   "Mixed_7b", "Mixed_7c", 'Predictions']
        models_path = "model_zoo/weights/" + model_name + ".ckpt"
        input_info = {}
        '''
        for k in range(1,len(self.layer_name_list)):
            partition_layer_name = self.layer_name_list[k]
            input_info[partition_layer_name] = {}
            layer_data = np.load("middle_data/" + model_name + "/" + partition_layer_name + "_guitar" + ".npy")
            input_info[partition_layer_name]["layer_data"] = np.asarray(layer_data, dtype=np.float32)[np.newaxis, :]
        '''
        data_format = [None, 299, 299, 3]
        input_data = np.asarray(np.load("middle_data/" + model_name + "/input_guitar" + ".npy"), dtype=np.float32)[np.newaxis, :]
        # input_info["input"]["layer_data"]
        #print("============",input_data.shape)
        #with tf.device("/device:GPU:0"):
        with tf.device("/device:CPU:0"):
             with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=4,
                                                   log_device_placement=False)) as sess:
                with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
                    input_images = tf.placeholder(dtype=tf.float32, shape=data_format, name='input')
                    # Note that endpoints does not include layer Input.
                    out, endpoints = inception_v3.inception_v3(inputs= input_images,final_endpoint=final_endpoints_dict[model_name])
                    sess.run(tf.global_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(sess, models_path)
                    #for k in range(1,len(self.layer_name_list)):
                    #k = len(layer_name_list)-1
                    # Debug: only the first partition point is measured.
                    k=1
                    if True:
                        result = []
                        partition_layer_name = layer_name_list[k]
                        for i in [1]:#range(201):
                            if k == 0:
                                # run the complete model
                                start_layer = input_images
                                #partition_data = input_data
                            else:
                                start_layer = endpoints[partition_layer_name]
                                #partition_data = input_info[partition_layer_name]["layer_data"]
                            a = time.time()
                            print("**************",input_data)
                            # data = sess.run(start_layer,feed_dict={input_images: input_data})
                            data = sess.run(out, feed_dict={start_layer: input_data})
                            print(data)
                            b = time.time()
                            result.append(round(b-a,3))
                        result_pd = pd.DataFrame({"batch=1":result[1:]})
                        #file_name = "model_partition_latency/inception/original/tx2/I-X/inception_"+partition_layer_name+"_batch=1.xlsx"
                        file_name = "model_partition_latency/inception/original/rasp/inception_input_batch=1.xlsx"
                        print(file_name)
                        #result_pd.to_excel(file_name)

# ---------------------------------------------------------------------------
# Script driver.  The commented calls below are alternative experiment
# stages; uncomment the one to run.
# ---------------------------------------------------------------------------
gpu = GPULatencyMeasure()
file_name_list = ["airplay.jpg","cat.jpg","chicken.jpg","dog.jpg","pig.jpg","scorpion.jpg","snack.jpg"]
# gpu.measure_complete_model(model_name="inception_v3",file_name_list=file_name_list)
''''''
# Active stage: per-layer quantization accuracy check for inception_v3.
for file_name in file_name_list[1:]:
    gpu.measure_partial_inception(file_name)
    # print()

# gpu.measure_partition_inception()
# data = gpu.read_image("middle_data/guitar.jpg",299,299)
# input_data = np.asarray(np.load("middle_data/inception_v3/input_guitar.npy"),dtype=np.float32)
# print(input_data)

'''
model_name = "resnet_v2"
resnet_layer_name_list = ["input", "conv1", "pool1", 'block1/unit_1', 'block1/unit_2', 'block1/unit_3',
                   'block2/unit_1', 'block2/unit_2', 'block2/unit_3', 'block2/unit_4',
                   'block3/unit_1', 'block3/unit_2', 'block3/unit_3', 'block3/unit_4',
                   'block3/unit_5', 'block3/unit_6', 'block4/unit_1', 'block4/unit_2',
                   'block4/unit_3', "global_pool", 'predictions']
gpu.measure_partial_resnet(resnet_layer_name_list[0],resnet_layer_name_list[15])
'''

# gpu.measure_partial_inception(layer_name_list[0],layer_name_list[16])
#
#gpu.get_layer_data_volume(model_name,resnet_layer_name_list)

#==============generate the input data============
'''
model_name = "inception_v3"
for file_name in ["cat.jpg", "scorpion.jpg","dog.jpg", "pig.jpg"]:
    input_data = gpu.read_image(file_name,299,299)
    np.save("middle_data/" + model_name + "/input_"+file_name.split(".")[0] + ".npy",input_data)
'''
#layer_data = np.load("middle_data/" + model_name + "/input_guitar" + ".npy")
#


#==============generate the layer data and volume ============
# Endpoint order for inception_v3; index 0 is the raw input, the last entry
# is the prediction layer.
model_name = "inception_v3"
layer_name_list = ["input","Conv2d_1a_3x3", "Conv2d_2a_3x3", "Conv2d_2b_3x3",
                   "MaxPool_3a_3x3", "Conv2d_3b_1x1", "Conv2d_4a_3x3",
                   "MaxPool_5a_3x3", "Mixed_5b", "Mixed_5c",
                   "Mixed_5d", "Mixed_6a", "Mixed_6b", "Mixed_6c",
                   "Mixed_6d", "Mixed_6e", "Mixed_7a",
                   "Mixed_7b", "Mixed_7c", 'Predictions']
# gpu.generate_layer_data(model_name,layer_name_list)
# gpu.get_layer_data_volume()

#gpu.measure_partition_resnet(resnet_layer_name_list)
#model_name = model_name.split("_")[0]
#gpu.sigma(model_name,resnet_layer_name_list)
#gpu.average_partition_latency(model_name,resnet_layer_name_list)

