import tensorflow as tf
import model_zoo.net.resnet_v2 as resnet_v2
import model_zoo.net.inception_v3 as inception_v3
import model_zoo.net.mobilenet_v1 as mobilenet_v1
import tensorflow.contrib.slim as slim
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
import sys
sys.path.append("./")
import numpy as np
import time
import pandas as pd
import pickle
import codecs
from io import BytesIO
import cv2
"""
gpu-partition demo
1. baseline: all models run in sequence
2. gpu-partition: some layers of each model run individually, but some run in batch.
"""
# Name of the final (prediction) endpoint exposed by each model zoo network
# builder. Note the capitalisation differs between the model definitions.
final_endpoints_dict = {"inception_v3": "Predictions",
                        "resnet_v2":"predictions",
                        "mobilenet_v1":"Predictions"}
class GPUPartition:
    def analyze_layer_data(self):
        """Report the serialized (npz) byte size of each cached layer output.

        For every layer except the last, loads the activation previously
        dumped to ``middle_data/<model>/<layer>_guitar.npy``, adds a batch
        dimension, serializes it with ``np.savez`` and prints the payload
        size in bytes (then the full list at the end).

        NOTE(review): assumes ``self.layer_name_list`` and
        ``self.model_name`` are set by the caller — neither is initialised
        anywhere in this class; confirm before use.
        """
        npz_sizes = []
        for layer in self.layer_name_list[:-1]:
            raw = np.load("middle_data/" + self.model_name + "/" + layer + "_guitar" + ".npy")
            batched = np.asarray(raw, dtype=np.float32)[np.newaxis, :]
            # Pickle serialization kept for size comparison with npz
            # (result intentionally unused; only npz sizes are recorded).
            pickle.dumps(batched)
            buf = BytesIO()
            np.savez(buf, frame=batched)
            payload_size = len(buf.getvalue())
            npz_sizes.append(payload_size)
            print(payload_size)
        print(npz_sizes)

    def generate_layer_data(self):
        """Dump each intermediate Inception-v3 activation for one cached image.

        Feeds the stored "guitar" input through the restored network once per
        endpoint and saves every intermediate layer's output (first batch
        element only) to middle_data/inception_v3/<layer>_guitar.npy.
        """
        model_name = "inception_v3"
        models_path = "model_zoo/weights/" + model_name + ".ckpt"
        # Endpoint names in forward order; "input" (index 0) and
        # 'Predictions' (last) are excluded by the dump loop below.
        layer_name_list = ["input","Conv2d_1a_3x3", "Conv2d_2a_3x3", "Conv2d_2b_3x3",
                   "MaxPool_3a_3x3", "Conv2d_3b_1x1", "Conv2d_4a_3x3",
                   "MaxPool_5a_3x3", "Mixed_5b", "Mixed_5c",
                   "Mixed_5d", "Mixed_6a", "Mixed_6b", "Mixed_6c",
                   "Mixed_6d", "Mixed_6e", "Mixed_7a",
                   "Mixed_7b", "Mixed_7c", 'Predictions']

        # Stored arrays are single images; prepend a batch dimension.
        layer_data = np.load("middle_data/" + model_name + "/input_guitar" + ".npy")
        layer_data = np.asarray(layer_data, dtype=np.float32)[np.newaxis, :]
        data_format = [None, 299, 299, 3]

        with tf.device("/device:GPU:0"):
             with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
                with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
                    input_images = tf.placeholder(dtype=tf.float32, shape=data_format, name='input')
                    # Note that endpoints does not include layer Input.
                    out, endpoints = inception_v3.inception_v3(inputs= input_images,final_endpoint=final_endpoints_dict[model_name])
                    sess.run(tf.global_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(sess, models_path)
                    for i in range(1,len(layer_name_list)-1):
                        result = sess.run(endpoints[layer_name_list[i]],feed_dict={input_images:layer_data})
                        # result[0] strips the batch dimension before saving.
                        np.save("middle_data/"+model_name+"/"+layer_name_list[i]+"_guitar.npy",result[0])

    def run_partition(self,input_images,input_data,sess,partition_layer):
        """
        Run the partial model [input, partition_point] and return the result.
        :param input_images: placeholder tensor to feed
        :param input_data: numpy array fed into the placeholder
        :param sess: active tf.Session with restored variables
        :param partition_layer: tensor (endpoint) to evaluate
        :return: the evaluated value of partition_layer
        """
        # BUG FIX: feed_dict was written as a set literal
        # {input_images, input_data}; sess.run requires a mapping from
        # placeholder to data.  Also return the result, as documented.
        return sess.run(partition_layer, feed_dict={input_images: input_data})

    def run_partition_inception(self):
        """
        longer models run their individual part, then pack the shared parts running in batch.

        Currently this only builds the Inception-v3 graph, restores the
        weights and prints the available endpoint names; the timing and
        Excel-export logic is kept below as commented-out scaffolding.
        :return:
        """
        model_name = "inception_v3"
        models_path = "model_zoo/weights/" + model_name + ".ckpt"
        '''
        partition_layer_names = self.layer_name_list
        layer_data_list = {}
        for partition_layer_name in partition_layer_names:
            layer_data = np.load("middle_data/" + model_name + "/" + partition_layer_name + "_guitar" + ".npy")
            layer_data_list[partition_layer_name] = np.asarray(layer_data, dtype=np.float32)[np.newaxis, :]
        '''
        data_format = [None, 299, 299, 3]
        input_data = np.asarray(np.load("middle_data/" + model_name + "/input_guitar" + ".npy"), dtype=np.float32)[np.newaxis, :]
        with tf.device("/device:GPU:0"):
             with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
                with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
                    input_images = tf.placeholder(dtype=tf.float32, shape=data_format, name='input')
                    # Note that endpoints does not include layer Input.
                    out, endpoints = inception_v3.inception_v3(inputs= input_images,final_endpoint=final_endpoints_dict[model_name])
                    sess.run(tf.global_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(sess, models_path)
                    result = []
                    # Only lists the endpoint names; the timing code below
                    # is currently disabled.
                    for op in endpoints:
                        print(op)
                    '''
                    for i in range(50):
                        #print(layer_name,input_info[layer_name]["layer_data"].shape)
                        a = time.time()
                        data = sess.run(endpoints[end_layer],feed_dict={start_layer:input_info[layer_name]["layer_data"]})
                        b = time.time()
                        temp.append(round(b-a,3))

                        '''

                    '''
                    result_title = []
                    for k in range(len(layer_index_list)):
                        if k<len(layer_index_list)-1:
                            name = str(layer_index_list[k])+"_"+str(layer_index_list[k+1])
                        else:
                            name = str(layer_index_list[k])+"_end"
                        result_title.append(name)
                    result = np.array(result)
                    result_pd = pd.DataFrame(data = result,columns=result_title,index = range(150))
                    result_pd.to_excel("model_partition_latency/inception/tx2/"+model_name+"_partial_latency_tx2.xlsx")
                    '''
    def partition_resnet(self,bw):
        """Evaluate every ResNet-50-v2 partition point under bandwidth ``bw``.

        :param bw: uplink bandwidth in Mbps (Mbit/s); converted to bytes/s
            inside partition_e2e.
        :return: tuple of per-partition-point lists
            (mobile latency, network latency, edge latency, end-to-end).
        """
        # Candidate partition points, in forward order.
        layers = ["input", "conv1", "pool1", 'block1/unit_1', 'block1/unit_2', 'block1/unit_3',
                   'block2/unit_1', 'block2/unit_2', 'block2/unit_3', 'block2/unit_4',
                   'block3/unit_1', 'block3/unit_2', 'block3/unit_3', 'block3/unit_4',
                   'block3/unit_5', 'block3/unit_6', 'block4/unit_1', 'block4/unit_2',
                   'block4/unit_3', "global_pool", 'predictions']
        # Measured on-device latency (seconds) up to each partition point.
        device_latency = np.array([0,0.012,0.012,0.01,0.012,0.013,0.017,0.019,0.021,
                                   0.023,0.027,0.03,0.033,0.036,0.039,0.041,0.051,0.06,0.069,0.069,0.072])
        # Serialized activation size (bytes) at each partition point.
        activation_bytes = [602274,3211426,802978,3211427,3211427,802979
                        ,1605795,1605795,1605795,401571,802979,802979,802979,802979,802979
                        ,200867,401571,401571,401571,8355]
        # Remaining server-side latency (seconds) after each partition point.
        server_latency = [0.009,0.009,0.008,0.009,0.009,0.007,0.007,0.007,0.006,0.005,
                        0.004,0.004,0.004,0.003,0.003,0.002,0.002,0.002,0.001,0.001]
        return self.partition_e2e(bw, layers, device_latency, activation_bytes, server_latency)
    def partition_e2e(self,bw,layer_name_list,mobile_latency_list,layer_data_size,edge_latency_list):
        """
        Break the end-to-end latency of every candidate partition point into
        its mobile / network / edge components.

        :param bw: bandwidth in Mbps (Mbit/s); converted to bytes/s here
        :param layer_name_list: N+1 layer names; N partition points evaluated
        :param mobile_latency_list: per-point on-device latency, seconds
        :param layer_data_size: per-point serialized activation size, bytes
        :param edge_latency_list: per-point remaining server latency, seconds
        :return: tuple (mobile_list, network_list, edge_list, e2e_list),
            each of length len(layer_name_list) - 1
        """
        # FIXES vs. original: removed the dead `e2e`/`select_point` tracking
        # (computed with a non-idiomatic `== None` check but never returned).
        M_list = []
        N_list = []
        E_list = []
        T_list = []
        bw = bw * 1024 * 1024 / 8.0  # Mbps -> bytes per second
        for k in range(len(layer_name_list) - 1):
            mobile_latency = mobile_latency_list[k]
            network_latency = layer_data_size[k] / bw
            edge_latency = edge_latency_list[k]
            M_list.append(mobile_latency)
            N_list.append(network_latency)
            E_list.append(edge_latency)
            # Round the summed end-to-end latency to milliseconds precision.
            T_list.append(np.around(mobile_latency + network_latency + edge_latency, 3))
        return (M_list, N_list, E_list, T_list)

    def partition_inception(self,bw):
        """
        find out the partition point that ensures Inception with the minimal e2e under specific bandwidth.
        :param bw: Mbps. Mbit/s (converted to bytes/s inside partition_e2e)
        :return: tuple (mobile_list, network_list, edge_list, e2e_list)
        """
        layer_name_list = ["input","Conv2d_1a_3x3", "Conv2d_2a_3x3", "Conv2d_2b_3x3",
                   "MaxPool_3a_3x3", "Conv2d_3b_1x1", "Conv2d_4a_3x3",
                   "MaxPool_5a_3x3", "Mixed_5b", "Mixed_5c",
                   "Mixed_5d", "Mixed_6a", "Mixed_6b", "Mixed_6c",
                   "Mixed_6d", "Mixed_6e", "Mixed_7a",
                   "Mixed_7b", "Mixed_7c", 'Predictions']
        # Inception runs on TX2: measured on-device latency (seconds) up to
        # each partition point.
        mobile_latency = np.array([0,0.006,0.007,0.011,0.011,0.011,0.015,0.015,
                                        0.017,0.021,0.025,0.03,0.036,0.044,0.051,0.061,0.066,0.075,0.086,0.089])
        # model inference of Inception running on workstation
        # np.array([0,0.004,0.012,0.025,0.026,0.027,0.044,0.045,0.054,0.066,
        # 0.076,0.086,0.098,0.11,0.124,0.14,0.149,0.154,0.165,0.166])
        # npz-compressed activation size (bytes) at each partition point.
        layer_data_size = [1072976, 2841890, 2766114, 5532066, 1364386, 1705442, 3871650,
                                940962, 1254563, 1411363, 1411363, 887971, 887971, 887971, 887971,
                                887971, 327843, 524451, 524451]

        # Remaining workstation latency (seconds) after each partition point.
        edge_latency = np.array([0.014,0.015,0.015,0.016,0.014,0.014,0.015,0.013,0.013,0.012,
                             0.011,0.01,0.009,0.007,0.006,0.005,0.003,0.002,0.001])
        # BUG FIXES: (1) the keyword names previously did not match
        # partition_e2e's signature (mobile_latency_list / edge_latency_list),
        # raising TypeError; (2) bw was converted Mbps->bytes/s here AND again
        # inside partition_e2e, double-counting the conversion.
        return self.partition_e2e(bw, layer_name_list=layer_name_list,
                                  mobile_latency_list=mobile_latency,
                                  layer_data_size=layer_data_size,
                                  edge_latency_list=edge_latency)



class GPUPartitionBaselines:
    """
    Run the partial Inception, ResNet and MobileNet in sequence, and record the inference latency.
    """
    def run_baselines(self,input_images,input_info,out,endpoints,sess,model_name,layer_name_list):
        """
        Call each partition layer in sequence, feed its cached data, time the
        partial forward pass on GPU, and export the latencies to Excel.
        :param input_images: input placeholder (used when layer_index == 0)
        :param input_info: {layer_name: {"layer_index": int, "layer_data": ndarray}}
        :param out: final prediction tensor that is always evaluated
        :param endpoints: endpoint dict (does not include the input layer)
        :param sess: active session with restored weights
        :param model_name: used to build the output file name
        :param layer_name_list: maps layer_index -> endpoint name
        :return:
        """
        rounds = []
        for _ in range(150):
            round_times = []
            for layer_name, info in input_info.items():
                feed_data = info["layer_data"]
                idx = info["layer_index"]
                # Feed the input placeholder for index 0, otherwise feed the
                # corresponding intermediate endpoint directly.
                if idx == 0:
                    feed_tensor = input_images
                else:
                    feed_tensor = endpoints[layer_name_list[idx]]
                    #print("--------",layer_name_list[idx],endpoints[layer_name_list[idx]])
                started = time.time()
                sess.run(out, feed_dict={feed_tensor: feed_data})
                elapsed = time.time() - started
                round_times.append(round(elapsed, 3))
            rounds.append(round_times)

        # Transpose so each column holds the 150 samples of one layer, then
        # export keyed by layer name (iteration order matches the timing loop).
        per_layer = np.array(rounds).T
        latency_dict = {name: per_layer[pos] for pos, name in enumerate(input_info.keys())}
        latency_pd = pd.DataFrame(latency_dict)
        latency_pd.to_excel("model_latency/" + model_name + "_sequency_latency.xlsx")
        print("model_latency/" + model_name + "_sequency_latency.xlsx")

    def run_baselines_inception(self):
        """
        run all the partial model in sequence, and the short model runs first.
        Note that the endpoints from Inception do not include layer Input.
        :return:
        """
        layer_name_list = ["input","Conv2d_1a_3x3", "Conv2d_2a_3x3", "Conv2d_2b_3x3",
                   "MaxPool_3a_3x3", "Conv2d_3b_1x1", "Conv2d_4a_3x3",
                   "MaxPool_5a_3x3", "Mixed_5b", "Mixed_5c",
                   "Mixed_5d", "Mixed_6a", "Mixed_6b", "Mixed_6c",
                   "Mixed_6d", "Mixed_6e", "Mixed_7a",
                   "Mixed_7b", "Mixed_7c", 'Predictions']
        model_name = "inception_v3"

        # 1. generate the partition info including partition layer name, and the corresponding data
        # layer_index maps each partition point into layer_name_list above.
        input_info = {"Mixed_6d": {"layer_index": 14, "layer_data": None},
                      "Mixed_5c": {"layer_index": 9, "layer_data": None},
                      "MaxPool_3a_3x3": {"layer_index":4, "layer_data": None},
                      "input": {"layer_index": 0, "layer_data": None}}
        for partition_layer_name in input_info.keys():
            layer_data = np.load("middle_data/" + model_name + "/" + partition_layer_name + "_guitar" + ".npy")
            # Re-add the batch dimension stripped when the dump was saved.
            input_info[partition_layer_name]["layer_data"] = np.asarray(layer_data, dtype=np.float32)[np.newaxis, :]
        models_path = "model_zoo/weights/" + model_name + ".ckpt"
        data_format = [None, 299, 299, 3]

        # 2. load the model into GPU, and call the partition layer to run the partial model.
        with tf.device("/device:GPU:0"):
             with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
                with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
                    input_images = tf.placeholder(dtype=tf.float32, shape=data_format, name='input')
                    # Note that endpoints does not include layer Input.
                    out, endpoints = inception_v3.inception_v3(inputs= input_images,final_endpoint=final_endpoints_dict[model_name])
                    sess.run(tf.global_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(sess, models_path)
                    self.run_baselines(input_images,input_info,out,endpoints,sess,model_name,layer_name_list)

    def run_baselines_resnet(self):
        """Run the partial ResNet-50-v2 baselines in sequence on GPU.

        Loads cached activations for each partition point (file names use
        '_' in place of '/'), restores the checkpoint and delegates the
        timing loop to run_baselines.
        """
        layer_name_list = ["input", "conv1", "pool1", 'block1/unit_1', 'block1/unit_2', 'block1/unit_3',
                     'block2/unit_1', 'block2/unit_2', 'block2/unit_3', 'block2/unit_4',
                     'block3/unit_1', 'block3/unit_2', 'block3/unit_3', 'block3/unit_4',
                     'block3/unit_5', 'block3/unit_6', 'block4/unit_1', 'block4/unit_2',
                     'block4/unit_3', "global_pool", 'predictions']
        model_name = "resnet_v2"

        # layer_index maps each partition point into layer_name_list above.
        # NOTE(review): run_baselines indexes endpoints with these bare
        # 'blockX/unit_Y' names -- confirm the model_zoo resnet_v2 endpoints
        # dict uses exactly these keys (no scope prefix).
        input_info = {"block3/unit_5": {"layer_index": 14, "layer_data": None},
                      "block2/unit_4": {"layer_index": 9, "layer_data": None},
                      "block1/unit_2": {"layer_index": 4, "layer_data": None},
                      "input": {"layer_index": 0, "layer_data": None}}
        for partition_layer_name in input_info.keys():
            # '/' is not a valid file-name character here, so dumps use '_'.
            layer_data = np.load("middle_data/" + model_name + "/" + partition_layer_name.replace("/","_") + "_guitar" + ".npy")
            #print(partition_layer_name,layer_data.shape)
            input_info[partition_layer_name]["layer_data"] = np.asarray(layer_data, dtype=np.float32)[np.newaxis, :]
        models_path = "model_zoo/weights/" + model_name + ".ckpt"
        data_format = [None, 224, 224, 3]
        #input_data = np.asarray(np.load("middle_data/" + model_name + "/input_guitar" + ".npy"),
        #                        dtype=np.float32)[np.newaxis, :]
        with tf.device("/device:GPU:0"):
             with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
                with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    input_images = tf.placeholder(dtype=tf.float32, shape=data_format, name='input')
                    out, endpoints = resnet_v2.resnet_v2_50(inputs= input_images)
                    # for name in endpoints.keys():
                    # print(name,endpoints[name].shape)
                    sess.run(tf.global_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(sess, models_path)
                    # for partition_layer_name in input_info.keys():
                        # layer_data = np.load("middle_data/" + model_name + "/" + partition_layer_name.replace("/","_") + "_guitar" + ".npy")
                        # print(partition_layer_name,layer_data.shape)
                        # data = np.asarray(layer_data, dtype=np.float32)[np.newaxis, :]
                        # result = sess.run(endpoints[partition_layer_name],feed_dict={input_images:input_data})[0]
                        # np.save("middle_data/" + model_name + "/" + partition_layer_name.replace("/","_") + "_guitar_new" + ".npy",result)
                    self.run_baselines(input_images,input_info,out,endpoints,sess,model_name,layer_name_list)

    def run_baselines_mobilenet(self):
        """
        Run the partial MobileNet-v1 baselines in sequence and record latency.

        BUG FIXES vs. the original:
          * the graph was built with resnet_v2_50 under a resnet arg scope
            even though this method loads MobileNet data and weights;
          * the placeholder shape was [None, 244, 244, 3] -- a typo for 224
            (see data_formats['input'] below);
          * run_baselines was called without its layer_name_list argument,
            which raises TypeError.
        """
        partition_layer = ['input', 'Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',
                           'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5_pointwise',
                           'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',
                           'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',
                           'Conv2d_12_pointwise', 'Conv2d_13_pointwise', 'Predictions']
        # Expected per-layer activation shapes (H, W, C), batch dim excluded.
        data_formats = {'input': (224, 224, 3), 'Conv2d_0': (112, 112, 32), 'Conv2d_1_pointwise': (112, 112, 64),
                        'Conv2d_2_pointwise': (56, 56, 128),
                        'Conv2d_3_pointwise': (56, 56, 128), 'Conv2d_4_pointwise': (28, 28, 256),
                        'Conv2d_5_pointwise': (28, 28, 256),
                        'Conv2d_6_pointwise': (14, 14, 512), 'Conv2d_7_pointwise': (14, 14, 512),
                        'Conv2d_8_pointwise': (14, 14, 512),
                        'Conv2d_9_pointwise': (14, 14, 512), 'Conv2d_10_pointwise': (14, 14, 512),
                        'Conv2d_11_pointwise': (14, 14, 512),
                        'Conv2d_12_pointwise': (7, 7, 1024), 'Conv2d_13_pointwise': (7, 7, 1024)}
        model_name = "mobilenet_v1"
        # NOTE(review): every non-input entry uses layer_index 7, mapping all
        # of them to 'Conv2d_6_pointwise' -- looks like a copy/paste slip;
        # confirm the intended indices (e.g. 11, 7, 3) before trusting results.
        input_info = {"Conv2d_10_pointwise": {"layer_index": 7, "layer_data": None},
                      "Conv2d_6_pointwise": {"layer_index": 7, "layer_data": None},
                      "Conv2d_2_pointwise": {"layer_index": 7, "layer_data": None},
                      "input": {"layer_index": 0, "layer_data": None}}
        for partition_layer_name in input_info.keys():
            layer_data = np.load("middle_data/" + model_name + "/" + partition_layer_name + "_guitar" + ".npy")
            print(partition_layer_name,layer_data.shape)
            input_info[partition_layer_name]["layer_data"] = np.asarray(layer_data, dtype=np.float32)
        models_path = "model_zoo/weights/" + model_name + ".ckpt"
        data_format = [None, 224, 224, 3]
        with tf.device("/device:GPU:0"):
            with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
                with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope()):
                    input_images = tf.placeholder(dtype=tf.float32, shape=data_format, name='input')
                    # NOTE(review): assumes model_zoo's mobilenet_v1 builder
                    # mirrors slim's (returns logits, end_points with a
                    # 'Predictions' endpoint) -- confirm its exact signature.
                    out, endpoints = mobilenet_v1.mobilenet_v1(inputs=input_images)
                    sess.run(tf.global_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(sess, models_path)
                    self.run_baselines(input_images,input_info,out,endpoints,sess,model_name,partition_layer)
# Disabled driver kept for reference (string literal, never executed):
# sweeps a set of measured bandwidths through partition_resnet and writes
# one Excel sheet per bandwidth.
'''
gpu = GPUPartition()
#gpu.run_partition_inception()
writer = pd.ExcelWriter("model_partition_latency/resnet/resnet_partition_latency_details_tx2.xlsx")
model_name = "inception_v3"
for bw in [85.3,90.3,92.625,105,79.2,88.3,91.9,104,81.85,87.2,90.8,106]:
    result = gpu.partition_resnet(bw)
    #print(len(gpu.layer_data_size[:len(gpu.layer_data_size)-1]),len(result[0]))
    result_pd = pd.DataFrame({"mobile":result[0],"network":result[1],
                              "edge":result[2],"e2e":result[3]})
    result_pd.to_excel(writer,sheet_name="bw="+str(bw))
writer.save()
writer.close()
'''
# For bandwidths 80..119 Mbps, load the precomputed per-partition-point
# latencies and print the arg-min partition index whenever it differs from 0.
for bw in range(80,120):
    excel = "model_partition_latency/inception/inception_partition_latency_details.xlsx"
    data = pd.read_excel(excel,sheet_name="bw="+str(bw),index_col=0)
    e2e = data["e2e"].values
    # index of the partition point with the smallest end-to-end latency
    index = np.argmin(e2e)
    if index!=0:
        print(index)
    #print(index)




