import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import numpy as np
import time
import json
import threading
import socket
from math import ceil
import queue

class Glimmer_send:
    """Reliable bulk data transfer over UDP with chunking and resend.

    Splits a bytes payload into fixed-size chunks, streams them through the
    parent instance's ``udp_send``, and recovers lost chunks by having the
    receiver request resends of missing chunk ids.  The parent object
    (``father_instance``) must provide ``udp_send(addr, bytes)`` and
    ``command_send(addr, json_str)`` and must route incoming orders
    ("data", "send_ready", "start_send_data", "resend", "receive_complete")
    back into this object (see SplitLearning.listen).
    """
    # ---- tunable configuration (immutable class-level defaults) ----
    port = 23336              # default UDP port; overridden by __init__
    send_bytes_once = 1024    # payload bytes carried per UDP datagram
    send_speed = 20           # pacing factor used to throttle the send loop
    wait_time = 20            # seconds to wait for feedback after sending

    def __init__(self, addr, instance):
        """addr: (local_ip, port) tuple; instance: parent providing UDP helpers."""
        self.local_ip = addr[0]
        self.port = addr[1]
        self.father_instance = instance
        # Per-instance mutable state.  These were previously class attributes,
        # which silently shared the queue and flags between instances.
        self.resend_id_queue = queue.Queue()   # lists of chunk ids the receiver wants again
        self.flag_receive_complete = False     # sender side: receiver confirmed completion
        self.flag_start_to_send = False        # sender side: receiver said "go"
        self.flag_is_receving = False          # receiver side: only one transfer at a time
        self.th_check_receive = None

    def check_receive(self, s_addr):
        """Poll the receive buffer until every chunk has arrived.

        Runs in its own thread.  If the filled-chunk count stalls for more
        than 2 seconds while data has started arriving, the missing ids are
        reported back to the sender in batches of 50 so each resend request
        fits comfortably in one datagram.
        """
        last_receive_time = 0
        last_receive_n = 0
        while True:
            is_update_completed = True
            # Check once per second; without the sleep this loop starves the
            # listen thread and reception becomes very slow.
            time.sleep(1)
            if last_receive_time == 0:
                last_receive_time = time.time()
            N = len(self.received_buffer)
            n = 0
            for j in range(0, N):
                if self.received_buffer[j] != []:
                    n += 1
            if n < N:
                is_update_completed = False
                if n != last_receive_n:
                    last_receive_n = n
                    last_receive_time = time.time()
                else:
                    # No progress for >2s after data started: assume packet loss.
                    if time.time() - last_receive_time > 2 and n != 0:
                        resend_id_list = []
                        for j in range(0, N):
                            if self.received_buffer[j] == []:
                                resend_id_list.append(j)
                        resend_id_n = len(resend_id_list)  # number of lost chunks
                        print("packet lost {}/{},try to resend".format(resend_id_n, N))
                        for ii in range(0, ceil(resend_id_n / 50)):
                            resend_order_json = json.dumps({"order": "resend", "resend_id_list": resend_id_list[50 * ii:50 * (ii + 1)]})
                            self.father_instance.udp_send(tuple(s_addr), resend_order_json.encode('utf-8'))
                            time.sleep(0.023)
                        last_receive_time = time.time()
            if is_update_completed:
                break
        ans = self.father_instance.command_send(tuple(s_addr), '{"order":"receive_complete"}')
        print("\rreceive_complete ", ans, "        ")
        return

    def start_receive(self, json_data):
        """Handle a 'send_ready' order end to end.

        Allocates one buffer slot per expected chunk, tells the sender to
        start, blocks until check_receive() sees a complete buffer, then
        invokes the parent's callback named in json_data with the
        reassembled bytes.  Only one transfer is received at a time.
        """
        while True:
            if self.flag_is_receving:
                time.sleep(1)
            else:
                self.flag_is_receving = True
                break
        call_back = json_data["call_back"]
        N = int(json_data["N"])
        s_addr = json_data["addr"]
        self.received_buffer = []
        for i in range(0, N):  # initialise one empty slot per expected chunk
            self.received_buffer.append([])

        th_check_receive = threading.Thread(target=self.check_receive, args=(s_addr,))
        th_check_receive.start()
        ans = self.father_instance.command_send(tuple(s_addr), '{"order":"start_send_data"}')
        print("please start_send_data", ans)
        th_check_receive.join()
        strb = b"".join(self.received_buffer)
        self.received_buffer = []  # free the buffer
        self.flag_is_receving = False
        # Look the callback up by name.  getattr replaces the original
        # eval(), which would evaluate arbitrary expressions received
        # off the wire.
        getattr(self.father_instance, call_back)(strb, json_data)
        return

    def prepare_send(self, r_addr, send_dic, send_bytes):
        """Announce a transfer to r_addr.

        send_dic must contain 'call_back' (name of the receiver-side
        callback); 'order' and 'N' are reserved keys and are filled in here.
        Returns the command_send acknowledgement result.
        """
        self.send_bytes = send_bytes
        data_length = len(self.send_bytes)
        send_turns = ceil(data_length / self.send_bytes_once)
        if "order" in send_dic or "N" in send_dic:
            raise RuntimeWarning("order N in send_dic is overrode")
        send_dic["order"] = "send_ready"
        send_dic["N"] = str(send_turns)
        assert "call_back" in send_dic, "Glimmer_send.send receives dic without key call_back"
        send_json = json.dumps(send_dic)
        assert len(send_json) < 1400, "Glimmer_send send_dic is too big, big data shall be put in send_bytes"
        ans = self.father_instance.command_send(r_addr, send_json)
        return ans

    def receive_data(self, json_data, receive_data2):
        """Store one received chunk at its chunk id (called from listen)."""
        self.received_buffer[json_data["data_id"]] = receive_data2
        return

    def send_data(self, addr):
        """Stream self.send_bytes to addr in chunks, then service resends.

        After the initial pass the sender idles for up to wait_time seconds,
        answering 'resend' requests, until the receiver acknowledges
        completion (flag_receive_complete set by the listen thread).
        """
        check_turns = 50
        data_length = len(self.send_bytes)
        send_turns = ceil(data_length / self.send_bytes_once)
        # Target duration for every `check_turns` datagrams (pacing budget).
        should_have_time = check_turns * self.send_bytes_once / self.send_speed * 10e-6
        last_time = time.time()
        for i in range(0, send_turns):  # main send loop
            print('\r', '{}/{}'.format(i + 1, send_turns), end='')
            if i % check_turns == 0:
                # Sleep off whatever time budget is left for this batch.
                sleep_time = should_have_time - (time.time() - last_time)
                if sleep_time > 0:
                    time.sleep(sleep_time)
                last_time = time.time()
            sendjson = '{"order":"data","ip":"' + self.local_ip + '","data_id":' + str(i) + '}'
            self.father_instance.udp_send(addr, sendjson.encode('utf-8') + self.send_bytes[i * self.send_bytes_once:(i + 1) * self.send_bytes_once])
        # Initial pass done: wait for resend requests or the completion ack.
        wait_time = self.wait_time
        while wait_time > 0:
            if self.flag_receive_complete:
                self.flag_receive_complete = False
                print("\rsend complete, received feed back")
                return
            if self.resend_id_queue.qsize() > 0:
                resend_id_list = self.resend_id_queue.get()
                for i in resend_id_list:  # resend the requested chunks
                    sendjson = '{"order":"data","ip":"' + self.local_ip + '","data_id":' + str(i) + '}'
                    self.father_instance.udp_send(addr, sendjson.encode('utf-8') + self.send_bytes[i * self.send_bytes_once:(i + 1) * self.send_bytes_once])
            else:
                wait_time -= 1
                time.sleep(1)
        print("send_data complete but didn't receive feedback")
        return

    def send(self, addr, send_dic, send_bytes):
        """Top-level entry point for sending.

        Announces the transfer, waits up to 60 seconds for the receiver's
        'start_send_data' go-ahead (flag set by the listen thread), then
        streams the payload.
        """
        ans = self.prepare_send(addr, send_dic, send_bytes)
        if not ans:
            print("Glimmer_send failed,{} is irresponsive".format(addr))
            return
        wait_remaining = 60  # maximum seconds to wait for the go-ahead
        while True:
            if self.flag_start_to_send:
                self.flag_start_to_send = False
                break
            if wait_remaining > 0:
                wait_remaining -= 1
                print("\rwaiting", end='')
            else:
                print("Glimmer_send waite time out")
                return
            time.sleep(1)
        print("start to send", end='')
        self.send_data(addr)
        return


class SplitLearning:
    """Split-learning node communicating over UDP.

    The model is split into two halves: part 1 (the first conv block) runs
    locally, part 2 (the remainder) is offloaded to a remote node.  Each
    node runs listen() in a background thread and dispatches incoming JSON
    orders, using Glimmer_send for reliable bulk transfers of weights and
    intermediate activations.
    """
    # ---- user-configurable parameters ----
    learning_rate = 0.01
    batch_size = 10
    port = 23336
    local_ip = None
    is_debug = False
    # ---- runtime state ----
    running = True  # listen() keeps polling while this is True

    # ---------------- control section ----------------
    def __init__(self):
        # Per-instance mutable state.  `flag` and `command_id` used to be
        # class attributes, which would be shared between instances.
        self.command_id = set()  # feedback ids already seen/acknowledged
        self.flag = {"receive_backward": False, "receive_sp2": False}
        self.G_s = None          # Glimmer_send transport, created in start()
        # Model pieces; each create_* also records which layer indices
        # actually carry weights (model_data_layer_index* / n_data_layer*).
        self.split_model = self.create_split_model()
        self.split_model_p1 = self.create_split_model_p1()
        self.split_model_p2 = self.create_split_model_p2()
        with open("config/SL_config.json", 'r', encoding='utf-8') as fp:
            json_data = json.load(fp)
            print('config file:', json_data)
            self.local_ip = json_data['local_ip']
            self.port = json_data['port']
            if "is_debug" in json_data:
                self.is_debug = json_data['is_debug']
        return

    def start(self):
        """Start the UDP listener thread and the transfer helper; in debug
        mode also run the local end-to-end test."""
        self.th_listen = threading.Thread(target=self.listen, args=())
        self.th_listen.start()
        self.G_s = Glimmer_send((self.local_ip, self.port), self)
        if self.is_debug:
            self.test()
        return

    def listen(self):
        """Receive datagrams and dispatch them by their JSON 'order' field.

        Each datagram is a flat JSON header optionally followed by raw
        chunk bytes; the header is assumed to end at the first '}' byte
        (so the header must not contain nested objects).
        """
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        server_socket.bind(("", self.port))
        while self.running:
            server_socket.settimeout(3)  # wake every 3s to re-check self.running
            try:
                receive_data, client = server_socket.recvfrom(1400)
                josn_end = receive_data.index(b"}") + 1
                receive_data2 = receive_data[josn_end:]
                json_data = json.loads(receive_data[:josn_end].decode('utf-8'))
                if "feedback_id" in json_data:
                    if not self.feedback(json_data):
                        json_data = {}  # already handled: skip the dispatch below
                if "order" in json_data:
                    # -------------<Glimmer_send>---------------
                    if json_data["order"] == "data":
                        self.G_s.receive_data(json_data, receive_data2)
                    elif json_data["order"] == "send_ready":
                        th_receive = threading.Thread(target=self.G_s.start_receive, args=(json_data,))
                        th_receive.start()
                    elif json_data["order"] == "start_send_data":
                        self.G_s.flag_start_to_send = True
                    elif json_data["order"] == "resend":
                        self.G_s.resend_id_queue.put(json_data["resend_id_list"])
                    elif json_data["order"] == "receive_complete":
                        self.G_s.flag_receive_complete = True
                    # -------------<Glimmer_send>---------------
                    elif json_data["order"] == "call_back":
                        # getattr replaces the original eval(), which would
                        # execute arbitrary expressions received off the wire.
                        th_callback = threading.Thread(target=getattr(self, json_data["call_back"]), args=(json_data,))
                        th_callback.start()
            except socket.timeout:
                pass
        print("stop listen")
        return

    def cb_save_model(self, data, json_data):
        """Callback: persist received bytes under ./model/<save_name>."""
        with open("./model/{}".format(json_data["save_name"]), "wb") as f2:
            f2.write(data)
        return

    def wait_for_flag(self, key):
        """Block until self.flag[key] becomes True, then reset it to False.
        Shows a small spinner while waiting (set by listener callbacks)."""
        i = 0
        while True:
            if self.flag[key]:
                self.flag[key] = False
                print("\rwaite for {} complete ".format(key))
                return
            else:
                time.sleep(0.5)
                i += 1
                a = ["--", "\\", "|", "/"][i % 4]
                print("\rwaite for {} {} ".format(key, a), end='')
        return

    # ---------------- split-learning section ----------------
    def test(self):
        """Debug end-to-end run on MNIST: split-train against a remote node,
        fetch the trained back half, evaluate, then compare with normal
        (non-split) training of the full model."""
        mnist = tf.keras.datasets.mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        y_train = tf.one_hot(y_train, depth=10, dtype=tf.uint8)
        y_test = tf.one_hot(y_test, depth=10, dtype=tf.uint8)

        print("分割学习：")
        self.load_weights_from_split_model()
        self.split_train(x_train[:100], y_train[:100], ("192.168.118.102", self.port))
        print("")

        # Pull the trained back half of the model back from the offload node.
        self.command_send(("192.168.118.102", self.port), '{"order":"call_back","call_back":"cb_send_sp2"}')
        self.wait_for_flag("receive_sp2")

        print("测试结果")
        self.split_test(x_test, y_test)

        print("正常训练：")
        self.split_model.fit(x_train[:100], y_train[:100], epochs=1, batch_size=10, verbose=1)
        print("测试结果")
        self.split_model.evaluate(x_test, y_test)

        return

    def _index_data_layers(self, model):
        """Return (indices, count) of the layers in `model` that carry weights.
        Shared bookkeeping for the three create_split_model* builders."""
        indices = []
        for i in range(len(model.layers)):
            if model.layers[i].get_weights() != []:
                indices.append(i)
        return indices, len(indices)

    def create_split_model(self):
        """Build the full MNIST CNN used as the debug reference model."""
        input = tf.keras.Input(shape=(28, 28, 1))
        y1 = tf.keras.layers.Convolution2D(32, (5, 5), activation=tf.nn.relu)(input)
        y2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(y1)
        x3 = tf.keras.layers.Convolution2D(64, (5, 5), activation=tf.nn.relu)(y2)
        x4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x3)
        x5 = tf.keras.layers.Flatten()(x4)
        x6 = tf.keras.layers.Dense(512, activation=tf.nn.relu)(x5)
        output = tf.keras.layers.Dense(10, activation=tf.nn.softmax)(x6)
        model = tf.keras.Model(input, output, name="full_model")
        model.build(input_shape=(28, 28))
        model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
                      loss=tf.keras.losses.MSE,
                      metrics=['accuracy'])
        self.model_data_layer_index, self.n_data_layer = self._index_data_layers(model)
        return model

    def create_split_model_p1(self):
        """Build part 1 of the split: conv + pooling, kept on this node."""
        input = tf.keras.Input(shape=(28, 28, 1))
        y1 = tf.keras.layers.Convolution2D(32, (5, 5), activation=tf.nn.relu)(input)
        output = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(y1)
        model = tf.keras.Model(input, output, name="split_model_part1")
        model.build(input_shape=(28, 28))
        model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
                      loss=tf.keras.losses.MSE,
                      metrics=['accuracy'])
        self.model_data_layer_index_p1, self.n_data_layer_p1 = self._index_data_layers(model)
        return model

    def create_split_model_p2(self):
        """Build part 2 of the split: the remaining layers, run on the
        offload node.  Its input shape matches part 1's output."""
        input = tf.keras.Input(shape=(12, 12, 32))
        x3 = tf.keras.layers.Convolution2D(64, (5, 5), activation=tf.nn.relu)(input)
        x4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x3)
        x5 = tf.keras.layers.Flatten()(x4)
        x6 = tf.keras.layers.Dense(512, activation=tf.nn.relu)(x5)
        output = tf.keras.layers.Dense(10, activation=tf.nn.softmax)(x6)
        model = tf.keras.Model(input, output, name="split_model_part2")
        model.build(input_shape=(12, 12, 32))
        model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
                      loss=tf.keras.losses.MSE,
                      metrics=['accuracy'])
        self.model_data_layer_index_p2, self.n_data_layer_p2 = self._index_data_layers(model)
        return model

    def split_train(self, train_x, train_y, addr):
        """Train by split learning: part 1 locally, part 2 on `addr`.

        First ships part-2 weights to the offload node; then for every
        mini-batch: forward locally, ship activations + labels, wait for the
        returned gradient w.r.t. the activations, and apply SGD to part 1.
        """
        # Ship the back half of the model to the offload node.
        self.split_model_p2.save_weights('./model/split_model_p2.h5')
        with open("./model/split_model_p2.h5", "rb") as f:
            data_bytes = f.read()
        self.G_s.send(addr, {"call_back": "cb_sp2_init"}, data_bytes)

        BGD_turns = ceil(len(train_x) / self.batch_size)
        for i in range(0, BGD_turns):
            print("\rsplit_train:{}/{}".format(i + 1, BGD_turns))
            # Local forward pass through part 1.
            train_x_ = train_x[i * self.batch_size:(i + 1) * self.batch_size]
            train_y_ = train_y[i * self.batch_size:min((i + 1) * self.batch_size, len(train_y))]
            intermediate_forward = self.split_model_p1(train_x_)
            # Ship the intermediate activations and labels to the offload node.
            np.savez("./model/intermediate_forward.npz", intermediate_forward=intermediate_forward, train_y_=train_y_)
            with open("./model/intermediate_forward.npz", "rb") as f:
                data_bytes = f.read()
            self.G_s.send(addr, {"call_back": "cb_split_train"}, data_bytes)

            # Wait for the gradient of the loss w.r.t. the activations.
            self.wait_for_flag("receive_backward")
            intermediate_backward = self.intermediate_backward
            # Local backward pass: SGD update of part 1's weights.
            do_dx_list = self.back_forward_p1(train_x_, intermediate_backward)
            for j in range(self.n_data_layer_p1):
                layer_index = self.model_data_layer_index_p1[j]
                weights = self.split_model_p1.layers[layer_index].get_weights()
                for ii in range(0, len(weights)):
                    weights[ii] -= self.learning_rate * do_dx_list[j][ii]
                self.split_model_p1.layers[layer_index].set_weights(weights)

        return

    def load_weights_from_split_model(self):
        """Copy the full model's weights into parts 1 and 2 so the split
        pair starts from the same parameters as the reference model."""
        assert self.n_data_layer_p1 + self.n_data_layer_p2 == self.n_data_layer, "split model error"
        for i in range(self.n_data_layer):
            layer_index = self.model_data_layer_index[i]
            weights = self.split_model.layers[layer_index].get_weights()
            if i < self.n_data_layer_p1:
                layer_index_p1 = self.model_data_layer_index_p1[i]
                self.split_model_p1.layers[layer_index_p1].set_weights(weights)
            else:
                layer_index_p2 = self.model_data_layer_index_p2[i - self.n_data_layer_p1]
                self.split_model_p2.layers[layer_index_p2].set_weights(weights)
        return

    def split_test(self, x_test, y_test):
        """Evaluate the split pair locally: part-1 forward, then part-2
        evaluate on the intermediate activations."""
        intermediate = self.split_model_p1(x_test)
        self.split_model_p2.evaluate(intermediate, y_test)
        return

    @tf.function
    def back_forward_p1(self, train_x_, intermediate_backward):
        """Chain-rule gradients for part 1: backpropagate
        `intermediate_backward` (dLoss/dActivations received from the
        offload node) through part 1's trainable variables via grad_ys."""
        do_dx_list = []
        intermediate_forward = self.split_model_p1(train_x_)
        for j in self.model_data_layer_index_p1:
            do_dx = tf.gradients(intermediate_forward, self.split_model_p1.layers[j].trainable_variables, grad_ys=intermediate_backward)
            do_dx_list.append(do_dx)
        return do_dx_list

    def cb_sp2_init(self, data, json_data):
        """Callback (offload node): install the received part-2 weights."""
        with open("./model/split_model_p2.h5", "wb") as f:
            f.write(data)
        self.split_model_p2.load_weights("./model/split_model_p2.h5")
        return

    def cb_sp2_sendback(self, data, json_data):
        """Callback (origin node): receive the trained part-2 weights back
        and signal wait_for_flag('receive_sp2')."""
        with open("./model/split_model_p2.h5", "wb") as f:
            f.write(data)
        self.split_model_p2.load_weights("./model/split_model_p2.h5")
        self.flag["receive_sp2"] = True
        return

    def cb_split_train(self, data, json_data):
        """Callback (offload node): run part 2 forward+backward on the
        received activations, update part-2 weights by SGD, and send back
        the gradient of the loss w.r.t. the activations."""
        addr = json_data["addr"]
        with open("./model/intermediate_forward.npz", "wb") as f:
            f.write(data)
        npdata = np.load("./model/intermediate_forward.npz")
        intermediate_forward = npdata["intermediate_forward"]
        train_y_ = npdata["train_y_"]
        intermediate_forward = tf.convert_to_tensor(intermediate_forward)
        with tf.GradientTape(persistent=True) as t:
            t.watch(intermediate_forward)
            y_pred = self.split_model_p2(intermediate_forward)
            mse = tf.losses.mean_squared_error(train_y_, y_pred)
            mse_ave = tf.reduce_mean(mse)  # average the per-sample MSE

        # SGD update of every weight-carrying layer in part 2.
        for j in self.model_data_layer_index_p2:
            do_dx = t.gradient(mse_ave, self.split_model_p2.layers[j].trainable_variables)
            weights = self.split_model_p2.layers[j].get_weights()
            for ii in range(0, len(weights)):
                weights[ii] -= self.learning_rate * do_dx[ii]
            self.split_model_p2.layers[j].set_weights(weights)
        intermediate_backward = t.gradient(mse_ave, intermediate_forward)
        del t  # release the persistent tape

        # Return the activation gradient to the origin node.
        np.savez("./model/intermediate_backward.npz", intermediate_backward=intermediate_backward)
        with open("./model/intermediate_backward.npz", "rb") as f:
            data_bytes = f.read()
        self.G_s.send(tuple(addr), {"call_back": "cb_split_train2"}, data_bytes)
        return

    def cb_split_train2(self, data, json_data):
        """Callback (origin node): store the received activation gradient
        and signal wait_for_flag('receive_backward')."""
        with open("./model/intermediate_backward.npz", "wb") as f:
            f.write(data)
        npdata = np.load("./model/intermediate_backward.npz")
        self.intermediate_backward = npdata["intermediate_backward"]
        self.flag["receive_backward"] = True
        return

    def cb_send_sp2(self, json_data):
        """Callback (offload node): send the trained part-2 weights back to
        the node that requested them."""
        addr = tuple(json_data["addr"])
        self.split_model_p2.save_weights("./model/split_model_p2.h5")
        with open("./model/split_model_p2.h5", "rb") as f:
            data_bytes = f.read()
        self.G_s.send(addr, {"call_back": "cb_sp2_sendback"}, data_bytes)
        return

    # ---------------- utilities ----------------
    def udp_send(self, address, message):
        """Fire-and-forget: send `message` bytes to `address` over UDP."""
        client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        client_socket.sendto(message, address)
        client_socket.close()
        return

    def command_send(self, address, message):
        """Send a JSON command that requires an acknowledgement.

        Tags the message with a random feedback_id and our own address,
        resends every 0.5s, and returns True once the peer's feedback is
        recorded in self.command_id (by the listen thread), or False after
        3 seconds without a response.
        """
        message_dic = json.loads(message)
        # Integer bound: float arguments to randint are deprecated in NumPy.
        random_id = np.random.randint(1000000)
        if "feedback_id" in message_dic or "addr" in message_dic:
            raise RuntimeWarning("feedback_id addr in message_dic is overrode")
        message_dic['feedback_id'] = random_id
        message_dic['addr'] = (self.local_ip, self.port)

        message = json.dumps(message_dic)
        self.udp_send(address, message.encode("utf-8"))
        start_time = time.time()
        check_time = 0.1   # interval between acknowledgement checks
        resend_time = 0.5  # interval between retransmissions
        resend_n = round(resend_time / check_time)
        n = 0
        while time.time() - start_time < 3:
            time.sleep(check_time)
            n += 1
            if random_id in self.command_id:
                return True
            elif n == resend_n:
                self.udp_send(address, message.encode("utf-8"))  # no ack yet: resend
                n = 0
        print("node {} is unresponsive, task id:{}, message{}".format(address, random_id, message))
        return False

    def feedback(self, json_data):
        """Process a feedback_id-tagged datagram.

        Returns False for acknowledgements and already-processed commands,
        True for a new command that still needs handling.  New commands are
        always acknowledged back to the sender.
        """
        fb_id = json_data["feedback_id"]  # renamed from `id` (shadowed the builtin)
        ip = json_data["addr"][0]
        port = json_data["addr"][1]
        if "order" in json_data:  # an ack sent by the peer, not a new command
            if json_data["order"] == "feedback":
                self.command_id.add(fb_id)
                return False  # acks need no further handling
        # Possibly a new command: acknowledge it to the sender either way.
        send_dic = {"order": "feedback", "feedback_id": fb_id}
        send_dic['addr'] = (self.local_ip, self.port)
        send_json = json.dumps(send_dic)
        self.udp_send((ip, port), send_json.encode('utf-8'))
        if fb_id in self.command_id:
            return False
        self.command_id.add(fb_id)
        return True




if __name__ == "__main__":
    # Start a node only when run as a script, so importing this module for
    # reuse (e.g. of Glimmer_send) does not open sockets or load models.
    s = SplitLearning()
    s.start()



