# -*- coding: utf-8 -*-
import time
from concurrent import futures
import cv2
import sys
import json
import threading
import socket
from math import ceil
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import grpc
import base64
import Datas_pb2
import Datas_pb2_grpc 
import paho.mqtt.client as mqtt
class SendImage(Datas_pb2_grpc.SendImageServicer):
    """gRPC servicer that accepts base64-encoded JPEG frames and hands each
    decoded frame to the federated-learning server's camera predictor."""
    FLserver = None  # back-reference to the owning FLserver instance
    def __init__(self, FLserver):
        self.FLserver = FLserver
    def SendRequest(self, request, context):
        """Decode one frame from the request, run prediction, reply with 1."""
        raw = base64.b64decode(request.datas)
        pixel_buf = np.frombuffer(raw, dtype=np.uint8)
        frame = cv2.imdecode(pixel_buf, cv2.IMREAD_COLOR)
        self.FLserver.local_model_predict_camera(frame)
        time.sleep(1)  # pause one second between processed frames
        return Datas_pb2.toClient(reply=1)

class FLserver:
    """One node of a tree-structured federated-learning system.

    Nodes exchange JSON messages over UDP (all on `port`): aggregation flows
    from the leaves up to the root, the averaged model flows back down.
    """
    # --- Core section: control ---
    command_id=set()  # ids of commands already handled (dedup for resent commands)
    isroot=False  # True when this node has no up_server (tree root)
    running=False  # run flag checked by the listener/worker loops
    th_listen=None # thread listening on the UDP port
    state_lock = threading.Lock()  # guards transitions of `state`
    state=0# work state: 0 uninitialized, 1 normal, 2 training / waiting for lower-level aggregation, 3 aggregating upward / aggregation done, 4 receiving/sending/applying a new model
    isdebug=False# debug mode: do not connect to MQTT

    # --- Core section: buffers/cached values ---
    # NOTE(review): these are class-level mutable defaults shared across
    # instances until clear_buffer()/__init__ rebinds them — confirm only one
    # FLserver is ever created per process.
    local_ip=None  # this node's IP (hostname lookup, optionally overridden by config)
    data_json=None # serialized layer data most recently sent (kept for resends)
    send_bytes_once=None  # payload chunk size per UDP packet
    cloud_server=None # monitoring/cloud server; state reports are sent to it
    up_server=None  # parent node IP ("" when root)
    down_server=None  # list of child node IPs
    n_down_server=None  # len(down_server)
    local_model=None  # this node's model
    aggregate_model=None  # model used to accumulate the aggregation result
    n_data_layer=None# number of weight-bearing layers; set by create_model
    model_data_layer_index=None# indices of the weight-bearing layers; set by create_model
    received_buffer_new=[]# buffer for a pushed-down new model: list of packet payloads (length = total packet count)
    received_buffer=[]# buffers for models received from children: list (per child) of packet lists
    is_buffer_initialized=[]# per child: 0 unknown yet, 1 initialized (participating), -1 not participating
    received_weight=[]# per child: aggregation weight, carried by that child's last packet

    # --- Parameters ---
    wait_time=3    # seconds without new packets after which packets are considered lost
    send_speed=20   # send rate in M/s; must not exceed 21.7
    aggregate_waite_time=60 # give up on a child that has not started sending within this many seconds of aggregation start
    port=None  # UDP port used for all node-to-node traffic
    local_weight=1# weight of the local model (0 when not participating)
    isparticipant=False# whether this node takes part in the current round

    # --- Application section ---
    client=None    # mqtt client
    mqtt_server=None  # MQTT broker address
    x_train=[]# cached images
    y_train=[]# cached labels
    #---------------主要函数、控制相关-----------------
    def listen(self):
        """Main UDP dispatch loop.

        Receives JSON commands on self.port and routes them to the matching
        handler until self.running goes False. The socket timeout lets the
        loop wake periodically to observe the shutdown flag.
        """
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        server_socket.bind(("", self.port))
        server_socket.settimeout(3)  # wake every 3s to check the shutdown flag
        while(self.running):
            try:
                receive_data, client = server_socket.recvfrom(1400)
                json_data = json.loads(receive_data.decode('utf-8'))
                if "feedback_id" in json_data:
                    if not self.feedback(client,json_data):
                        json_data={}  # already-handled command: skip it
                if "order" in json_data:
                    if (json_data['order']=="aggregation_request"):  # request to start aggregating
                        if(self.isroot):
                            th_start_aggregate= threading.Thread(target=self.start_aggregation, args=())
                            th_start_aggregate.start()
                        else:
                            # not the root: forward the request upward
                            self.command_send((self.up_server,self.port),receive_data.decode('utf-8'))
                    elif(json_data['order']=="aggregation_confirm" and self.state!=2):
                        th_start_aggregate= threading.Thread(target=self.start_aggregation, args=())
                        th_start_aggregate.start()
                    elif (json_data['order']=="aggregation_data"):  # model uploaded by a child
                        self.receive_model(json_data)
                    elif (json_data['order']=="update_data"):  # new model pushed down by the parent
                        # first packet triggers start_update; top-down transfer needs no confirm
                        self.receive_update_model(json_data)
                    elif(json_data['order']=="aggregation_resend"):
                        self.resend_local_model(json_data['rsend_id_list'],"aggregation_data")
                    elif(json_data['order']=="update_resend"):
                        self.resend_local_model(json_data['rsend_id_list'],"update_data")
                    elif(json_data['order']=="test_local_model"):  # external command: evaluate the model
                        self.test_local_model()
                    elif(json_data['order']=="read_model"):  # external command: load model from disk
                        self.local_model_read()
                    elif(json_data['order']=="reset_model"):  # external command: reset the model
                        self.local_model_reset()
                    elif(json_data['order']=="save_model"):  # external command: save model to disk
                        self.local_model_save()
                    elif(json_data['order']=="send_aggregation_request"):  # external command: ask parent to aggregate
                        self.udp_send((self.up_server,self.port),'{"order":"aggregation_request"}'.encode('utf-8'))
                    elif(json_data['order']=="change_topology"):  # external command: change the topology
                        self.change_topology(json_data)
            except socket.timeout:
                pass
        server_socket.close()  # fix: release the port on shutdown (socket was leaked)
        print("stop listen")
        return
    def start(self):
        """Bring the node online: enter state 1, connect MQTT (unless debug
        mode), and launch the listener, state-reporter and camera threads."""
        with self.state_lock:
            self.state = 1
        self.running = True
        if self.isdebug:
            print("It is debug mode")
        else:
            self.client = mqtt.Client()
            self.client.connect(self.mqtt_server)
        self.th_listen = threading.Thread(target=self.listen, args=())
        self.th_listen.start()
        print('Federated Learning v1.1.6 started at '+self.local_ip+":"+str(self.port))
        reporter = threading.Thread(target=self.show_state, args=())
        reporter.start()
        if not self.isdebug:
            camera_thread = threading.Thread(target=self.camer_server, args=())
            camera_thread.start()
        return
    def stop(self):
        self.running=False
        self.state_lock.acquire()
        self.state=0
        self.state_lock.release()
        self.th_listen.join()
        return
    def show_state(self):
        """Report this node's state to the cloud server once per second while
        running, and log local state transitions."""
        last_state=0
        while self.running:
            time.sleep(1)
            if self.state!=last_state:
                print("state changes to:",self.state)
                last_state=self.state
            # fix: was `s.local_ip` (a module-level global) — use self.local_ip
            order_json=json.dumps({"order":"send_state","ip":self.local_ip,"isroot":self.isroot,"state":self.state,"up_server":self.up_server,"down_server":self.down_server})
            self.udp_send((self.cloud_server,self.port),order_json.encode('utf-8'))
        return
    def change_topology(self,json_data):
        print("start to change_topology")
        if self.state!=1:
            text="ip:"+self.local_ip+",isn't in state 1,refuse to change topology"
            sendjson=json.dumps({"order":"print","text":""})
            self.udp_send((self.cloud_server,self.port),sendjson.encode('utf-8'))
            return
        self.state_lock.acquire()
        self.up_server=json_data['up_server']
        self.down_server=json_data['down_server']
        if self.up_server=="":
            self.isroot=True
        else:
            self.isroot=False
        self.clear_buffer()
        self.state_lock.release()
        sendjson=json.dumps({"order":"feedback","ip":self.local_ip ,"detail":"topology_changed"})
        self.udp_send((self.cloud_server,self.port),sendjson.encode('utf-8'))
        print("------------change_topology success----------")
        return

    def __init__(self,config_file):
        """Build the node from a JSON config file: topology, transport
        parameters and (optionally) a previously saved model."""
        # local IP defaults to the hostname lookup; config may override it
        self.local_ip = socket.gethostbyname(socket.gethostname())
        with open(config_file, 'r', encoding='utf-8') as fp:
            cfg = json.load(fp)
        print('config file:', cfg)
        # required settings
        self.up_server = cfg['up_server']
        self.down_server = cfg['down_server']
        self.port = cfg['port']
        self.send_bytes_once = cfg['send_bytes_once']
        self.n_down_server = len(self.down_server)
        self.local_model = self.create_model()
        self.aggregate_model = self.create_model()
        # optional settings: keep the class-level default when absent
        for key in ("local_ip", "wait_time", "isparticipant",
                    "aggregate_waite_time", "send_speed", "cloud_server",
                    "camera_path", "mqtt_server", "isdebug",
                    "send_image_channel"):
            if key in cfg:
                setattr(self, key, cfg[key])
        if cfg['local_model'] == 1:
            self.local_model_read()
        if self.up_server == "":
            self.isroot = True
        self.clear_buffer()
        return
    #---------------模型训练、预测、更新、存储、读取-----------------
    def local_model_train(self):
        """Train the local model for one epoch on a randomly chosen
        1000-sample shard of MNIST."""
        print("start local_model_train")
        (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        start = 1000 * np.random.randint(0, 6)  # one of six disjoint 1000-sample shards
        self.local_model.fit(x_train[start:start + 1000],
                             y_train[start:start + 1000],
                             epochs=1, batch_size=50, verbose=1)
        print("local_model_train complete")
        return
    def local_model_train0(self):
        """Train the gesture-recognition model on the local image data set."""
        print("start local_model_train")
        print("reading local data sets")
        images, labels = self.get_data_sets('.//train_image_full')
        print("reading local data sets complete")
        self.local_model.fit(images, labels, epochs=5, verbose=1, batch_size=20)
        print("local_model_train complete")
        return
    def local_model_predict(self,addr):
        """Predict the class of the image file at `addr`; returns the argmax
        label as printed."""
        raw = tf.io.read_file(addr)
        img = tf.image.decode_jpeg(raw, channels=3)
        img = tf.image.resize(img, [100, 100])
        img /= 255.0
        batch = np.zeros((1, 100, 100, 3))
        batch[0] = img
        scores = self.local_model.predict(batch)  # predict yields the final layer's output
        print("predict result", np.argmax(scores))
        return np.argmax(scores)
    def local_model_update(self):
        """Reassemble the new model's JSON from the received packets and load
        the weights into the local model.

        Raises ValueError if any packet slot is still empty.
        """
        pieces = []
        for chunk in self.received_buffer_new:
            if chunk == []:
                raise ValueError("while update local model,find empty data")
            pieces.append(chunk)
        layer_data = json.loads("".join(pieces))["layer_data"]
        for idx in range(self.n_data_layer):
            for part in range(2):  # two arrays per layer (presumably kernel and bias)
                layer_data[idx][part] = np.array(layer_data[idx][part])
            self.local_model.layers[self.model_data_layer_index[idx]].set_weights(layer_data[idx])
        return
    def local_model_read(self):
        """Load previously saved weights from disk into the local model."""
        weights_path = './/model//gestureModel.h5'
        self.local_model.load_weights(weights_path)
        print("model read")
        return
    def local_model_save(self):
        """Persist the local model's weights to disk."""
        weights_path = './/model//gestureModel.h5'
        self.local_model.save_weights(weights_path)
        print("model saved")
        return
    def local_model_reset(self):
        """Discard the current local model and replace it with a fresh one."""
        self.local_model = self.create_model()
        print("model reset")
        return
    def test_local_model(self):
        """Evaluate the local model's accuracy on the MNIST test split."""
        (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        print("*********test local model:************")
        print(self.local_model.evaluate(x_test, y_test)[1])  # index 1 is the accuracy metric
        return
    def test_local_model0(self):
        """Evaluate the gesture model's accuracy on the local test images."""
        images, labels = self.get_data_sets('.//test_image')
        print(self.local_model.evaluate(images, labels)[1])
        return
    def create_model(self):#debug CNN for MNIST digit recognition
        """Build the debug CNN used for MNIST digit recognition.

        Also records which layer indices carry trainable weights
        (model_data_layer_index / n_data_layer) for the serialization and
        aggregation code.
        """
        model = tf.keras.models.Sequential([
        tf.compat.v2.keras.layers.Reshape((28,28,1),input_shape=(28, 28)),
        tf.keras.layers.Convolution2D(32, (5, 5), activation=tf.nn.relu),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Convolution2D(64, (5, 5), activation=tf.nn.relu),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation=tf.nn.relu),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax)
        ])
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        # layers 1, 3, 6, 7 (the convolutions and dense layers) hold weights
        self.model_data_layer_index=[1,3,6,7]
        self.n_data_layer=4
        return model
    def create_model0(self):
        """Build the gesture-recognition CNN (MobileNetV2, 10 classes) and
        record which layer indices carry weights."""
        model = tf.keras.applications.MobileNetV2(weights = None, classes=10, input_shape=(100, 100, 3))
        model.build(input_shape=(None, 100, 100, 3))
        model.compile(optimizer=tf.optimizers.SGD(learning_rate=0.001),
                      loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        # collect the indices of every layer that actually holds weights
        weighted_layers = [i for i, layer in enumerate(model.layers)
                           if layer.get_weights() != []]
        self.model_data_layer_index = weighted_layers
        self.n_data_layer = len(weighted_layers)
        return model

    #---------------联邦学习相关-----------------
    def average_aggregate_model_root(self):
        """Root only: divide the accumulated weighted sum held in
        aggregate_model by the total weight to obtain the averaged model."""
        all_weight=self.add_aggregate_model()#accumulate the weighted sums; returns the total weight
        aggregate_layer=[]
        new_layer=[]
        for i in self.model_data_layer_index:
            aggregate_layer.append(self.aggregate_model.layers[i].get_weights())
        for i in range(0,self.n_data_layer):
            # NOTE(review): np.array over two differently-shaped zero arrays
            # builds a ragged/object array; recent numpy requires
            # dtype=object for this — verify against the numpy version in use
            new_layer.append(np.array([np.zeros(aggregate_layer[i][0].shape),np.zeros(aggregate_layer[i][1].shape)]))
            new_layer[i]+=aggregate_layer[i]
            new_layer[i]=new_layer[i]/all_weight
            self.aggregate_model.layers[self.model_data_layer_index[i]].set_weights(new_layer[i])
        print("average_aggregate_model_root complete")
        return
    def add_aggregate_model(self):#compute the combined model; returns the total weight
        """Accumulate the weighted sum of the local model and every
        participating child's received model into aggregate_model.

        Returns the total weight. Note aggregate_model then holds the
        weighted SUM, not the average — the root divides by the returned
        weight afterwards (average_aggregate_model_root).
        """
        #local model
        print("\nstart aggregation,this will take several minutes...")
        weight_all=0
        local_layer=[]
        new_layer=[]
        for i in self.model_data_layer_index:
            local_layer.append(self.local_model.layers[i].get_weights())
        for i in range(0,self.n_data_layer):
            # NOTE(review): np.array over two differently-shaped zero arrays
            # builds an object array; recent numpy requires dtype=object — verify
            new_layer.append(np.array([np.zeros(local_layer[i][0].shape),np.zeros(local_layer[i][1].shape)]))
        for i in range(0,self.n_down_server):
            received_buffer_add=""
            if self.is_buffer_initialized[i]==1:# only children that actually sent data
                for j in range(0,len(self.received_buffer[i])):
                    received_buffer_add+=self.received_buffer[i][j]
                r_data_dict=json.loads(received_buffer_add)#reassembled JSON payload
                r_layer_data=r_data_dict["layer_data"]
                r_layer_data=np.array(r_layer_data)
                for j in range(0,self.n_data_layer):
                    for ii in range(0,2):
                        r_layer_data[j][ii]=np.array(r_layer_data[j][ii])
                    new_layer[j]+=r_layer_data[j]*self.received_weight[i]
                weight_all+=self.received_weight[i]

        if self.isparticipant:
            for j in range(0,self.n_data_layer):
                new_layer[j]+=local_layer[j]*self.local_weight#local model weight currently defaults to 1
            weight_all+=self.local_weight
        for j in range(0,self.n_data_layer):
            self.aggregate_model.layers[self.model_data_layer_index[j]].set_weights(new_layer[j])
        print("add_aggregate_model complete")
        return weight_all
    def update(self):#wait until all packets arrived
        """Block until every new-model packet has arrived (while state 4).

        Polls received_buffer_new twice a second. If no new packet arrives
        for wait_time seconds, it builds a resend request for the missing
        ids (the actual send is currently commented out).
        """
        received_timmer=time.time()  # NOTE(review): unused — kept as-is
        last_receive_time=0
        last_receive_n=0
        while(self.state==4 and self.running):
            is_update_completed=True
            time.sleep(0.5)# poll twice a second; without the sleep this starves the listen thread
            if last_receive_time==0:
                last_receive_time=time.time()# first poll: data has already started (start_update runs on the first packet)
            N=len(self.received_buffer_new)
            n=0
            for j in range(0,N):
                if self.received_buffer_new[j]!=[]:
                    n+=1
            if n<N:
                is_update_completed=False
                if n !=last_receive_n:
                    last_receive_n=n
                    last_receive_time=time.time()
                else:
                    if time.time()-last_receive_time>self.wait_time:# stalled: collect the missing packet ids
                        resend_id_list=[]
                        for j in range(0,N):
                            if self.received_buffer_new[j]==[]:
                                resend_id_list.append(j)
                        resend_order_json=json.dumps({"order":"update_resend","rsend_id_list":resend_id_list})
                        print("aks for resend",end='')
                        print(resend_id_list)
                        #self.udp_send((self.up_server,self.port),resend_order_json.encode('utf-8'))
                        time.sleep(5)
            if(is_update_completed):
                print("receive complete")
                break
        return
    def start_update(self):
        """Apply the new model locally, push it to every child, then return
        the node to state 1.

        Non-root: wait for all update packets, then load the weights.
        Root: the aggregation result simply becomes the local model.
        """
        if not self.isroot:# non-root nodes must receive the update data first
            self.update()
            print("receive new model complete")
            self.local_model_update()# all packets received: apply the new weights
        else:
            # NOTE(review): this aliases aggregate_model rather than copying
            # it, so later training mutates both references — confirm intended
            self.local_model=self.aggregate_model
        for i in range(0,self.n_down_server):# forward the new model to each child
            print("start send new model")
            self.send_model((self.down_server[i],self.port),self.local_model,1,"update_data")
            print("send complete")
        self.state_lock.acquire()
        self.state=1
        self.state_lock.release()
        return
    def aggregate(self):
        """Block until every participating child's model packets have arrived
        (while state 2).

        Polls twice a second; requests resends for packets lost for longer
        than wait_time, and gives up on children that never start sending
        within aggregate_waite_time. On completion the node moves to state 3.
        """
        received_timmer=time.time()
        last_receive_time=[]
        last_receive_n=[]
        for i in self.down_server:
            last_receive_time.append(0)
            last_receive_n.append(0)
        while(self.state==2 and self.running):
            is_aggregate_completed=True
            time.sleep(0.5)# poll twice a second; without the sleep this starves the listen thread
            for i in range(0,self.n_down_server):# i indexes the child, j the packet slot
                if self.is_buffer_initialized[i]==1:# only check children that confirmed and started sending
                    if last_receive_time[i]==0:
                        last_receive_time[i]=time.time()# first sighting of this child's data
                    N=len(self.received_buffer[i])
                    n=0
                    for j in range(0,N):
                        if self.received_buffer[i][j]!=[]:
                            n+=1
                    if n<N:
                        is_aggregate_completed=False
                        if n !=last_receive_n[i]:
                            last_receive_n[i]=n
                            last_receive_time[i]=time.time()
                        else:
                            if time.time()-last_receive_time[i]>self.wait_time:# stalled: ask for the missing ids
                                resend_id_list=[]
                                for j in range(0,N):
                                    if self.received_buffer[i][j]==[]:
                                        resend_id_list.append(j)
                                resend_id_n=len(resend_id_list)# number of lost packets
                                print("packet lost",resend_id_n,",try to resend")
                                # request resends in batches of 50 ids per command packet
                                for ii in range(0,ceil(resend_id_n/50)):
                                    resend_order_json=json.dumps({"order":"aggregation_resend","rsend_id_list":resend_id_list[50*ii:50*(ii+1)]})
                                    self.udp_send((self.down_server[i],self.port),resend_order_json.encode('utf-8'))
                                    time.sleep(0.023)
                                # NOTE(review): this tail slice is always empty (the
                                # loop above already covered every id) — confirm before removing
                                resend_order_json=json.dumps({"order":"aggregation_resend","rsend_id_list":resend_id_list[50*ceil(resend_id_n/50):]})
                                self.udp_send((self.down_server[i],self.port),resend_order_json.encode('utf-8'))
                                last_receive_time[i]=time.time()
                elif self.is_buffer_initialized[i]==0:# a child has not responded yet
                    if time.time()-received_timmer>self.aggregate_waite_time:# waited too long: give up on it
                        print("waite"+self.down_server[i]+"overtime, give up this down server")
                    else:
                        is_aggregate_completed=False

            if(is_aggregate_completed):
                print("receive complete:",end='')

                self.state_lock.acquire()
                self.state=3
                self.state_lock.release()
        return
    def start_aggregation(self):
        """Run one aggregation round.

        Propagates the confirm command to the children, trains locally (if
        participating), waits for the children's models, then either sends
        the aggregate upward (non-root) or averages and redistributes it
        (root).
        """
        print("start_aggregation")
        self.state_lock.acquire()
        self.state=2
        self.state_lock.release()
        self.clear_buffer()
        if(self.n_down_server!=0):# only nodes with children need to receive
            th_aggregate= threading.Thread(target=self.aggregate, args=())
            th_aggregate.start()
            for ds in self.down_server:
                self.command_send((ds,self.port),'{"order":"aggregation_confirm"}')
        else:# no children
            pass
        if(self.isparticipant):
            self.local_model_train()# train locally while children upload
        if(self.n_down_server!=0):
            th_aggregate.join()# wait until every child's model has arrived
        # average the models and pass the result upward
        self.state_lock.acquire()
        self.state=3
        self.state_lock.release()
        aggregate_empty=True# True when neither this node nor any child participates
        if self.isparticipant:
            aggregate_empty=False
        else:
            for i in range(0,self.n_down_server):
                if self.is_buffer_initialized[i]==1:
                    aggregate_empty=False# this node skips, but a child participates
        if not self.isroot:# non-root node
            if aggregate_empty:# nothing to contribute: tell the parent with N=0
                # fix: was `s.local_ip` (a module-level global) — use self.local_ip
                resend_order_json=json.dumps({"order":"aggregation_data","ip":self.local_ip,"N":0})
                self.udp_send((self.up_server,self.port),resend_order_json.encode('utf-8'))
            else:
                all_weight=self.add_aggregate_model()
                print("start send local model")
                self.send_model((self.up_server,self.port),self.aggregate_model,all_weight,"aggregation_data")# send the aggregate upward
                print("send local model complete")
                self.state_lock.acquire()# non-root: back to state 1, waiting for the new model
                self.state=1
                self.state_lock.release()
        else:# root node
            if aggregate_empty:# neither this node nor any child participated
                print("this server and it's down server don't take part in the aggregration")
                self.state_lock.acquire()
                self.state=1
                self.state_lock.release()
                return
            self.average_aggregate_model_root()# compute the average
            # distribute the new model downward
            self.start_update()
        return

    #---------------传输数据-----------------
    def resend_local_model(self,rsend_id_list,order):
        #已经调用过send_model并给self.data_json赋值过了，resend_local_model发送上一次send_model所发送的数据
        data_length=len(self.data_json)
        send_turns=ceil(data_length/self.send_bytes_once)
        if send_turns-1 in rsend_id_list:#如果需要发最后一个
            sendjson='{"order":"'+order+'","ip":"'+self.local_ip+'","N":'+str(send_turns)+',"data_id":'+str(send_turns-1)+',"data":'+self.data_json[(send_turns-1)*self.send_bytes_once:]+'","weight":'+str(self.local_weight)+'}'
            self.udp_send((self.up_server,self.port),sendjson.encode('utf-8'))
            rsend_id_list.remove(send_turns-1)
        sleep_time=50*self.send_bytes_once/self.send_speed*10e-6#控制发送速度,*50是发送50次数据等待一段时间
        for i in rsend_id_list:
            if i%50==1:
                time.sleep(sleep_time-0.0023)#0.0023是发送数据本身需要的时间，最大速度21.7Mb/s
            sendjson='{"order":"'+order+'","ip":"'+self.local_ip+'","N":'+str(send_turns)+',"data_id":'+str(i)+',"data":"'+self.data_json[i*self.send_bytes_once:(i+1)*self.send_bytes_once]+'"}'
            self.udp_send((self.up_server,self.port),sendjson.encode('utf-8'))#编码时会在/前再加一个/
        return
    def send_model(self,addr,model,all_weight,order):
        """Serialize `model`'s weight-bearing layers to JSON and stream it to
        `addr` in send_bytes_once-sized UDP packets tagged with `order`.

        The last packet additionally carries `all_weight` (the total
        aggregation weight). The serialized payload is kept in
        self.data_json so resend_local_model can replay lost packets.
        """
        layer_data=[]
        for i in self.model_data_layer_index:
            layer_data.append(model.layers[i].get_weights())
        for i in range(0,len(layer_data)):
            for j in range(0,2):
                layer_data[i][j]=layer_data[i][j].tolist()
        self.data_json = json.dumps({"layer_data":layer_data})
        self.data_json=self.data_json.replace("\"","\\\"")# escape quotes so chunks embed in the outer JSON; replace returns a new string
        data_length=len(self.data_json)
        send_turns=ceil(data_length/self.send_bytes_once)
        sleep_time=50*self.send_bytes_once/self.send_speed*10e-6# rate limiting: pause once per 50 packets
        for i in range(0,send_turns-1):# send every packet except the last
            if i%50==1:
                print('\r','{:.2f}'.format(i/send_turns),end='')
                time.sleep(sleep_time-0.0023)# 0.0023s is the send time itself; caps the rate at 21.7Mb/s
            sendjson='{"order":"'+order+'","ip":"'+self.local_ip+'","N":'+str(send_turns)+',"data_id":'+str(i)+',"data":"'+self.data_json[i*self.send_bytes_once:(i+1)*self.send_bytes_once]+'"}'
            self.udp_send(addr,sendjson.encode('utf-8'))
        # the last packet also carries the weight
        sendjson='{"order":"'+order+'","ip":"'+self.local_ip+'","N":'+str(send_turns)+',"data_id":'+str(send_turns-1)+',"data":"'+self.data_json[(send_turns-1)*self.send_bytes_once:]+'","weight":'+str(all_weight)+'}'
        self.udp_send(addr,sendjson.encode('utf-8'))
        return
    def receive_model(self,json_data):
        """Store one aggregation packet received from a child node.

        The first packet from a child initializes that child's buffer (or
        marks it non-participating when N == 0). The last packet (data_id ==
        N-1) carries the child's aggregation weight.

        Raises ValueError when the sender is not one of our children.
        """
        down_ip=json_data["ip"]
        if down_ip not in self.down_server:
            raise ValueError("receive data from"+down_ip+" ,but this ip is not my down server")
        ip_index=self.down_server.index(down_ip)
        if self.is_buffer_initialized[ip_index]==0:# first packet from this child
            print("start receive"+down_ip+"'s data")
            if json_data["N"]==0:# N==0 means this child skips the round
                self.received_weight[ip_index]=0
                self.is_buffer_initialized[ip_index]=-1
                print(down_ip+"don't take part in the aggregation")
                return
            else:# participating: allocate one slot per expected packet
                for i in range(0,json_data["N"]):
                    self.received_buffer[ip_index].append([])
                self.is_buffer_initialized[ip_index]=1
        self.received_buffer[ip_index][json_data["data_id"]]=json_data["data"]
        if json_data["data_id"]==json_data["N"]-1:# the last packet carries the weight
            if "weight" not in json_data:
                # fix: this used to `raise RuntimeWarning`, which aborted the
                # handler and made the assignment below unreachable; warn and
                # fall back to weight 1 as the original comment intended
                print(down_ip+"dosn't have weight data at last packet ,defoult to 1")
                self.received_weight[ip_index]=1
            else:
                self.received_weight[ip_index]=json_data["weight"]
        return
    def receive_update_model(self,json_data):
        """Store one new-model packet pushed down by the parent.

        Unlike receive_model this only accepts data from up_server (the
        weight is implicitly 1). The first packet switches the node into
        state 4, resets the buffers and launches the start_update worker.
        """
        sender = json_data["ip"]
        if sender != self.up_server:
            raise ValueError("receive new model data from"+sender+"but it is not my up server")
        if self.state != 4:  # first packet of the push: set up buffers and worker
            with self.state_lock:
                self.state = 4
            self.clear_buffer()
            self.received_buffer_new = [[] for _ in range(json_data["N"])]
            threading.Thread(target=self.start_update, args=()).start()
        self.received_buffer_new[json_data["data_id"]] = json_data["data"]
        return
    #---------------基本工具-----------------
    def clear_buffer(self):
        """Reset all per-round receive buffers according to down_server."""
        count = len(self.down_server)
        self.received_buffer = [[] for _ in range(count)]  # per-child packet buffers
        self.is_buffer_initialized = [0] * count           # 0 unknown, 1 initialized, -1 not participating
        self.received_weight = [0] * count                 # per-child aggregation weights
        self.received_buffer_new = []                      # buffer for a pushed-down new model
        self.n_down_server = count
        return
    def udp_send(self,address,message):
        client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        client_socket.sendto(message, address) #将msg内容发送给指定接收方
        client_socket.close();
        return
    def command_send(self,address,message):#需要回执的命令 成功则返回true
        message_dic=json.loads(message)
        random_id=np.random.randint(1e6)
        message_dic['feedback_id']=random_id
        message=json.dumps(message_dic)
        self.udp_send(address,message.encode("utf-8"))
        start_time=time.time()
        while(time.time()-start_time<3):
            time.sleep(0.5)
            if random_id in self.command_id:
                return True
            self.udp_send(address,message.encode("utf-8"))#没有相应，重发命令
        print("node {} is unresponsive".format(address))
        return False
    def feedback(self,address,json_data): #已经处理过的命令返回False，新命令返回True
        id=json_data['feedback_id']
        send_dic={"order":"feedback","feedback_id":id}
        sned_json=json.dumps(send_dic)
        self.udp_send((address[0],self.port),sned_json.encode('utf-8'))
        if id in self.command_id:
            return False
        self.command_id.add(id)
        return True
    def receive_feedback(self,json_data):
        """Record an acknowledgement id so command_send can detect it."""
        self.command_id.add(json_data["feedback_id"])
        return
    #---------------应用相关-----------------
    def get_data_sets(self,addr):
        """Load and shuffle an image data set from *addr*.

        *addr* must contain one sub-directory per class; the LAST character of
        a sub-directory name selects the class digit. Directories ending in
        '0'..'8' map to labels 0..8, anything else falls through to label 9
        (this preserves the original catch-all `else` behaviour).

        Fixes vs. the original: `astype(np.float)` crashed on NumPy >= 1.24
        (the `np.float` alias was removed); the ten copy-pasted list pairs
        (which mis-filed classes 4-8 into `list_3` and class 9 into `list_4`,
        though labels stayed paired) are collapsed into one grouped loop.

        Returns:
            x_train: float array (N, 100, 100, 3), pixel values scaled to [0, 1].
            label_list: float array (N,) of class labels.
        """
        image_paths = []
        labels = []
        for class_dir in os.listdir(addr):  # one directory per class
            class_path = addr + "/" + class_dir
            suffix = class_path[-1:]
            # '0'..'8' -> that digit; any other ending -> class 9 (original else branch)
            label = int(suffix) if suffix.isdigit() and suffix != '9' else 9
            for image_name in os.listdir(class_path):
                image_paths.append(os.path.join(class_path, image_name))
                labels.append(label)
        image_list = np.array(image_paths)
        label_list = np.array(labels, dtype=float)  # np.float removed in NumPy 1.24
        # Shuffle images and labels with the same permutation so pairs stay aligned.
        perm = np.random.permutation(len(label_list))
        image_list = image_list[perm]
        label_list = label_list[perm]
        x_train = np.zeros((len(label_list), 100, 100, 3))
        for i in range(len(label_list)):
            image = cv2.imread(image_list[i])
            image = cv2.resize(image, dsize=(100, 100), interpolation=cv2.INTER_CUBIC)
            x_train[i] = image / 255.0  # normalise to [0, 1]
        return x_train, label_list
    def camer_server(self):
        # Run the gRPC image-prediction service on port 50051 and keep it
        # alive while self.running is True; shut it down once the node stops.
        executor = futures.ThreadPoolExecutor(max_workers=10)
        rpc_server = grpc.server(executor)
        servicer = SendImage(self)
        Datas_pb2_grpc.add_SendImageServicer_to_server(servicer, rpc_server)
        rpc_server.add_insecure_port('[::]:50051')
        rpc_server.start()
        print('camera predict server start')
        while self.running:
            time.sleep(5)
        rpc_server.stop(0)
        return
    def send_image(self,image):
        # Fire-and-forget upload: JPEG-encode *image*, wrap it base64 in a
        # toServer message, and push it to the configured gRPC endpoint from
        # a background thread so the caller never blocks on the network.
        def encode_request(frame):
            ok, jpeg = cv2.imencode('.jpg', frame)
            print(ok)
            if ok != 1:
                return
            return Datas_pb2.toServer(datas=base64.b64encode(jpeg))

        def push(frame):
            channel = grpc.insecure_channel(self.send_image_channel)
            stub = Datas_pb2_grpc.SendImageStub(channel)
            stub.SendRequest(encode_request(frame)).reply
            return

        threading.Thread(target=push, args=(image,)).start()
        return
    def local_model_predict_camera(self,image):  # camera frame -> predicted class
        """Predict the class of one camera frame with the local model.

        The frame is centre-cropped to a square, resized to 100x100, shown in
        a short preview window, then normalised and run through the model.
        Unless in debug mode, the argmax class is published over MQTT and the
        frame is forwarded via send_image.

        If the node is not in normal-work state (state != 1) the call blocks
        until the state recovers, then returns WITHOUT predicting — the frame
        that triggered the wait is deliberately dropped as stale.
        """
        if self.state != 1:
            print("state", self.state, "predict disabled")
            while self.state != 1:
                time.sleep(3)
            return
        (p_x, p_y, channel) = image.shape
        # Centre-crop the wider axis so the frame is square before resizing.
        y_shift = ceil((p_y - p_x) / 2)
        image = image[:, y_shift:y_shift + p_x]
        image = cv2.resize(image, dsize=(100, 100), interpolation=cv2.INTER_CUBIC)
        cv2.imshow("camera", image)
        cv2.waitKey(500)
        cv2.destroyAllWindows()

        image2 = image / 255.0
        image2 = np.expand_dims(image2, 0)  # add batch dim: (1, 100, 100, 3)
        ans = self.local_model.predict(image2)  # last-layer activations
        if not self.isdebug:
            data = {'name': 'MQTT-test-device', 'cmd': 'Recon_num', 'method': 'get', 'Recon_num': str(np.argmax(ans))}
            self.client.publish('DataTopic', json.dumps(data))
            try:
                self.send_image(image)
            # FIX: was a bare `except:` which also swallowed KeyboardInterrupt
            # and SystemExit; keep the best-effort semantics but only for
            # ordinary exceptions.
            except Exception:
                print("send_image failed")
        print("predict result:", np.argmax(ans))
        return


# Guard the entry point so importing this module for its classes does not
# start a server as a side effect.
if __name__ == "__main__":
    s = FLserver("config.json")
    s.start()



