# -*- coding: utf-8 -*-
import time
import json
import threading
import socket
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import util.Glimmer_send
import util.Glimmer_op

class FLserver:
    """One node of a hierarchical federated-learning tree.

    Each node listens on a UDP port, trains on its local data shard,
    aggregates gradients from its down-stream children and forwards the
    result to its up-stream parent; the root averages the total and pushes
    the new global model back down the tree.

    NOTE(review): these are mutable class-level attributes (set/dict/list),
    shared across all instances — fine for a single server per process,
    but a hazard if several FLserver objects are ever created.
    """
    #--- core: control state ---
    command_id=set()    # ids of commands already handled (de-duplication)
    isroot=False        # True when this node has no up-stream server
    running=False       # main-loop flag, cleared by stop()
    state_lock = threading.Lock()
    state=0# working state: 0 uninitialised, 1 idle, 2 training / waiting for children, 3 aggregating upward, 4 sending / applying the new model
    flag={}
    flag["glimmer_op_update"]=False
    optimizer="SGD"     # one of "SGD" | "FSVRG" | "FSVRGop"
    glimmer_op=None     # optimizer instance (Glimmer_op), created lazily
    FL_round=0          # number of FL rounds run; used to decide whether glimmer_op needs refreshing
    #--- core: buffers ---
    local_ip=None
    cloud_server=None   # cloud server address; state reports are sent there
    up_server=None
    local_model=None
    temp_model=None
    n_data_layer=None# number of layers that carry weights, set in create_model
    model_data_layer_index=None# indices of the layers that carry weights, set in create_model
    n_down_server=None
    down_server=None
    is_child_participant=[]# per child: 0 unknown/uninitialised, 1 participating, -1 not participating
    received_weight=[]# data weight received from each child ip (sent with its last packet)

    #--- parameters ---
    wait_time=3    # receive timeout in seconds; a longer gap between packets is treated as packet loss
    port=None
    local_weight=1# weight of the local data; 0 when this node does not train
    isparticipant=True# whether this node takes part in the current round
    FSVRG_h=0.01
    FSVRG_m=100
    SGD_h=0.1
    SGD_b=1000

    #--------------- main functions / control ---------------
    def listen(self):
        """UDP dispatch loop: receive datagrams on self.port and route them.

        Each datagram is a JSON header (everything up to the first '}')
        optionally followed by raw payload bytes.  Orders are dispatched to
        the Glimmer_send transport, to callback threads, or to the FL
        control handlers.  Runs until self.running is cleared; the 3-second
        socket timeout only exists so the shutdown flag is polled regularly.
        """
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        server_socket.bind(("", self.port)) 
        while(self.running):
            server_socket.settimeout(3)  # poll the shutdown flag every 3 seconds
            try:
                receive_data, client = server_socket.recvfrom(1400)
                #print("client %s sent %s\n" % (client, receive_data))  # debug: dump received data
                # split the datagram: the JSON header ends at the first '}', the rest is payload
                josn_end=receive_data.index(b"}")+1
                receive_data2=receive_data[josn_end:]
                json_data = json.loads(receive_data[:josn_end].decode('utf-8'))
                if "feedback_id" in json_data:
                    if not self.feedback(json_data):
                        json_data={}# commands that were already handled are skipped
                if "order" in json_data:
                    #-------------<Glimmer_send>---------------
                    if (json_data["order"]=="data"):
                        self.G_s.receive_data(json_data,receive_data2)
                    elif (json_data["order"]=="send_ready"):
                        th_receive=threading.Thread(target=self.G_s.start_receive, args=(json_data,))
                        th_receive.start()
                    elif(json_data["order"]=="start_send_data"):
                        self.G_s.flag_start_to_send=True
                    elif(json_data["order"]=="resend"):
                        self.G_s.resend_id_queue.put(json_data["resend_id_list"])
                    elif(json_data["order"]=="receive_complete"):
                        self.G_s.flag_receive_complete=True
                    #-------------</Glimmer_send>---------------
                    elif (json_data["order"]=="call_back"):
                        # NOTE(review): eval() on a network-supplied name resolves an
                        # arbitrary attribute of self — only safe on a trusted network
                        th_callback=threading.Thread(target=eval("self.{}".format(json_data["call_back"])), args=(json_data,))
                        th_callback.start()
                    #==================================================================
                    elif (json_data['order']=="aggregation_request"):# external request to start aggregation
                        if(self.isroot):
                            self.start_FL()
                        else:
                            self.command_send((self.up_server,self.port),json.dumps(json_data))# not root: relay the request upward
                    elif(json_data['order']=="aggregation_confirm"):
                        self.start_FL()
                    elif(json_data['order']=="test_local_model"):# external command: evaluate model accuracy
                        self.test_local_model()
                    elif(json_data['order']=="read_model"):# external command: load model weights from disk
                        self.local_model_read()
                    elif(json_data['order']=="reset_model"):# external command: re-create the model
                        self.local_model_reset()
                    elif(json_data['order']=="save_model"):# external command: save model weights
                        self.local_model_save()
                    elif(json_data['order']=="change_topology"):# external command: change the tree topology
                        self.change_topology(json_data)
                    elif(json_data['order']=="update_part1"):# external command: update the inference model
                        th_update_part1=threading.Thread(target=self.update_part1, args=("localhost",))
                        th_update_part1.start()
                    elif(json_data['order']=="change_parameter"):# external command: change runtime parameters
                        self.change_parameter(json_data)
                    elif(json_data['order']=="send_op_data"):# upload the data the optimizer needs
                        th_send_op_data=threading.Thread(target=self.send_op_data, args=(json_data,))
                        th_send_op_data.start()
            except socket.timeout:
                pass
        print("stop listen")
        return
    def start(self):
        """Start the node: spawn the listen/state-report/housekeeping threads
        and load this node's local data shard (MNIST, selected by data_id).
        """
        self.state_lock.acquire()
        self.state=1
        self.state_lock.release()
        self.running=True
        self.th_listen= threading.Thread(target=self.listen, args=())
        self.th_listen.start()
        print('Federated Learning v1.4.2 started at '+self.local_ip+":"+str(self.port))
        self.th_show= threading.Thread(target=self.show_state, args=())
        self.th_show.start()
        self.th_timed_task= threading.Thread(target=self.timed_task, args=())
        self.th_timed_task.start()

        # load the local MNIST shard; "nodex"/"nodey" are the images/labels of this node
        npdata=np.load("./model/mnist"+str(self.data_id)+".npz")
        self.train_x=npdata["nodex"]
        self.train_y=npdata["nodey"]
        self.train_x = self.train_x / 255.0
        self.train_y=tf.one_hot(self.train_y,depth=10,dtype=tf.uint8)
        return
    def stop(self):
        """Stop the node: clear the running flag and join all worker threads."""
        self.running=False
        self.state_lock.acquire()
        self.state=0
        self.state_lock.release()
        self.th_listen.join()
        self.th_show.join()
        # bug fix: th_timed_task is started in start() but was never joined,
        # leaving the housekeeping thread dangling on shutdown
        self.th_timed_task.join()
        return
    def show_state(self):
        """Report this node's state to the cloud server once per second."""
        last_state=0
        while self.running:
            time.sleep(1)
            if self.state!=last_state:
                #print("state changes to:",self.state)
                last_state=self.state
            # bug fix: the ip field used the module-level name `s` instead of
            # `self`, which breaks (or reports the wrong ip) unless a global
            # `s` happens to reference this very instance
            order_json=json.dumps({"order":"send_state","ip":self.local_ip,"isroot":self.isroot,"state":self.state,"up_server":self.up_server,"down_server":self.down_server})
            self.udp_send((self.cloud_server,self.port),order_json.encode('utf-8'))
        return
    def change_topology(self,json_data):
        """Apply a new up/down-server topology pushed by the cloud server.

        Refuses (and reports the refusal to the cloud server) unless this
        node is idle (state 1); sends a feedback message on success.
        """
        print("start to change_topology")
        if self.state!=1:
            text="ip:"+self.local_ip+",isn't in state 1,refuse to change topology"
            # bug fix: the refusal text was built but an empty string was sent
            sendjson=json.dumps({"order":"print","text":text})
            self.udp_send((self.cloud_server,self.port),sendjson.encode('utf-8'))
            return
        self.state_lock.acquire()
        self.up_server=json_data['up_server']
        self.down_server=json_data['down_server']
        # bug fix: keep the cached child count in sync with the new topology
        # (it was only ever set in __init__ and went stale here)
        self.n_down_server=len(self.down_server)
        if self.up_server=="":
            self.isroot=True
        else:
            self.isroot=False
        self.clear_buffer()
        self.state_lock.release()
        sendjson=json.dumps({"order":"feedback","ip":self.local_ip ,"detail":"topology_changed"})
        self.udp_send((self.cloud_server,self.port),sendjson.encode('utf-8'))
        print("------------change_topology success----------")
        print("is_root:{}\nup_server:{}\ndown_server:{}".format(self.isroot,self.up_server,self.down_server))
        return
    def timed_task(self):
        """Housekeeping loop: flush the processed-command cache every five minutes."""
        timer_start=time.time()
        while self.running:
            time.sleep(1)
            now=time.time()
            if now-timer_start>300:# clear once every five minutes
                self.command_id.clear()
                timer_start=now
        return
    def __init__(self,config_file):
        """Create a server node from a JSON config file.

        Reads the topology (up/down servers), port and optional overrides,
        then builds the Glimmer_send transport and the Keras models.
        """
        # autodetect the local IP from the host name (may be overridden by config)
        hostname=socket.gethostname()
        self.local_ip=socket.gethostbyname(hostname)

        with open(config_file,'r',encoding='utf-8') as fp:
            json_data = json.load(fp)
        print('config file:',json_data)
        self.up_server=json_data['up_server']
        self.down_server=json_data['down_server']
        self.port=json_data['port']
        self.send_bytes_once=json_data['send_bytes_once']
        self.data_id=json_data['data_id']
        self.n_down_server=len(self.down_server)
        if "local_ip" in json_data:
            self.local_ip=json_data["local_ip"]
        if "wait_time" in json_data:
            self.wait_time=json_data["wait_time"]
        if "isparticipant" in json_data:
            self.isparticipant=json_data["isparticipant"]
        if "cloud_server" in json_data:
            self.cloud_server=json_data["cloud_server"]
        if "isdebug" in json_data:
            self.isdebug=json_data["isdebug"]
        if "optimizer" in json_data:
            self.optimizer=json_data["optimizer"]
        if self.up_server=="":
            self.isroot=True

        # bug fix: Glimmer_send used to be created *before* the config was
        # read, so it was handed port=None (self.port is only a class-level
        # placeholder until here); create it once self.port is known
        self.G_s=util.Glimmer_send.Glimmer_send((self.local_ip,self.port),self)
        if "send_speed" in json_data:
            self.G_s.send_speed=json_data["send_speed"]
        self.local_model=self.create_model()
        self.temp_model=self.create_model()
        if json_data['local_model']==1:
            self.local_model_read()
        self.clear_buffer()
        self.G_s.local_ip=self.local_ip
        return
    #--------------- model training / prediction / update / save / load ---------------
    def local_model_train(self):#local training; "local_gradient" is not necessarily a gradient, it may be the weight delta
        """Local SGD training step.

        Saves the pre-training weights as previous.h5, runs one epoch of
        fit(), then stores (old - new) for every weighted layer in
        temp_model and saves it as local_gradient.h5 — i.e. the saved
        "gradient" is really the weight delta of this round.
        """
        print("start local_model_train")
        x_train=self.train_x
        y_train=self.train_y
        self.local_model.save_weights("./model/previous.h5")
        self.temp_model.load_weights("./model/previous.h5")
        print(self.temp_model.layers[1].get_weights()[0][0][0][0][0])  # debug: sample one conv weight
        self.local_model.optimizer.learning_rate=self.SGD_h
        self.local_model.fit(x_train, y_train, epochs=1, batch_size=self.SGD_b,verbose=1)
        # temp_model <- old - new, layer by layer
        for i in self.model_data_layer_index:
            new_weights=self.local_model.layers[i].get_weights()
            old_weights=self.temp_model.layers[i].get_weights()
            old_new=[]
            for j in range(len(new_weights)):
                old_new.append(old_weights[j]-new_weights[j])
            self.temp_model.layers[i].set_weights(old_new)

        #print(self.temp_model.layers[1].get_weights()[0][0][0][0][0])
        self.temp_model.save_weights("./model/local_gradient.h5")
        print("local_model_train complete")
        return
    def local_model_train0(self):#gesture-recognition local training
        """Gesture-recognition variant of local training (11 classes, images from ./train_image)."""
        print("start local_model_train")
        image_list,lable_list=self.get_data_sets("./train_image")
        print("load data")
        x_train=self.read_data_sets(image_list)
        print("load data complete")
        y_train=tf.one_hot(lable_list,depth=11,dtype=tf.uint8)
        self.local_model.fit(x_train,y_train, epochs=1, batch_size=30,verbose=1)
        print("local_model_train complete")
        return
    def local_model_predict(self,addr):
        """Predict the class of the image file at *addr*.

        Returns the predicted class index, or -1 when the softmax
        confidence of the best class is below 0.5.
        """
        raw = tf.io.read_file(addr)
        img = tf.image.decode_jpeg(raw, channels=3)
        img = tf.image.resize(img, (50, 50)) / 255.0
        batch = np.zeros((1, 50, 50, 3))
        batch[0] = img
        logits = self.local_model.predict(batch)  # raw last-layer outputs
        probs = tf.keras.layers.Softmax()(tf.cast(logits, dtype=tf.float32))
        best = np.argmax(probs)
        if probs.numpy()[0][best] < 0.5:
            print("no result")
            return -1
        print("predict result", best)
        return best
    def local_model_read(self):
        """Load the initial weights from disk into the local model."""
        weights_path = "./model/initial.h5"
        self.local_model.load_weights(weights_path)
        print("model read")
        return
    def local_model_save(self):
        """Save the local model's weights to disk."""
        weights_path = './/model//gestureModel.h5'
        self.local_model.save_weights(weights_path)
        print("model saved")
        return
    def local_model_reset(self):
        """Discard the current weights by rebuilding the model from scratch."""
        fresh_model = self.create_model()
        self.local_model = fresh_model
        print("model reset")
        return
    def test_local_model(self):#MNIST handwritten-digit recognition
        """Evaluate the local model on the MNIST test set and append the
        timestamped accuracy/loss to ./result.txt.
        """
        mnist = tf.keras.datasets.mnist
        (x_train, y_train),(x_test, y_test) = mnist.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        y_train=tf.one_hot(y_train,depth=10,dtype=tf.uint8)
        y_test=tf.one_hot(y_test,depth=10,dtype=tf.uint8)
        print("*********test local model:************")
        ans=self.local_model.evaluate(x_test, y_test)

        # ans is [loss, accuracy] per the compiled metrics
        time_str=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) 
        with open("./result.txt","a") as f:
            f.write("{}:accuracy :[{}] loss [{}]\n".format(time_str,ans[1],ans[0]))
        print(ans[1])
        return
    def test_local_model0(self):#gesture recognition
        """Evaluate the local model on the gesture-recognition test set and print its accuracy."""
        # bug fix: the start message said "start local_model_train"
        # (copy-pasted from the training method) which mislabels the log
        print("start test_local_model0")
        image_list,lable_list=self.get_data_sets("./test_image")
        x_test=self.read_data_sets(image_list)
        y_test=tf.one_hot(lable_list,depth=11,dtype=tf.uint8)
        print(self.local_model.evaluate(x_test,y_test,batch_size=1)[1])
        return
    def create_model(self):
        """Build and compile the debug CNN for MNIST digit recognition.

        Side effect: records which layers carry trainable weights in
        self.model_data_layer_index and self.n_data_layer.
        """
        inputs = tf.keras.Input(shape=(28, 28, 1))
        h = tf.keras.layers.Convolution2D(32, (5, 5), activation=tf.nn.relu)(inputs)
        h = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(h)
        h = tf.keras.layers.Convolution2D(64, (5, 5), activation=tf.nn.relu)(h)
        h = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(h)
        h = tf.keras.layers.Flatten()(h)
        h = tf.keras.layers.Dense(512, activation=tf.nn.relu)(h)
        outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax)(h)
        model = tf.keras.Model(inputs, outputs, name="full_model")
        model.build(input_shape=(28, 28))
        model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
                      loss=tf.keras.losses.MSE,
                      metrics=['accuracy'])
        # record the indices of the layers that actually hold weights
        data_layer_index = []
        for idx, layer in enumerate(model.layers):
            if layer.get_weights() != []:
                data_layer_index.append(idx)
        self.model_data_layer_index = data_layer_index
        self.n_data_layer = len(data_layer_index)
        return model
    def create_model0(self):
        """Build and compile the gesture-recognition CNN (11 classes, 50x50 RGB input).

        Side effect: records which layers carry trainable weights in
        self.model_data_layer_index and self.n_data_layer.
        """
        layers = tf.keras.layers
        model = tf.keras.models.Sequential([
            layers.Conv2D(16, 3, padding='same', activation='relu'),
            layers.MaxPooling2D(),
            layers.Conv2D(32, 3, padding='same', activation='relu'),
            layers.MaxPooling2D(),
            layers.Conv2D(64, 3, padding='same', activation='relu'),
            layers.MaxPooling2D(),
            layers.Flatten(),
            layers.Dense(128, activation='relu'),
            layers.Dense(11,activation=tf.nn.softmax)])
        model.build(input_shape=(None, 50, 50, 3))
        model.compile(optimizer='Adam',
                        loss=tf.keras.losses.MSE,
                        metrics=['accuracy'])
        # record the indices of the layers that actually hold weights
        data_layer_index = []
        for idx, layer in enumerate(model.layers):
            if layer.get_weights() != []:
                data_layer_index.append(idx)
        self.model_data_layer_index = data_layer_index
        self.n_data_layer = len(data_layer_index)
        return model
    def get_weights_from_h5(self,path):
        """Load *path* into temp_model and return its weights as a numpy array
        (object-dtype array of per-variable arrays, supporting +/-/* broadcasting).
        """
        self.temp_model.load_weights(path)
        return np.array(self.temp_model.get_weights())
    #--------------- federated learning ---------------
    def start_FL(self):#start federated learning
        """Kick off one federated-learning round on a worker thread, using the configured optimizer."""
        assert self.state==1 ,"start FL while my state is not 1"
        self.FL_round+=1
        if self.optimizer=="SGD":
            target=self.start_aggregation
        elif self.optimizer=="FSVRG":
            target=self.start_aggregation_FSVRG
        elif self.optimizer=="FSVRGop":
            # lazily create the optimizer state; before start_FL runs the
            # Glimmer_op round counter is expected to be one ahead
            if self.glimmer_op is None:
                self.glimmer_op=util.Glimmer_op.Glimmer_adam(self.FL_round)
            target=self.start_aggregation_FSVRG
        else:
            return
        threading.Thread(target=target, args=()).start()
        return
    def average_aggregate_gradient_root(self):
        """Root-only: average the summed gradients and write the new model as UpdateModel.h5."""
        total_weight=self.add_aggregate_gradient()
        summed=self.get_weights_from_h5("./model/AggregateGradient.h5")
        previous=self.get_weights_from_h5("./model/previous.h5")
        # new model = previous - (weighted gradient sum) / total data weight
        updated=previous-summed/total_weight
        self.temp_model.set_weights(updated)
        self.temp_model.save_weights('./model/UpdateModel.h5')
        print("average_aggregate_model_root complete")
        return
    def add_aggregate_gradient(self):#sum the children's models and return the total data weight
        """Sum the participating children's gradients plus (optionally) this
        node's own, and return the total data weight.

        Reads each participating child's aggregate_gradient_<ip>.h5; when
        this node participates, adds its own local_gradient.h5 scaled by
        local_weight.  The un-normalised sum is stored in temp_model and
        saved as AggregateGradient.h5.
        """
        #local model
        print("start aggregation...")
        weight_all=0
        aggregate_weights=None

        for i in range(0,self.n_down_server):
            if self.is_child_participant[i]==1:
                path="./model/aggregate_gradient_"+self.down_server[i]+".h5"
                r_weights=self.get_weights_from_h5(path)
                if aggregate_weights is not None:
                    aggregate_weights+=r_weights
                else:
                    aggregate_weights=r_weights.copy()
                weight_all+=self.received_weight[i]

        if self.isparticipant:
            local_weights=self.get_weights_from_h5("./model/local_gradient.h5")
            if aggregate_weights is not None:
                aggregate_weights+=local_weights*self.local_weight
            else:
                aggregate_weights=local_weights*self.local_weight
            weight_all+=self.local_weight
        
        self.temp_model.set_weights(aggregate_weights)
        self.temp_model.save_weights("./model/AggregateGradient.h5")# the saved sum is not directly usable: it has not been divided by the total weight yet
        #print(self.temp_model.layers[1].get_weights()[0][0][0][0][0])
        print("add_aggregate_model complete")
        return weight_all
    def start_update(self):#the root runs this after aggregation; non-root nodes run it after receiving the new model
        """Apply UpdateModel.h5 locally and push it to every child, then
        return to the idle state (1).
        """
        self.state_lock.acquire()
        self.state=4
        self.state_lock.release()


        self.local_model.load_weights("./model/UpdateModel.h5")# update the local model
        #print(self.local_model.layers[1].get_weights()[0][0][0][0][0])
        with open("./model/UpdateModel.h5","rb") as f:
            data_bytes = f.read()
        for i in range(0,self.n_down_server):# forward the update to the children
            print("send new model to "+self.down_server[i])
            self.G_s.send((self.down_server[i],self.port),{"call_back":"cb_update_model"},data_bytes)# send the new model downward
        self.state_lock.acquire()
        self.state=1
        self.state_lock.release()
        return
    def start_aggregation(self):#propagate the aggregation command downward and enter the aggregation state
        """Run one SGD aggregation round.

        Broadcasts the start command to the children, trains locally, waits
        for the children, then either forwards the summed gradient upward
        (non-root) or averages it and redistributes the new model (root).
        """
        print("start_aggregation")
        self.state_lock.acquire()
        self.state=2
        self.state_lock.release()
        self.clear_buffer()
        if(self.n_down_server!=0):# tell the children to start this FL round
            for ds in self.down_server:
                self.command_send((ds,self.port),'{"order":"aggregation_confirm"}')

        if(self.isparticipant):
            self.local_model_train()# local training
        if(self.n_down_server!=0):# only wait when there are children
            self.wait_for_child()
        # average and forward upward
        self.state_lock.acquire()
        self.state=3
        self.state_lock.release()
        # skip aggregation when neither this node nor any child participates
        aggregate_empty=True
        if self.isparticipant:
            aggregate_empty=False
        else:
            for i in range(0,self.n_down_server):
                if self.is_child_participant[i]==1:
                    aggregate_empty=False# this node opts out, but a child participates
        if aggregate_empty:
            # bug fix: the root has no up_server, so it must not send the
            # empty marker upward; it goes back to the idle state instead
            # (this root branch used to sit after the unconditional return
            # below and was unreachable dead code)
            if self.isroot:
                print("this server and it's down server don't take part in the aggregration")
                self.state_lock.acquire()
                self.state=1
                self.state_lock.release()
            else:
                self.G_s.send((self.up_server,self.port),{"call_back":"cb_child_aggregation","weight":str(0)},b"aggregate_empty")# report the empty result upward
            return

        if not self.isroot:# non-root: aggregate locally and forward upward
            all_weight=self.add_aggregate_gradient()
            with open("./model/AggregateGradient.h5","rb") as f:
                data_bytes = f.read()
            self.G_s.send((self.up_server,self.port),{"call_back":"cb_child_aggregation","weight":str(all_weight)},data_bytes)# send the aggregate upward
            self.state_lock.acquire()# non-root waits in state 1 for the new model
            self.state=1
            self.state_lock.release()
        else:# root: average and redistribute
            self.average_aggregate_gradient_root()
            self.start_update()
        return
    def cb_child_aggregation(self,data,json_data):#receive and store a child's aggregate; decide whether the child participates
        """Store a child's aggregated gradient and record its participation.

        data: raw bytes of the child's AggregateGradient h5 file (or the
        b"aggregate_empty" marker).  json_data carries the sender address
        and its accumulated data weight; weight==0 means the child opted out.
        Raises ValueError when the sender is not one of our children.
        """
        addr=json_data["addr"]
        weight=int(json_data["weight"])
        down_ip=addr[0]
        # bug fix: the membership check must come before .index(), which
        # would otherwise raise a bare ValueError and hide the intended
        # diagnostic message for unknown senders
        if down_ip not in self.down_server:
            raise ValueError("receive data from"+down_ip+" ,but this ip is not my down server")
        ip_index=self.down_server.index(down_ip)
        path="./model/aggregate_gradient_"+down_ip+".h5"
        if weight==0:# weight 0 marks a non-participating child
            self.is_child_participant[ip_index]=-1
            print(down_ip+" don't take part in the aggregation")
            return
        self.received_weight[ip_index]=weight
        with open(path,"wb") as f:
            f.write(data)
        self.is_child_participant[ip_index]=1
        return
    def cb_update_model(self,data,json_data):
        """Receive the new global model from the parent, save it and apply it."""
        sender_ip=json_data["addr"][0]
        if sender_ip != self.up_server:
            raise ValueError("receive data from"+sender_ip+" ,but this ip is not my up server")
        model_path="./model/UpdateModel.h5"
        with open(model_path,"wb") as fp:
            fp.write(data)
        self.start_update()
        return


    def send_op_data(self,json_data):
        """Send the optimizer state to the requesting root, or relay the request downward.

        The node whose glimmer_op round matches the requested FL_round was
        last round's root and owns the current optimizer state.
        """
        if json_data["FL_round"]==self.glimmer_op.FL_round:
            op_path="./model/optimizer.npz"
            self.glimmer_op.save(op_path)
            with open(op_path,"rb") as fp:
                payload = fp.read()
            self.G_s.send((json_data["root_ip"],self.port),{"call_back":"cb_update_optimizer"},payload)
        else:
            # this node was not last round's root: forward the request to the children
            for ip in self.down_server:
                self.command_send((ip,self.port),json.dumps(json_data))
        return
    def cb_update_optimizer(self,data,json_data):
        """Receive the optimizer state pushed by last round's root, load it and raise the update flag."""
        op_path="./model/optimizer.npz"
        with open(op_path,"wb") as fp:
            fp.write(data)
        self.glimmer_op.load(op_path)
        assert self.glimmer_op.FL_round==self.FL_round ,"cb_update_optimizer received wrong optimizer data"
        self.flag["glimmer_op_update"]=True
        return
    #--------------- fed SVRG ---------------
    def start_aggregation_FSVRG(self):#propagate the aggregation command downward and enter the aggregation state
        """Run FSVRG phase 1: broadcast the start command, compute local
        gradients (no weight update), and aggregate them upward; the root
        averages the sum and broadcasts the global gradients.
        """
        print("start_aggregation")
        self.state_lock.acquire()
        self.state=2
        self.state_lock.release()
        self.clear_buffer()
        if(self.n_down_server!=0):# tell the children to start this FL round
            for ds in self.down_server:
                self.command_send((ds,self.port),'{"order":"aggregation_confirm"}')
        if(self.isparticipant):
            self.local_model_train_only_gradient()# local gradient computation
        if(self.n_down_server!=0):# only wait when there are children
            self.wait_for_child()
        # average and forward upward
        self.state_lock.acquire()
        self.state=3
        self.state_lock.release()
        # skip aggregation when neither this node nor any child participates
        aggregate_empty=True
        if self.isparticipant:
            aggregate_empty=False
        else:
            for i in range(0,self.n_down_server):
                if self.is_child_participant[i]==1:
                    aggregate_empty=False# this node opts out, but a child participates
        if aggregate_empty:
            # bug fix: the root has no up_server, so it must not send the
            # empty marker upward; it returns to the idle state instead
            # (previously this root branch was unreachable dead code)
            if self.isroot:
                print("this server and it's down server don't take part in the aggregration")
                self.state_lock.acquire()
                self.state=1
                self.state_lock.release()
            else:
                self.G_s.send((self.up_server,self.port),{"call_back":"cb_child_aggregation","weight":str(0)},b"aggregate_empty")# report the empty result upward
            return

        if not self.isroot:# non-root: aggregate locally and forward upward
            all_weight=self.add_aggregate_gradient()
            with open("./model/AggregateGradient.h5","rb") as f:
                data_bytes = f.read()
            self.G_s.send((self.up_server,self.port),{"call_back":"cb_child_aggregation","weight":str(all_weight)},data_bytes)# send the aggregate upward
            self.state_lock.acquire()# non-root waits in state 1 for the global gradients
            self.state=1
            self.state_lock.release()
        else:# root: average and broadcast the global gradients
            self.average_aggregate_gradient_root_FSVRG()
            self.start_update_FSVRG()
        return
    def local_model_train_only_gradient(self):
        """FSVRG phase 1: compute the full local gradients (without updating
        the model weights) and save them as local_gradient.h5.
        """
        print("start local_model_train_only_gradient")
        # (removed an unused local: `mnist = tf.keras.datasets.mnist` was
        # assigned here but never used)
        x_train=self.train_x
        y_train=self.train_y

        gradient_list=self.compute_local_gradient(x_train, y_train)

        # stash the gradients inside temp_model so they can be saved as h5
        for i in range(self.n_data_layer):
            j=self.model_data_layer_index[i]
            self.temp_model.layers[j].set_weights(gradient_list[i])

        self.temp_model.save_weights("./model/local_gradient.h5")
        print("local_model_train_only_gradient complete")
        return
    def average_aggregate_gradient_root_FSVRG(self):
        """Root-only FSVRG phase 1: divide the summed gradients by the total
        data weight and save the result as global_gradients.h5.
        """
        all_weight=self.add_aggregate_gradient()
        aggregate_layer=[]
        new_layer=[]
        for i in self.model_data_layer_index:
            aggregate_layer.append(self.temp_model.layers[i].get_weights())
        for i in range(0,self.n_data_layer):
            # build zero arrays of matching shapes, add the layer sum, then normalise
            new_layer_data_i=[]
            for j in range(0,len(aggregate_layer[i])):
                new_layer_data_i.append(np.zeros(aggregate_layer[i][j].shape))
            new_layer.append(np.array(new_layer_data_i))
            new_layer[i]+=aggregate_layer[i]
            new_layer[i]=new_layer[i]/all_weight

            self.temp_model.layers[self.model_data_layer_index[i]].set_weights(new_layer[i])
        self.temp_model.save_weights("./model/global_gradients.h5")
        print("average_aggregate_model_root_FSVRG complete")
        return
    def start_update_FSVRG(self):
        """Broadcast the averaged global gradients to every child, then start FSVRG phase 2 locally."""
        self.state_lock.acquire()
        self.state=4
        self.state_lock.release()
        with open("./model/global_gradients.h5","rb") as f:
            payload = f.read()
        # push the global gradients down the tree
        for idx in range(0,self.n_down_server):
            print("send global_gradients to "+self.down_server[idx])
            self.G_s.send((self.down_server[idx],self.port),{"call_back":"cb_update_FSVRG"},payload)
        self.start_aggregation_FSVRG2()
        return
    def cb_update_FSVRG(self,data,json_data):
        """Receive the global gradients from the parent and continue with FSVRG phase 2."""
        sender_ip=json_data["addr"][0]
        if sender_ip != self.up_server:
            raise ValueError("receive data from"+sender_ip+" ,but this ip is not my up server")
        grad_path="./model/global_gradients.h5"
        with open(grad_path,"wb") as fp:
            fp.write(data)
        self.start_update_FSVRG()
        return
    def start_aggregation_FSVRG2(self):#propagate the aggregation command downward and enter the aggregation state
        """Run FSVRG phase 2: local SVRG training using the global gradients,
        then aggregate the weight deltas upward; the root averages them
        (per the configured optimizer) and distributes the new model.
        """
        print("start_aggregation_FSVRG2")
        self.state_lock.acquire()
        self.state=2
        self.state_lock.release()
        self.clear_buffer()
        if(self.isparticipant):
            self.local_model_train_FSVRG()# local SVRG training
        if(self.n_down_server!=0):# only wait when there are children
            self.wait_for_child()
        # average and forward upward
        self.state_lock.acquire()
        self.state=3
        self.state_lock.release()
        # skip aggregation when neither this node nor any child participates
        aggregate_empty=True
        if self.isparticipant:
            aggregate_empty=False
        else:
            for i in range(0,self.n_down_server):
                if self.is_child_participant[i]==1:
                    aggregate_empty=False# this node opts out, but a child participates
        if aggregate_empty:
            # bug fix: the root has no up_server, so it must not send the
            # empty marker upward; it returns to the idle state instead
            # (previously this root branch was unreachable dead code)
            if self.isroot:
                print("this server and it's down server don't take part in the aggregration")
                self.state_lock.acquire()
                self.state=1
                self.state_lock.release()
            else:
                self.G_s.send((self.up_server,self.port),{"call_back":"cb_child_aggregation","weight":str(0)},b"aggregate_empty")# report the empty result upward
            return

        if not self.isroot:# non-root: aggregate locally and forward upward
            all_weight=self.add_aggregate_gradient()
            with open("./model/AggregateGradient.h5","rb") as f:
                data_bytes = f.read()
            self.G_s.send((self.up_server,self.port),{"call_back":"cb_child_aggregation","weight":str(all_weight)},data_bytes)# send the aggregate upward
            self.state_lock.acquire()# non-root waits in state 1 for the new model
            self.state=1
            self.state_lock.release()
        else:# root: average (per optimizer) and distribute the new model
            if self.optimizer=="FSVRG":
                self.average_aggregate_gradient_root()
            elif self.optimizer=="FSVRGop":
                self.average_aggregate_gradient_root_op()
            self.start_update()
        return
    def local_model_train_FSVRG(self):
        """FSVRG phase 2: run m local SVRG steps with step size h using the
        global gradients, then save the resulting weight delta (old - new)
        as local_gradient.h5.
        """
        h=self.FSVRG_h
        m=self.FSVRG_m
        print("start local_model_train")
        x_train=self.train_x
        y_train=self.train_y


        global_gradients=self.get_weights_from_h5("./model/global_gradients.h5")
        self.local_model.save_weights("./model/previous.h5")

        self.local_SVRG(x_train,y_train,m,h,global_gradients)

        # the uploaded "gradient" is the weight delta of this phase
        new_weights=self.local_model.get_weights()
        old_weights=self.get_weights_from_h5("./model/previous.h5")
        old_new=old_weights-new_weights
        self.temp_model.set_weights(old_new)
        self.temp_model.save_weights("./model/local_gradient.h5")
        return
    def average_aggregate_gradient_root_op(self):#FSVRGop
        """Root-only FSVRGop: like average_aggregate_gradient_root, but the
        averaged delta is transformed by the Glimmer optimizer first.

        If this node was not the root in the previous round (its FL_round is
        ahead of glimmer_op.FL_round), it first requests the optimizer state
        from last round's root via send_op_data and blocks until the
        "glimmer_op_update" flag is raised.
        """
        all_weight=self.add_aggregate_gradient()
        aggregate_weights=self.get_weights_from_h5("./model/AggregateGradient.h5")
        previous_weights=self.get_weights_from_h5("./model/previous.h5")
        if self.FL_round>self.glimmer_op.FL_round:# a mismatch means this node was not root last round and must fetch fresh optimizer state
            send_dic={}
            send_dic["order"]="send_op_data"
            send_dic["root_ip"]=self.local_ip
            send_dic["FL_round"]=self.FL_round
            for ip in self.down_server:
                self.command_send((ip,self.port),json.dumps(send_dic))
            self.wait_for_flag("glimmer_op_update")
        change_weights=self.glimmer_op.get_new_chage_weights(aggregate_weights/all_weight)
        new_weights=previous_weights-change_weights
        self.temp_model.set_weights(new_weights)
        self.temp_model.save_weights('./model/UpdateModel.h5')
        print("average_aggregate_model_root complete")
        return

    @tf.function
    def compute_local_gradient(self,train_x_,train_y_):
        """Return the MSE-loss gradients w.r.t. each weighted layer's
        trainable variables, as a list parallel to model_data_layer_index
        (graph-compiled via @tf.function).
        """
        gradient_list=[]
        predict_y=self.local_model(train_x_)
        mse = tf.losses.mean_squared_error(train_y_, predict_y)
        mse_ave=tf.reduce_mean(mse)# average the per-sample MSE
        # NOTE(review): tf.gradients is TF1-style; it works inside
        # @tf.function (graph mode) but tf.GradientTape is the TF2 idiom
        for j in self.model_data_layer_index:
            do_dx = tf.gradients(mse_ave, self.local_model.layers[j].trainable_variables)
            gradient_list.append(do_dx)
        return gradient_list
    @tf.function
    def compute_SVRG_gradient(self,train_x_,train_y_):
        """Compute, for each data-bearing layer, the MSE gradient at the
        current weights ("wk", local_model) and at the fixed round-start
        snapshot ("wt", temp_model), as needed by the SVRG update rule.

        Returns (di_dwk_list, di_dwt_list), each a list of tf.gradients
        results aligned with model_data_layer_index.
        NOTE(review): tf.gradients is graph-only; it works here because of
        the @tf.function decorator.
        """
        di_dwk_list=[]
        di_dwt_list=[]
        predict_yt=self.temp_model(train_x_)   # wt: fixed snapshot weights
        predict_yk=self.local_model(train_x_) # wk: weights being updated
        mset = tf.losses.mean_squared_error(train_y_, predict_yt)
        msek = tf.losses.mean_squared_error(train_y_, predict_yk)
        for ii in range(self.n_data_layer):
            j=self.model_data_layer_index[ii]
            di_dwk = tf.gradients(msek, self.local_model.layers[j].trainable_variables)
            di_dwt = tf.gradients(mset, self.temp_model.layers[j].trainable_variables)
            di_dwk_list.append(di_dwk)
            di_dwt_list.append(di_dwt)
        return di_dwk_list,di_dwt_list
    def local_SVRG(self,train_x,train_y,m,h,global_gradients):
        """Perform m SVRG update steps on local_model.

        Each step samples one training example, evaluates its gradient at the
        current weights (local_model) and at the round-start snapshot
        (temp_model), then applies the variance-reduced update
        w <- w - h * (g_current - g_snapshot + global_gradients).
        """
        def _flatten(per_layer_grads):
            # Collapse the list-of-lists of per-layer gradients into a single
            # numpy array so the update arithmetic broadcasts element-wise.
            return np.array([g for layer_grads in per_layer_grads for g in layer_grads])

        for _ in range(m):
            sample = np.random.randint(0, len(train_x))
            x_i = tf.reshape(train_x[sample], [1,28,28])
            grads_current, grads_snapshot = self.compute_SVRG_gradient(x_i, train_y[sample])
            grads_current = _flatten(grads_current)
            grads_snapshot = _flatten(grads_snapshot)
            weights = self.local_model.get_weights()
            weights -= h*(grads_current - grads_snapshot + global_gradients)
            self.local_model.set_weights(weights)
        return






    #---------------基本工具-----------------
    def clear_buffer(self):#初始化缓存
        self.is_child_participant=[]#0未初始化不知道是否参与，1参与，-1不参与
        self.received_weight=[]#接收各个ip下数据的权重，在发送最后一个数据时发送
        for i in self.down_server:
            self.is_child_participant.append(0)
            self.received_weight.append(0)
        self.n_down_server=len(self.down_server)
        return
    def udp_send(self,address,message):
        """Fire-and-forget a single UDP datagram.

        address: (host, port) tuple of the receiver.
        message: bytes payload (callers encode before calling).
        """
        client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            client_socket.sendto(message, address)
        finally:
            # Always release the socket, even if sendto raises
            # (the original leaked the fd on error).
            client_socket.close()
        return
    def command_send(self,address,message :str):# command requiring a receipt; returns True on success
        """Reliably deliver a JSON command over UDP.

        Tags the message with a random feedback id plus our return address,
        then resends every ~0.5s until the matching receipt shows up in
        self.command_id (populated by feedback()), giving up after 3 seconds.

        Returns True on acknowledged delivery, False on timeout.
        NOTE(review): ids come from np.random.randint(1e6); a collision with
        an id already in command_id would mask a lost receipt — verify this
        is acceptable for the deployment scale.
        """
        message_dic=json.loads(message)
        random_id=np.random.randint(1e6)
        if "feedback_id" in message_dic or "addr" in message_dic:
            print("warning: \"feedback_id\" \"addr\" in message_dic is overrode")
        message_dic['feedback_id']=random_id
        message_dic['addr']=(self.local_ip,self.port)

        message=json.dumps(message_dic)
        self.udp_send(address,message.encode("utf-8"))
        start_time=time.time()
        check_time=0.1# interval between checks for a receipt
        resend_time=0.5# interval between resends of the command
        resend_n=round(resend_time/check_time)
        n=0
        while(time.time()-start_time<3):
            time.sleep(check_time)
            n+=1
            if random_id in self.command_id:
                return True
            elif n==resend_n:
                self.udp_send(address,message.encode("utf-8"))# no receipt yet: resend the command
                n=0
        print("node {} is unresponsive, task id:{}, message{}".format(address,random_id,message))
        return False
    def feedback(self,json_data): #已经处理过的命令返回False，新命令返回True
        id=json_data["feedback_id"]
        ip=json_data["addr"][0]
        port=json_data["addr"][1]
        if "order" in json_data:#是feedback发送来的，不是新命令
            if json_data["order"]=="feedback":
                self.command_id.add(id)
                return False#回执不需要处理
        #可能是新命令
        send_dic={"order":"feedback","feedback_id":id}
        send_dic['addr']=(self.local_ip,self.port)
        sned_json=json.dumps(send_dic)
        self.udp_send((ip,port),sned_json.encode('utf-8'))
        if id in self.command_id:
            return False
        self.command_id.add(id)
        return True
    def change_parameter(self,json_data):
        """Set a runtime parameter by name and append the change to result.txt.

        json_data must contain "parameter_name" and "parameter_value"; the
        value is spliced into an assignment statement and exec'd, so string
        values must arrive pre-quoted.
        SECURITY NOTE(review): exec() on data received over the network is
        arbitrary code execution — restrict to trusted peers or replace with
        an attribute whitelist + setattr.
        """
        name=json_data["parameter_name"]
        value=json_data["parameter_value"]
        order="self."+name+"="+str(value)
        exec(order)
        # Audit-log every parameter change with a timestamp.
        time_str=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) 
        with open("./result.txt","a") as f:
            f.write("{}:{}\n".format(time_str,order)) 
        return
    def wait_for_flag(self,key):
        i=0
        while(True):
            if self.flag[key]:
                self.flag[key]=False
                print("\rwaite for {} complete ".format(key))
                return
            else:
                time.sleep(0.5)
                i+=1
                a=["--","\\","|","/"][i%4]
                print("\rwaite for {} {} ".format(key,a),end='')
        return
    def wait_for_child(self):
        ii=0
        def is_child_aggregate_complete():
            for i in range(self.n_down_server):
                if self.is_child_participant[i]!=1:
                    return False
            return True
        while(True):
            if is_child_aggregate_complete():
                for i in range(self.n_down_server):
                    print("\rwaite for",end='')
                    print("{:>15}:{:<2}".format(self.down_server[i],"√"),end='')
                print("")
                return
            else:
                time.sleep(0.5)
                ii+=1
                a=["--","\\","|","/"][ii%4]
                print("\rwaite for",end='')
                for j in range(self.n_down_server):
                    if self.is_child_participant[j]!=1:
                        print("{:>15}:{:<2}".format(self.down_server[j],a),end='')
                    else:
                        print("{:>15}:{:<2}".format(self.down_server[j],"√"),end='')
        return
    #---------------应用相关-----------------
    def get_data_sets(self,addr):
        # 存放图片类别和标签的列表：第0类
        list_0 = []
        label_0 = []
        # 存放图片类别和标签的列表：第1类
        list_1 = []
        label_1 = []
        # 存放图片类别和标签的列表：第2类
        list_2 = []
        label_2 = []
        # 存放图片类别和标签的列表：第3类
        list_3 = []
        label_3 = []
        # 存放图片类别和标签的列表：第4类
        list_4 = []
        label_4 = []
        # 存放图片类别和标签的列表：第5类
        list_5 = []
        label_5 = []
        # 存放图片类别和标签的列表：第6类
        list_6 = []
        label_6 = []
        # 存放图片类别和标签的列表：第6类
        list_7 = []
        label_7 = []
        # 存放图片类别和标签的列表：第8类
        list_8 = []
        label_8 = []
        # 存放图片类别和标签的列表：第9类
        list_9 = []
        label_9 = []
        # 存放图片类别和标签的列表：第10类
        list_10 = []
        label_10 = []
        for file in os.listdir(addr):  # 获得file_dir路径下的全部文件名
            image_file_path = addr+"/"+file
            for image_name in os.listdir(image_file_path):
                image_name_path = os.path.join(image_file_path, image_name)
                # 将图片存放入对应的列表

                if image_file_path[-2:] == '10':
                    list_10.append(image_name_path)
                    label_10.append(10)
                elif image_file_path[-1:] == '0':
                    list_0.append(image_name_path)
                    label_0.append(0)
                elif image_file_path[-1:] == '1':
                    list_1.append(image_name_path)
                    label_1.append(1)
                elif image_file_path[-1:] == '2':
                    list_2.append(image_name_path)
                    label_2.append(2)
                elif image_file_path[-1:] == '3':
                    list_3.append(image_name_path)
                    label_3.append(3)
                elif image_file_path[-1:] == '4':
                    list_4.append(image_name_path)
                    label_4.append(4)
                elif image_file_path[-1:] == '5':
                    list_5.append(image_name_path)
                    label_5.append(5)
                elif image_file_path[-1:] == '6':
                    list_6.append(image_name_path)
                    label_6.append(6)
                elif image_file_path[-1:] == '7':
                    list_7.append(image_name_path)
                    label_7.append(7)
                elif image_file_path[-1:] == '8':
                    list_8.append(image_name_path)
                    label_8.append(8)
                elif image_file_path[-1:] == '9':
                    list_9.append(image_name_path)
                    label_9.append(9)



        # 合并数据
        image_list = np.hstack((list_0, list_1, list_2, list_3, list_4, list_5, list_6, list_7, list_8, list_9 ,list_10))
        label_list = np.hstack((label_0, label_1, label_2, label_3, label_4, label_5, label_6, label_7, label_8, label_9,label_10))

        # 利用shuffle打乱数据
        temp = np.array([image_list, label_list])
        temp = temp.transpose()  # 转置
        np.random.shuffle(temp)
        temp = temp.transpose()  # 转置
        image_list=temp[0]
        label_list=temp[1]
        #label_list=label_list.astype(np.float)
        #label_list=label_list.astype(np.int)
        return image_list ,np.array(label_list,dtype=np.uint8)
    def read_data_sets(self,image_list):
        """Load each image path in image_list as a 50x50 RGB array scaled to
        [0, 1].

        Returns a float array of shape (len(image_list), 50, 50, 3).
        """
        x_train = np.zeros((len(image_list), 50, 50, 3))
        for idx, path in enumerate(image_list):
            raw = tf.io.read_file(path)
            decoded = tf.image.decode_jpeg(raw, channels=3)
            resized = tf.image.resize(decoded, (50, 50))
            x_train[idx] = resized / 255.0
        return x_train
    def update_part1(self,ip):
        """Push the current local model to one downstream node.

        Saves the weights to update_to_part1.h5, negotiates the transfer via
        prepare_send_model, waits for the receiver's ready signal
        (flag_start_send, presumably set by the listener thread — verify),
        then streams the file with send_model.
        NOTE(review): the busy-wait has no timeout — an unresponsive receiver
        blocks this thread indefinitely.
        """
        print("ready to send new model to "+ip)
        self.local_model.save_weights('.//model//update_to_part1.h5')
        # Model transfers use port+1, separate from the command port.
        self.prepare_send_model((ip,self.port+1),'.//model//update_to_part1.h5',"update_model_ready",1)
        while not self.flag_start_send:
            time.sleep(1)
        self.flag_start_send=False
        print("start send new model")
        self.send_model((ip,self.port+1),"update_data")
        print("send complete")
        return

if __name__ == '__main__':
    # Stand up a federated-learning node from the local config file and
    # preload the shared initial weights so every node starts from the
    # same model.
    s=FLserver("config/config.json")
    s.start()
    s.local_model.load_weights("./model/initial.h5")

    # Example of remotely changing a parameter; string values are
    # double-escaped so exec() in change_parameter sees a quoted literal.
    #json_data='{"parameter_name":"FSVRG_h","parameter_value":"\\"jhuio\\""}'
    #s.change_parameter(json.loads(json_data))

    

    
