import copy
import socket
import threading
import sys
import os
import struct
import torch
from torch import device, nn
import asyncio
import websockets
import json
import time

from LeNet_model_MNIST import LeNet_edge_side, LeNet_client_side
from get_data import get_mnist, dataset_iid


# Respond to requests from the web front-end
async def handle_client(websocket, path):
    """Serve the web UI: reply to each 'get' message with the accuracy history."""
    peer = websocket.remote_address[0]
    print(f"client connected from {peer}")

    try:
        async for msg in websocket:
            if msg != 'get':
                continue
            print(peer + " request get")
            # Snapshot the shared accuracy list before serialising it.
            snapshot = accs
            await websocket.send(json.dumps({'ack': 1, 'accuracy': snapshot}))
    except websockets.exceptions.ConnectionClosed:
        # Peer disconnected; nothing to clean up.
        pass

def run_server():
    """Start the websocket accuracy feed on port 5600 and block forever."""
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    server = websockets.serve(handle_client, '0.0.0.0', 5600)
    print('websocket run at localhost:5600')
    loop.run_until_complete(server)
    loop.run_forever()

# NOTE(review): this rebinding shadows the `device` name imported from torch above.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
epochs = 2  # number of training rounds (not referenced in this chunk)
lr = 0.001  # Adam learning rate for the edge-side optimizer

num_clients = 2  # number of client devices expected to connect
if_train = 1  # training flag: 1 tells connected clients to start training
connected = 0  # count of client devices connected so far (guarded by lock_con)
model = 0  # count of local models received
model_kind = '0'  # model-kind identifier exchanged with the cloud/clients
lock_con = threading.Lock()  # guards `connected`
lock_model = threading.Lock()  # guards `model`
lock_kind = threading.Lock()  # guards `client_model`
client_model = 0  # count of clients that acknowledged the model kind
net = 0  # model-kind string received from the cloud (see server_cloud)
is_agg = 0  # aggregation flag (not referenced in this chunk)
Net = {}  # model registry (not referenced in this chunk)

# ===================================================================================
# For edge Side Loss and Accuracy
loss_train_collect = []  # per-round average train loss (not updated in this chunk)
acc_train_collect = []  # per-round average train accuracy (not updated in this chunk)
loss_test_collect = []  # per-round average test loss over all users (evaluate_edge)
acc_test_collect = []  # per-round average test accuracy over all users (evaluate_edge)
batch_acc_train = []  # per-batch train accuracy buffer (not updated in this chunk)
batch_loss_train = []  # per-batch train loss buffer (not updated in this chunk)
batch_acc_test = []  # per-batch test accuracy buffer, reset after len_batch batches
batch_loss_test = []  # per-batch test loss buffer, reset after len_batch batches

criterion = nn.CrossEntropyLoss()  # loss shared by training and evaluation
count1 = 0  # train batch counter (not referenced in this chunk)
count2 = 0  # test batch counter, managed by evaluate_edge

# to print train - test together in each round-- these are made global
acc_avg_all_user_train = 0  # last round's average train accuracy over users
loss_avg_all_user_train = 0  # last round's average train loss over users
loss_train_collect_user = []  # per-user average train loss for the current round
acc_train_collect_user = []  # per-user average train accuracy for the current round
loss_test_collect_user = []  # per-user average test loss for the current round
acc_test_collect_user = []  # per-user average test accuracy for the current round

# client idx collector
idx_collect = []  # indices of clients served this round (not referenced here)
# NOTE(review): fed_check is only ever read/cleared in this chunk; presumably
# another component sets it when all users finish a round — verify.
fed_check = False
accs = []  # running history of every batch accuracy; served to the web UI

# ====================================================================================================
#                                  edge Side Program
# ====================================================================================================
def calculate_accuracy(fx, y):
    """Top-1 accuracy (percent) of logits `fx` against labels `y`.

    Side effect: the value is also appended to the module-level `accs`
    history so the websocket feed can report it.
    """
    global accs
    predicted = fx.max(1, keepdim=True)[1]
    hits = predicted.eq(y.view_as(predicted)).sum()
    acc = 100.00 * hits.float() / predicted.shape[0]
    print('acc:', acc.tolist())
    accs.append(acc.tolist())
    print(accs)
    return acc

# edge-side function associated with Training
def train_edge(fx_client, labels):
    """Run one edge-side training step on client activations.

    Forward-props the smashed data `fx_client` through the edge half of the
    model, computes cross-entropy loss against `labels`, back-props, takes
    one Adam step, and returns the gradient w.r.t. `fx_client` so the
    client can continue backprop on its half.

    NOTE(review): `net_glob_edge` only exists as a local inside
    server_client(), so calling this function as written raises NameError —
    it appears superseded by the inline loop in server_client(); verify.

    NOTE(review): `fx_client.grad` is only populated if the incoming tensor
    requires grad and remains a leaf, i.e. `.to(device)` is a no-op because
    the tensor is already on `device` — confirm with the caller.
    """

    net_glob_edge.train()
    # A fresh Adam optimizer is built per call, so moment estimates are not
    # carried across batches.
    optimizer_edge = torch.optim.Adam(net_glob_edge.parameters(), lr)

    # train and update
    optimizer_edge.zero_grad()

    fx_client = fx_client.to(device)
    labels = labels.to(device)

    # ---------forward prop-------------
    fx_edge = net_glob_edge(fx_client)

    # calculate loss
    loss = criterion(fx_edge, labels)
    # calculate accuracy (also appends to the global `accs` history)
    calculate_accuracy(fx_edge, labels)
    print(accs)

    # --------backward prop--------------
    loss.backward()
    dfx_client = fx_client.grad.clone().detach()
    optimizer_edge.step()

    # batch_loss_train.append(loss.item())
    # batch_acc_train.append(acc.item())

    # send gradients to the client
    return dfx_client


# edge-side functions associated with Testing
def evaluate_edge(fx_client, y, idx, len_batch, ell):
    """Run one edge-side evaluation step on client activations.

    Accumulates per-batch test loss/accuracy in module globals; after
    `len_batch` batches (one user's full test pass) the user's averages are
    appended to the *_collect_user lists, and — if `fed_check` is set — the
    round's averages over all users are appended to the *_collect lists and
    printed next to the training averages.

    Args:
        fx_client: smashed activations from the client side.
        y: ground-truth labels for the batch.
        idx: client index (only used by the commented-out log line).
        len_batch: number of batches in this user's test set.
        ell: current round number, for the printed summary.

    Returns None; all results are recorded in module-level globals.
    """
    global net_glob_edge, criterion, batch_acc_test, batch_loss_test
    # NOTE(review): acc_avg_train_all / loss_avg_train_all are declared
    # global here but never assigned anywhere in this file.
    global loss_test_collect, acc_test_collect, count2, num_clients, acc_avg_train_all, loss_avg_train_all, fed_check
    global loss_test_collect_user, acc_test_collect_user, acc_avg_all_user_train, loss_avg_all_user_train

    net_glob_edge.eval()

    with torch.no_grad():
        fx_client = fx_client.to(device)
        y = y.to(device)
        # ---------forward prop-------------
        fx_edge = net_glob_edge(fx_client)

        # calculate loss
        loss = criterion(fx_edge, y)
        # calculate accuracy (also appends to the global `accs` history)
        acc = calculate_accuracy(fx_edge, y)

        batch_loss_test.append(loss.item())
        batch_acc_test.append(acc.item())

        count2 += 1
        # One user's test pass is complete: fold the batch buffers into
        # per-user averages and reset them for the next user.
        if count2 == len_batch:
            acc_avg_test = sum(batch_acc_test) / len(batch_acc_test)
            loss_avg_test = sum(batch_loss_test) / len(batch_loss_test)

            batch_acc_test = []
            batch_loss_test = []
            count2 = 0

            # prGreen('Client{} Test =>\tAcc: {:.3f} \tLoss: {:.4f}'.format(idx, acc_avg_test,
            #                                                                                 loss_avg_test))

            # Store the last accuracy and loss
            acc_avg_test_all = acc_avg_test
            loss_avg_test_all = loss_avg_test

            loss_test_collect_user.append(loss_avg_test_all)
            acc_test_collect_user.append(acc_avg_test_all)

            # if all users are served for one round ----------
            # NOTE(review): fed_check is never set True in this file;
            # presumably another component flips it at round end — verify.
            if fed_check:
                fed_check = False

                acc_avg_all_user = sum(acc_test_collect_user) / len(acc_test_collect_user)
                loss_avg_all_user = sum(loss_test_collect_user) / len(loss_test_collect_user)

                loss_test_collect.append(loss_avg_all_user)
                acc_test_collect.append(acc_avg_all_user)
                acc_test_collect_user = []
                loss_test_collect_user = []

                print("====================== EDGE V1==========================")
                print(' Train: Round {:3d}, Avg Accuracy {:.3f} | Avg Loss {:.3f}'.format(ell, acc_avg_all_user_train,
                                                                                          loss_avg_all_user_train))
                print(' Test: Round {:3d}, Avg Accuracy {:.3f} | Avg Loss {:.3f}'.format(ell, acc_avg_all_user,
                                                                                         loss_avg_all_user))
                print("==========================================================")

    return



def send_string(con, string):
    """Send a control string to the peer as UTF-8 bytes."""
    payload = string.encode()
    con.send(payload)


def send_file(con, file_addr):
    """Send one file over `con` using the '128sq' header protocol.

    Wire format: a fixed header packed as '128sq' (128-byte null-padded
    file name + 64-bit file size), followed by the raw file bytes in
    1024-byte chunks.

    Silently does nothing if `file_addr` is not an existing regular file
    (preserves the original best-effort behavior). The original wrapped
    the body in a pointless `while True: ... break` loop and computed an
    unused `fileinfo_size`; both removed.
    """
    filepath = file_addr  # path of the file to transmit
    if not os.path.isfile(filepath):
        return
    # '128sq' (not '128sl'): 'q' keeps the size field 64-bit on both
    # Linux and Windows, which disagree on the width of 'l'.
    fhead = struct.pack('128sq', bytes(os.path.basename(filepath).encode('utf-8')), os.stat(filepath).st_size)
    con.send(fhead)
    print('client filepath: {0}'.format(filepath))
    with open(filepath, 'rb') as fp:
        while True:
            data = fp.read(1024)
            if not data:
                print('{0} file send over...'.format(filepath))
                break
            con.send(data)


def deal_data(conn, port):
    """Receive a single file pushed by the peer over `conn`.

    Wire format: a fixed header packed as '128sq' (128-byte null-padded
    file name + 64-bit file size), followed by exactly `filesize` bytes
    of content. The file is saved in the current directory as
    '<port><original name>' and that path is returned as a str; returns
    None if the peer closed the connection before sending a header.

    Fixes over the original:
    - the header and body are now read with an exact-read loop; the
      original assumed `recv(n)` returns n bytes and did
      `recvd_size = filesize` after a possibly-short final read, silently
      truncating the file on short reads;
    - a peer closing mid-transfer no longer spins forever on `recv`
      returning b''.
    """
    print('Accept new connection from {0}'.format(port))
    # '128sq' (not '128sl'): 'q' keeps the size field 64-bit on both
    # Linux and Windows, which disagree on the width of 'l'.
    fileinfo_size = struct.calcsize('128sq')
    buf = _recv_exact(conn, fileinfo_size)
    print('收到的字节流：', buf, type(buf))
    if not buf:
        return None
    filename, filesize = struct.unpack('128sq', buf)
    fn = filename.strip(b'\x00')
    # Prefix with the port so files from different clients don't collide.
    new_filename = os.path.join(b'./', port.encode() + fn)
    print('file new name is {0}, filesize if {1}'.format(new_filename, filesize))
    with open(new_filename, 'wb') as fp:
        print("start receiving...")
        remaining = filesize
        while remaining > 0:
            data = conn.recv(min(remaining, 1024))
            if not data:  # peer closed mid-transfer; keep what we have
                break
            fp.write(data)
            remaining -= len(data)
    print("end receive...")
    return new_filename.decode()


def _recv_exact(conn, size):
    """Read exactly `size` bytes from `conn`, looping over short reads.

    Returns b'' if the peer closed before sending anything, or a short
    buffer if it closed mid-read.
    """
    chunks = []
    got = 0
    while got < size:
        data = conn.recv(size - got)
        if not data:
            break
        chunks.append(data)
        got += len(data)
    return b''.join(chunks)


def socket_service(port):
    """Listen on 127.0.0.1:<port> and hand the first accepted connection
    to a server_client worker thread."""
    try:
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind to the loopback address; change this to the server's own
        # IP when clients connect from other machines.
        listener.bind(('127.0.0.1', port))
        listener.listen(10)

    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print("Waiting...")
    conn, addr = listener.accept()
    worker = threading.Thread(target=server_client, args=(conn, port.__str__()))
    worker.start()


def server_cloud(ip_addr, port):
    """Upstream coordination client: connect to the cloud at
    (ip_addr, port), report when both devices are connected, receive the
    model kind, relay the clients' readiness, accept the 'train' command,
    and finally upload the aggregated 'client.pth' / 'server.pth' files.

    NOTE(review): every `while True` below busy-waits on shared state
    mutated by other threads, with no sleep — it spins a core until the
    condition holds.
    """
    try:
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Adjust the address as needed; it must be the cloud server's IP.
        conn.connect((ip_addr, port))

    except socket.error as msg:
        print(msg)
        sys.exit(1)

    # Wait until both client devices have connected to this edge.
    while True:
        if connected == 2:
            send_string(conn, 'ready')
            break

    command = conn.recv(1024).decode()
    global model_kind, net, if_train
    model_kind = command  # the cloud dictates which model kind to use

    net = model_kind

    # Wait until both clients acknowledged the model kind.
    while True:
        if client_model == 2:
            send_string(conn, 'all-ready')
            break

    command = conn.recv(1024).decode()

    if command == 'train':
        if_train = 1

    # Upload the aggregated model halves once __main__ has written them.
    while True:
        if os.path.exists('client.pth') and os.path.exists('server.pth'):
            send_file(conn, 'client.pth')
            send_file(conn, 'server.pth')
            break


def server_client(conn, port):
    """Per-client worker thread: run the full split-learning session with
    one connected client device over `conn`.

    Protocol as implemented below:
      1. bump the shared `connected` counter;
      2. send the model kind and wait for the client's 'ready' ack;
      3. once `if_train` is set, send 'train';
      4. build the edge-side model, then loop: each 'finish' message is
         followed by an activation file and a label file; run one
         forward/backward/Adam step and send the activation gradient back;
         an 'over' message ends the loop;
      5. save the edge-side weights to '<port>server.pth' and receive one
         final file from the client (its client-side weights).
    """
    global connected
    lock_con.acquire()
    connected += 1
    lock_con.release()

    # Busy-wait until the model kind is known. NOTE(review): '0' is also
    # model_kind's initial value, so this fires immediately unless
    # server_cloud changed it first — verify the intended condition.
    while True:
        if model_kind == '0':
            send_string(conn, model_kind)
            break

    command = conn.recv(1024).decode()
    print(command)
    global client_model
    if command == 'ready':
        lock_kind.acquire()
        client_model += 1
        lock_kind.release()
        print("s receive ready:", client_model)

    # Busy-wait for the training flag (initialised to 1, so as written
    # this also fires immediately).
    while True:
        if if_train == 1:
            send_string(conn, 'train')
            print("test: send train")
            break

    # Edge-side half of the split LeNet, one instance per client thread.
    net_glob_edge = LeNet_edge_side()
    if torch.cuda.device_count() > 1:
        print("We use", torch.cuda.device_count(), "GPUs")
        net_glob_edge = nn.DataParallel(net_glob_edge)  # to use the multiple GPUs

    net_glob_edge.to(device)
    print(net_glob_edge)

    # Training loop: each 'finish' announces one batch of smashed data
    # plus labels, delivered as files via deal_data.
    while True:
        command = conn.recv(1024).decode()
        if command == 'finish':
            data = deal_data(conn, port)
            label = deal_data(conn, port)
            inputs = torch.load(data)
            label = torch.load(label)

            net_glob_edge.train()
            # A fresh Adam is created per batch, so optimizer state does
            # not persist across batches.
            optimizer_edge = torch.optim.Adam(net_glob_edge.parameters(), lr)
            # train and update
            optimizer_edge.zero_grad()
            inputs = inputs.to(device)
            label = label.to(device)
            # print(inputs)
            # print(inputs.requires_grad)
            # ---------forward prop-------------
            fx_edge = net_glob_edge(inputs)
            # calculate loss
            loss = criterion(fx_edge, label)
            # print('loss:', loss)
            # calculate accuracy (appends to the global `accs` history)
            calculate_accuracy(fx_edge, label)
            # --------backward prop--------------
            loss.backward()
            # NOTE(review): inputs.grad is only populated if the loaded
            # tensor requires grad and stays a leaf after .to(device) —
            # confirm the client saves it with requires_grad=True.
            dfx_client = inputs.grad.clone().detach()
            optimizer_edge.step()
            dfx = dfx_client
            # print('dfx:', dfx)

            # Ship the activation gradient back so the client can finish
            # backprop through its half of the model.
            torch.save(dfx, './grad.pt')
            send_file(conn, './grad.pt')
        elif command == 'over':
            break

    # Persist this client's edge-side weights and receive the final file
    # from the client ('<port>client.pth', aggregated in __main__).
    torch.save(net_glob_edge.state_dict(), port + 'server.pth')
    deal_data(conn, port)

def FedAvg(w):
    """Element-wise average of a list of model state dicts (vanilla FedAvg).

    The inputs are not mutated; a deep copy of the first dict is used as
    the accumulator.
    """
    n = len(w)
    avg = copy.deepcopy(w[0])
    for key in avg:
        for other in w[1:]:
            avg[key] += other[key]
        avg[key] = torch.div(avg[key], n)
    return avg


if __name__ == '__main__':
    # Serve live accuracy readings to the web front-end.
    server_thread = threading.Thread(target=run_server)
    server_thread.start()
    # One listener per expected client device.
    socket_service(5555)
    socket_service(5556)
    # server_cloud('127.0.0.1', 5557)

    # Wait until both clients uploaded their client-side weights, then
    # FedAvg them into a single client-side model.
    while True:
        if os.path.exists('5555client.pth') and os.path.exists('5556client.pth'):
            net1 = LeNet_client_side()
            net2 = LeNet_client_side()
            net1.load_state_dict(torch.load('./5555client.pth'))
            net2.load_state_dict(torch.load('./5556client.pth'))
            w = [net1.state_dict(), net2.state_dict()]

            phase_model = FedAvg(w)

            torch.save(phase_model, './phase_client.pth')
            # Save the aggregated weights as the result. (The original
            # saved `client = 0` via client.state_dict(), which raises
            # AttributeError on an int.)
            torch.save(phase_model, 'client.pth')
            break
        time.sleep(0.5)  # avoid a busy spin while waiting for the files

    # Same aggregation for the edge-side ("server") halves of the model.
    while True:
        if os.path.exists('5555server.pth') and os.path.exists('5556server.pth'):
            net1 = LeNet_edge_side()
            net2 = LeNet_edge_side()
            # Load the files whose existence was just checked. (The
            # original loaded '5555phase_server.pth', a name nothing in
            # this program ever writes.)
            net1.load_state_dict(torch.load('./5555server.pth'))
            net2.load_state_dict(torch.load('./5556server.pth'))
            w = [net1.state_dict(), net2.state_dict()]

            phase_model = FedAvg(w)

            torch.save(phase_model, './phase_server.pth')
            torch.save(phase_model, 'server.pth')
            break  # the original looped forever here with no exit
        time.sleep(0.5)