import copy
import socket
import os
import sys
import struct
import threading
import torch
from torch import nn, device
from torch.utils.data import DataLoader

from LeNet_model_MNIST import LeNet_client_side
from get_data import DatasetSplit, get_mnist, dataset_iid

model_kind = '0'  # model selector; overwritten by the server's first handshake message in client_connect
lr = 0.001  # learning rate for the client-side Adam optimizer
epochs = 5  # number of local training epochs
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # prefer GPU when available

def send_string(con, string):
    """Send a short UTF-8 control string (e.g. 'ready', 'finish') to the peer.

    Uses sendall() instead of send(): send() may transmit only part of the
    buffer, which would corrupt the lock-step command protocol.
    """
    con.sendall(string.encode())


def _recv_exact(conn, nbytes):
    """Read up to *nbytes* from *conn*, looping over short reads.

    Returns the collected bytes; the result is shorter than *nbytes* only
    when the peer closes the connection early (b'' if it closed before
    sending anything at all).
    """
    chunks = []
    remaining = nbytes
    while remaining > 0:
        chunk = conn.recv(remaining)
        if not chunk:  # peer closed the connection
            break
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)


def recv_file(conn, port):
    """Receive one file from the peer and save it locally.

    Wire format: a struct '128sq' header (NUL-padded file name, int64
    payload size) followed by exactly that many payload bytes.  The file
    is saved as './<port><original name>' so files from different
    connections do not collide.

    Returns the saved path as str, or None if the peer sent nothing.
    Raises ConnectionError if the stream ends mid-header or mid-file.
    """
    # '128sq' rather than '128sl': 'q' is a fixed 8-byte size field, so the
    # header layout matches between 32-bit and 64-bit machines (Linux/Windows).
    fileinfo_size = struct.calcsize('128sq')
    # A single recv() may return fewer bytes than requested; loop until the
    # whole header is in (the original code could mis-parse a short read).
    buf = _recv_exact(conn, fileinfo_size)
    print('收到的字节流：', buf, type(buf))
    if not buf:
        return None
    if len(buf) < fileinfo_size:
        raise ConnectionError('incomplete file header received')
    filename, filesize = struct.unpack('128sq', buf)
    fn = filename.strip(b'\x00')  # drop the struct's NUL padding
    # Prefix with the port number so concurrent clients write distinct files.
    new_filename = os.path.join(str.encode('./'), str(port).encode() + fn)
    print('file new name is {0}, filesize if {1}'.format(new_filename, filesize))
    recvd_size = 0  # bytes of payload received so far
    with open(new_filename, 'wb') as fp:
        print("start receiving...")
        while recvd_size < filesize:
            data = conn.recv(min(1024, filesize - recvd_size))
            if not data:
                # Original code assumed the final recv() returned everything it
                # asked for and silently wrote a truncated file; fail loudly.
                raise ConnectionError('connection closed before file was fully received')
            recvd_size += len(data)  # count what actually arrived
            fp.write(data)
    print("end receive...")
    return new_filename.decode()


def send_file(con, file_addr):
    """Send the file at *file_addr* to the peer over socket *con*.

    Wire format (mirrors recv_file): a struct '128sq' header carrying the
    base file name (128 bytes, NUL-padded) and the int64 file size, then
    the raw file contents in 1024-byte chunks.

    Silently does nothing if *file_addr* is not an existing regular file
    (preserves the original best-effort behaviour).
    """
    if not os.path.isfile(file_addr):
        return
    # '128sq': 128-byte name + fixed 8-byte size, portable across 32/64-bit.
    fhead = struct.pack('128sq',
                        os.path.basename(file_addr).encode('utf-8'),
                        os.stat(file_addr).st_size)
    # sendall() instead of send(): send() may transmit only part of the
    # buffer, which would desynchronise the receiver's framing.
    con.sendall(fhead)
    print('client filepath: {0}'.format(file_addr))
    with open(file_addr, 'rb') as fp:
        while True:
            data = fp.read(1024)
            if not data:
                print('{0} file send over...'.format(file_addr))
                break
            con.sendall(data)


def client_connect(ip_addr, port):
    """Run one split-learning client session against the server at (ip_addr, port).

    Protocol (lock-step, driven by the server):
      1. Receive the model-kind string, reply 'ready'.
      2. On 'train': for each epoch and batch, run the client-side forward
         pass, ship activations + labels to the server as files, receive the
         gradient file back, and backpropagate through the local model.
      3. Send 'over', upload the trained client weights, download the final
         model, and send test-set outputs/labels for server-side evaluation.

    NOTE(review): ip_addr must be the server's address; the exact command
    strings ('train', 'finish', 'over') must match the server implementation
    — verify against the server script.
    """
    try:
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # change this IP if the server runs on another host
        conn.connect((ip_addr, port))  # the IP here must be the server's IP

    except socket.error as msg:
        print(msg)
        sys.exit(1)

    print("c sends ready to s")
    command = conn.recv(1024).decode()
    global model_kind
    # The server's first message selects which model to use (module-level flag).
    model_kind = command
    send_string(conn, 'ready')

    '''
    data spilt
    it shouldn't be here 
    '''
    # Split MNIST IID across the clients; this client always takes shard 0.
    num_clients = 2
    dataset_train, dataset_test = get_mnist()
    dict_users = dataset_iid(dataset_train, num_clients)
    dict_users_test = dataset_iid(dataset_test, num_clients)


    command = conn.recv(1024).decode()
    if command == 'train':
        # =====================================================================================================
        #                           Client-side Model definition
        # =====================================================================================================
        # Model at client side
        print("test: receive train")
        net_glob_client = LeNet_client_side()
        if torch.cuda.device_count() > 1:
            print("We use", torch.cuda.device_count(), "GPUs")
            net_glob_client = nn.DataParallel(
                net_glob_client)  # to use the multiple GPUs

        net_glob_client.to(device)
        print(net_glob_client)


        # Shard 0 of the IID split is this client's train/test data.
        idx = 0
        idxs = dict_users[0]
        idxs_test = dict_users_test[0]
        ldr_train = DataLoader(DatasetSplit(dataset_train, idxs), batch_size=256 * 4, shuffle=True)
        ldr_test = DataLoader(DatasetSplit(dataset_test, idxs_test), batch_size=256 * 4, shuffle=True)

        print("data load successfully")
        for iter in range(epochs):
            print(iter)
            # Train on a copy; the epoch's result is copied back below.
            net = copy.deepcopy(net_glob_client).to(device)
            net.train()
            optimizer_client = torch.optim.Adam(net.parameters(), lr)
            len_batch = len(ldr_train)
            for batch_idx, (images, labels) in enumerate(ldr_train):
                images, labels = images.to(device), labels.to(device)
                optimizer_client.zero_grad()
                # ---------forward prop-------------
                fx = net(images)
                # Detached copy of the cut-layer activations: the server computes
                # its forward/backward on this and returns d(loss)/d(activations).
                client_fx = fx.clone().detach().requires_grad_(True)

                send_string(conn, 'finish')
                torch.save(client_fx, './output.pt')
                # print('client_fx:',client_fx)
                torch.save(labels, './label.pt')
                send_file(conn, './output.pt')
                send_file(conn, './label.pt')

                # Receive the server's gradient file and load the tensor.
                # NOTE(review): torch.load on a received file — trusted server assumed.
                dfx = recv_file(conn, port)
                dfx = torch.load(dfx)

                # Backprop the server-provided gradient through the client model.
                fx.backward(dfx)
                optimizer_client.step()

            w_client = net.state_dict()
            net_glob_client.load_state_dict(w_client)

        send_string(conn, 'over')

        torch.save(net.state_dict(), './client.pth')
        send_file(conn, './client.pth')

        path = recv_file(conn, port)

        # Load the final model and produce test-set outputs for accuracy evaluation.
        final_model = LeNet_client_side()
        final_model.load_state_dict(torch.load(path))

        with torch.no_grad():
            # NOTE(review): feeds raw dataset_test.data (no transform/normalisation,
            # unlike ldr_test above) — confirm this matches the server's evaluation.
            output = final_model(dataset_test.data)
            torch.save(output, './test_output.pt')
            torch.save(dataset_test.targets, './test_label.pt')
            send_file(conn, './test_output.pt')
            send_file(conn, './test_label.pt')


if __name__ == '__main__':
    # Launch a single client worker against the local server.
    worker = threading.Thread(target=client_connect, args=('127.0.0.1', 5555))
    worker.start()
