from concurrent import futures
from io import StringIO
from io import BytesIO

import torch
# Pin the working directory on Windows so previously downloaded artifacts are
# found again (prevents re-downloading datasets/models).
import os 
if os.name == 'nt':
    os.chdir("C:\\federated_malware\\Project")
    print ("cwd",os.getcwd())# current directory
    print ("工作目录",os.path.abspath('.'))# current working directory (relative '.')
    print ("工作目录",os.path.abspath(os.curdir))# current working directory (os.curdir)

import sys
# Make the project root and the generated gRPC stubs importable.
# NOTE(review): hard-coded Windows paths — confirm they match the deploy machine.
sys.path.append('C:\\federated_malware\\Project\\')
sys.path.append('C:\\federated_malware\\Project\\rpc\\grpc_gen')
# print(sys.path)

import grpc
from grpc.experimental import aio

from rpc.grpc_gen import trainer_pb2 as pb2
from rpc.grpc_gen import trainer_pb2_grpc as pb2_grpc

import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)

from trainers.fedbase.FedClientBase import FedClientBase
import asyncio
# Import configuration parameters ------------------------------------------------------
from fl_config import dataset_params,server_params,client_params
from datasets.minist.data_load import dataManager as mdm
from models.minist.dnn_model import Net
# A thin re-wrapping layer dedicated to data conversion/handling while keeping the
# same interface: it proxies a local worker so it can act as a remote worker,
# exposing the externally visible RPC interface.
class trainer_servcie(pb2_grpc.trainerServicer):
    """gRPC facade that exposes a local federated-learning worker remotely.

    Every RPC handler simply forwards to the matching method of the wrapped
    worker, converting between protobuf messages and native Python/torch
    objects (bytes <-> state_dict) at the boundary.
    """

    def __init__(self, worker):
        # The wrapped worker object that performs the actual work.
        self.worker = worker

    def set_parameters(self, request, context):
        """Extract hyper-parameters from the request and pass them to the worker."""
        logger.info('set_parameters is called')
        hyperparams = {
            # number of training epochs
            "epochs": request.epochs,
            # batch size (superseded by update_steps)
            "batch_size": request.batch_size,
            # step size for local gradient descent
            "inner_lr": request.inner_lr,
            # step size for the server-side gradient step
            "outer_lr": request.outer_lr,
            # number of local updates; overrides batch_size:
            # batch_size = len(dataset) / update_steps
            "update_steps": request.update_steps,
        }
        print(request)
        self.worker.set_parameters(hyperparams)
        return pb2.empty_response()

    def process_data(self, empty_request, context):
        """Trigger the worker's data pre-processing step."""
        logger.info('process_data is called')
        self.worker.process_data()
        return pb2.empty_response()

    def process_data_support_and_query(self, empty_request, context):
        """Have the worker prepare its data as support and query sets."""
        logger.info('process_data_support_and_query is called')
        self.worker.process_data_support_and_query()
        return pb2.empty_response()

    def load_model_state_dict(self, request, context):
        """Deserialize a torch state_dict from raw bytes and load it into the worker."""
        logger.info('load_model_state_dict is called')
        buffer = BytesIO(request.state_dict)
        # NOTE(review): torch.load without map_location assumes the tensors'
        # original device is available locally — confirm for CPU-only clients.
        state_dict = torch.load(buffer)
        self.worker.load_model_state_dict(state_dict)
        return pb2.empty_response()

    def get_model_state_dict(self, empty_request, context):
        """Serialize the worker's current model state_dict into response bytes."""
        logger.info('get_model_state_dict is called')
        buffer = BytesIO()
        torch.save(self.worker.get_model_state_dict(), buffer)
        return pb2.model_state_dict_response(state_dict=buffer.getvalue())

    def save_model(self, empty_request, context):
        """Persist the worker's model (the worker decides where and how)."""
        logger.info('save_model is called')
        self.worker.save_model()
        return pb2.empty_response()

    def read_model(self, model_path, context):
        """Have the worker load its model from storage; `model_path` is currently unused."""
        logger.info('read_model is called')
        self.worker.read_model()
        return pb2.empty_response()

    def train(self, empty_request, context):
        """Run one local training round and return its result."""
        logger.info('train is called')
        return pb2.train_response(result=self.worker.train())

    def test(self, empty_request, context):
        """Evaluate the worker's model and return the test result."""
        logger.info('test is called')
        return pb2.test_response(result=self.worker.test())

# Blocking gRPC server, used for thread/process-level simulation tests.
def start_server(worker: FedClientBase, worker_id: str):
    """Run a blocking gRPC server exposing `worker` until terminated.

    Args:
        worker: the local federated client whose methods the service exposes.
        worker_id: address string to bind, e.g. "host:port".
    """
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=30))
    service = trainer_servcie(worker)
    pb2_grpc.add_trainerServicer_to_server(service, grpc_server)
    grpc_server.add_insecure_port(worker_id)
    grpc_server.start()
    print("grpc server start...")
    # Block the calling thread until the server is shut down.
    grpc_server.wait_for_termination()

# Coroutine/async version of the server, used for single-machine simulation.
async def start_server_async(worker: FedClientBase, worker_id: str):
    """Run an asyncio-based gRPC server exposing `worker` until terminated.

    Args:
        worker: the local federated client whose methods the service exposes.
        worker_id: address string to bind, e.g. "host:port".
    """
    aio_server = aio.server(futures.ThreadPoolExecutor(max_workers=30))
    pb2_grpc.add_trainerServicer_to_server(trainer_servcie(worker), aio_server)
    aio_server.add_insecure_port(worker_id)
    await aio_server.start()
    print("grpc server start...")
    # Suspend until the server is shut down.
    await aio_server.wait_for_termination()

if __name__ == '__main__':
    # Build the model and data manager, wrap them in a worker, and serve it.
    # NOTE(review): ideally the worker is initialized by the caller and passed in;
    # constructing it here keeps the script runnable standalone.
    model = Net()
    mdm_avg = mdm.allocate_data_avg(client_number=dataset_params["client_number"])
    mdm_test = mdm.allocate_data_test()
    worker = FedClientBase(mdm_avg[0], model)
    # BUG FIX: start_server requires a bind address (worker_id) as its second
    # positional argument; the original call start_server(worker) raised
    # TypeError at startup. Bind to a default local port.
    start_server(worker, "[::]:50051")