import logging
import sys
import signal
import time
import argparse
import web3.exceptions
import asyncio
import multiprocessing
from fl.config import Config
from fl.util.chain import BlockchainManager
from fl.util.ipfs import IPFSManager
from fl.util.vrf import VRF
from fl.client import FedChainClient
from net.heartbeat import start_heartbeat
from fl.logger import logger



# Global flag: True while this node is actively participating in a training run.
IS_TRAINING = False

# Command-line interface for the FedChain client.
parser = argparse.ArgumentParser(description="FedChain Client")
parser.add_argument("--eth_protocol", type=str, default="http", help="Ethereum protocol (http or https)")
parser.add_argument("--eth_host", type=str, default="127.0.0.1", help="Ethereum node IP address")
# Int default, consistent with the other port arguments (was the string "8545").
parser.add_argument("--eth_port", type=int, default=8545, help="Ethereum node port")
parser.add_argument("--ipfs_host", type=str, default="127.0.0.1", help="IPFS host")
parser.add_argument("--ipfs_port", type=int, default=5001, help="IPFS port")
parser.add_argument("--learning_rate", type=float, default=0.001, help="Learning rate for training")
parser.add_argument("--batch_size", type=int, default=32, help="Batch size for training")
parser.add_argument("--min_members", type=int, default=2, help="Minimum members for aggregation")
parser.add_argument("--dataset_path", type=str, required=True, help="dataset path")
parser.add_argument("--dataset_yaml_path", type=str, required=True, help="dataset configuration file path")
parser.add_argument("--detector_protocol", type=str, default="http", help="detector node protocol")
parser.add_argument("--detector_host", type=str, default="127.0.0.1", help="detector node IP address")
parser.add_argument("--detector_port", type=int, default=5000, help="detector node port")
parser.add_argument("--enable_detector", type=int, default=0, help="enable detector. i.e. 0-false, 1-true")
parser.add_argument("--max_round_of_FL", type=int, default=0, help="Maximum number of rounds of federated learning")


args = parser.parse_args()

# Assemble the runtime configuration from the parsed CLI arguments.
eth_provider = f"{args.eth_protocol}://{args.eth_host}:{args.eth_port}"
config = Config(
    eth_provider=eth_provider,
    ipfs_host=args.ipfs_host,
    ipfs_port=args.ipfs_port,
    learning_rate=args.learning_rate,
    batch_size=args.batch_size,
    min_members=args.min_members,
    dataset_path=args.dataset_path,
    dataset_yaml_path=args.dataset_yaml_path,
    detector_protocol=args.detector_protocol,
    detector_host=args.detector_host,
    detector_port=args.detector_port,
    # Idiomatic boolean expression (was 'True if args.enable_detector==1 else False').
    enable_detector=args.enable_detector == 1,
    max_round_of_FL=args.max_round_of_FL
)
def terminate_processes(process):
    """Stop the given child process and block until it has fully exited."""
    logger.info("Terminating all processes...")
    # Ask the process to stop, then reap it so no zombie is left behind.
    for step in (process.terminate, process.join):
        step()
    logger.info("All processes have been terminated.")

def signal_handler(sig, frame):
    """Handle SIGINT/SIGTERM: stop child processes, then exit cleanly.

    Bug fix: the original referenced the global 'process' unconditionally,
    but that name exists only when the detector heartbeat was started
    (config.ENABLE_DETECTOR), so a signal would raise NameError otherwise.
    Both child processes are now looked up defensively, and the filter
    refresher ('refresh_p') is terminated too instead of being leaked.
    """
    logger.info(f"Signal {sig} received. Exiting...")
    for proc_name in ("process", "refresh_p"):
        proc = globals().get(proc_name)
        if proc is not None:
            terminate_processes(proc)
    sys.exit(0)


async def try_to_update(client: FedChainClient, ipfs, chain, stop_event: asyncio.Event):
    """Attempt one local-training round and submit the result on-chain.

    Skips silently when this client has already uploaded for the current
    round or when training has been stopped. On round 0, uploads the
    client's model as the initial global model instead of training.

    Args:
        client: federated-learning client (training, upload lock state).
        ipfs: IPFS manager used to download/upload model parameters.
        chain: blockchain manager wrapping the FL smart contract.
        stop_event: set when federated learning has finished.
    """
    logging.debug('trying to update')
    if client.is_uploaded or stop_event.is_set():
        return

    now_round = chain.contract.functions.getCurrentRound().call()

    # Round 0 means no initial global model exists on-chain yet, so upload one first.
    if now_round == 0:
        logger.info("initializing global models")
        # Bug fix: use the 'chain' parameter rather than silently depending on
        # the module-level 'blockchain_manager' global.
        chain.initialize_model(client.model)
        return

    logger.info("trying to update")
    # Fetch metadata of the most recently updated global model.
    latest_update = chain.contract.functions.getLatestUpdate().call()

    # The IPFS hash is the third field of the Solidity struct, hence index 2.
    ipfs_hash = latest_update[2]

    logger.info("training...")
    # Download the model by hash and train it on the local dataset.
    parameters = ipfs.download_from_ipfs(ipfs_hash)
    new_parameters, num_examples = client.fit(parameters)

    logger.info("uploading the local model")
    # Upload the freshly trained local model to IPFS.
    new_ipfs_hash = ipfs.upload_to_ipfs(new_parameters)
    logger.info(f"New ipfs hash: {new_ipfs_hash}")
    try:
        chain.submit_local_model_update(new_ipfs_hash, num_examples, now_round)
        client.lock_upload()
    except web3.exceptions.ContractLogicError as e:
        # The contract rejected the update (e.g. duplicate/late submission);
        # log and carry on — the next poll cycle will retry if appropriate.
        logger.warning(e.args[0])


async def handle_event(event, client, ipfs, chain, vrf, stop_event):
    """Dispatch a single contract event to the corresponding FL action.

    Args:
        event: decoded web3 event dict with 'event' (name) and 'args' keys.
        client: federated-learning client (aggregation, Krum, upload lock).
        ipfs: IPFS manager for model download/upload.
        chain: blockchain manager wrapping the FL smart contract.
        vrf: VRF helper used during aggregator selection.
        stop_event: set here when federated learning finishes.
    """
    global IS_TRAINING

    event_name = event['event']
    event_args = event['args']

    # Events re-emitted by the periodic filter refresh are flagged; skip them.
    if event_args['isRefresh'] == 1:
        return

    if event_name == 'AggregatorSelected':
        # Check whether this node is the selected aggregator.
        if event_args['aggregator'] == chain.w3.eth.accounts[0]:
            if not IS_TRAINING:
                chain.set_max_round(config.MAX_ROUND_OF_FL)
                IS_TRAINING = True
            ipfs_hash = event_args['ipfsHash']
            chain.submit_global_model_update(ipfs_hash, config.MIN_MEMBERS)


    elif event_name == 'GlobalModelUpdated':
        logger.info("Global model updated")
        client.unlock_upload()

    elif event_name == 'LocalModelUpdatedFinished':
        logger.info("Local model updated finished")

        # Register with the VRF, then generate and submit a proof for validation.
        logger.info("trying to register vrf account")
        vrf.register_on_chain()

        logger.info("initializing seed of vrf")
        vrf.generate_seed_on_chain()
        seed = vrf.get_seed_from_chain()

        logger.info("validating proof on chain")
        r, s, v = vrf.generate_proof(seed)
        vrf.submit_proof_on_chain(r, s, v)
        logger.info("proof validated")

    elif event_name == 'StartSelectAggregators':
        # Brief pause before closing the registry. Bug fix: use asyncio.sleep —
        # time.sleep() here would block the whole event loop and stall every
        # other pending task for the duration.
        await asyncio.sleep(2)
        vrf.close_registry_on_chain()
        logger.info("selecting aggregators")
        chain.select_aggregators()

    elif event_name == 'ConfirmCandidateAggregators':
        arr = event_args['aggregators']
        logger.info(f"{arr}")
        if chain.account in arr:
            logger.info("Successfully was selected candidate of aggregator")

            # Aggregate every submitted local update into a candidate global model.
            local_updates = chain.contract.functions.getLocalUpdates().call()
            local_weights = [(ipfs.download_from_ipfs(update[0][2]), update[1])
                             for update in local_updates]
            aggregated_weights = client.aggregate([(weights, num_examples) for weights, num_examples in local_weights])
            new_global_ipfs_hash = ipfs.upload_to_ipfs(aggregated_weights)

            chain.submit_candidate_global_model_update(new_global_ipfs_hash)
            logger.info(f"Submitted new global models update to IPFS hash: {new_global_ipfs_hash}")

    elif event_name == 'StartVote':
        candidates = chain.get_candidate_aggregators()
        if chain.account in candidates:
            candidates = chain.get_candidate_global_model_update()
            updates = [(ipfs.download_from_ipfs(update[1]), update[0])
                       for update in candidates]
            # Filter candidate global models via Krum selection (keep top 80%).
            result = client.krum_selection(updates, int(len(updates)*0.8))

            logger.info("voting the final global model")
            chain.vote_global_model_update(result[1])

    elif event_name == 'FederatedLearningFinished':
        IS_TRAINING = False
        logger.info("Federated learning finished!")
        stop_event.set()

    elif event_name == 'VoteFinished':
        logger.info("Vote finished!")




# Start decentralized training: the main event-polling loop.
async def decentralized_training(client, ipfs, chain, vrf, event_filters, stop_event):
    """Poll contract event filters and dispatch work until training finishes.

    Each new event is handled concurrently in its own task, and a training
    attempt is also scheduled once per iteration. Bug fix: the event loop
    holds only weak references to tasks, so the original fire-and-forget
    asyncio.create_task() calls could let a task be garbage-collected
    mid-flight; strong references are now kept until each task completes.
    """
    background_tasks = set()

    def _spawn(coro):
        # Keep a strong reference to the task until it is done.
        task = asyncio.create_task(coro)
        background_tasks.add(task)
        task.add_done_callback(background_tasks.discard)

    while not stop_event.is_set():
        for event_filter in event_filters:
            for event in event_filter.get_new_entries():
                _spawn(handle_event(event, client, ipfs, chain, vrf, stop_event))

        _spawn(try_to_update(client, ipfs, chain, stop_event))
        await asyncio.sleep(1)


def create_event_filters(blockchain_manager: BlockchainManager):
    """Create a 'latest'-block filter for every contract event this client handles.

    Returns the filters in a fixed order so callers can iterate them directly.
    """
    event_names = (
        'LocalModelUpdated',
        'GlobalModelUpdated',
        'AggregatorSelected',
        'FederatedLearningFinished',
        'LocalModelUpdatedFinished',
        'StartSelectAggregators',
        'ConfirmCandidateAggregators',
        'StartVote',
        'VoteFinished',
    )
    events = blockchain_manager.contract.events
    return [getattr(events, name).create_filter(fromBlock='latest') for name in event_names]

def refresh_event_filters(chain: BlockchainManager, interval: int):
    """Refresh the chain's event filters every ``interval`` seconds, forever.

    Intended to run in a dedicated process so filters never expire.
    Note the sleep comes first: the initial refresh is deliberately delayed.
    """
    while True:
        time.sleep(interval)
        logger.info("Refreshing ...")
        chain.refresh_events()

if __name__ == "__main__":
    # Initialize the core components.
    ipfs_client = IPFSManager(config)
    blockchain_manager = BlockchainManager(config, ipfs_client)
    client = FedChainClient(config, ipfs_client)
    vrf = VRF(config)

    # If this node should be monitored, start a separate process that sends
    # heartbeat messages to the detector.
    if config.ENABLE_DETECTOR:
        logger.info("Starting detector...")
        process = multiprocessing.Process(target=start_heartbeat, args=(config, blockchain_manager))
        process.start()

    # Start the filter-refresh process so the event filters never expire.
    refresh_p = multiprocessing.Process(target=refresh_event_filters, args=(blockchain_manager, 600))
    refresh_p.start()

    signal.signal(signal.SIGINT, signal_handler)   # Ctrl+C
    signal.signal(signal.SIGTERM, signal_handler)  # normal termination request

    stop_event = asyncio.Event()
    event_filters = create_event_filters(blockchain_manager)

    # Run federated learning until the contract signals completion.
    asyncio.run(decentralized_training(client, ipfs_client, blockchain_manager, vrf, event_filters, stop_event))

    # Shut down child processes. Bug fix: join() after terminate() so the
    # children are reaped instead of being left as zombies.
    refresh_p.terminate()
    refresh_p.join()
    if config.ENABLE_DETECTOR:
        process.terminate()
        process.join()
