import random
import sys
import threading
import time
import os.path
import logging
import zmq


# Step 1: create the root logger; a file handler is attached in __main__.
logger = logging.getLogger()
logger.setLevel(logging.INFO)  # master switch: minimum level accepted by the logger

# Number of simulated client / worker threads each broker spawns.
NBR_CLIENTS = 10
NBR_WORKERS = 3


def client_task(name, i):
    """Simulated client thread.

    Connects a REQ socket to broker *name*'s local frontend and, in random
    bursts, sends task ids which it expects echoed back within 10 seconds.
    Exits on poll timeout (lost task) or when the context is terminated.
    """
    context = zmq.Context()
    sock = context.socket(zmq.REQ)
    sock.identity = ("%s-Client%s" % (name, i)).encode("utf-8")
    sock.connect("ipc://%s-localfe.ipc" % name)
    status = context.socket(zmq.PUSH)
    status.connect("ipc://%s-monitor.ipc" % name)

    waiter = zmq.Poller()
    waiter.register(sock, zmq.POLLIN)
    while True:
        time.sleep(random.randint(0, 5))
        burst = random.randint(0, 15)
        for _ in range(burst):
            task_id = "%04X" % random.randint(0, 10000)
            logging.info("%s-Client%s: send %s" % (name, i, task_id))
            sock.send_string(task_id)

            try:
                ready = dict(waiter.poll(10000))
            except zmq.ZMQError:
                # Context terminated — shut the thread down.
                return

            if not ready:
                # Timed out waiting for the echo: give up on this client.
                # monitor.send_string("E: CLIENT EXIT - lost task %s" % task_id)
                return

            reply = sock.recv_string()
            assert reply == task_id, "expected %s, got %s" % (task_id, reply)
            # monitor.send_string(reply)
            logging.info("%s-Client%s: received %s" % (name, i, reply))


def worker_task(name, i):
    """Simulated worker thread.

    Connects a REQ socket to broker *name*'s local backend, announces
    itself with a READY message, then echoes every request back after a
    short random delay. Exits when the context is terminated.
    """
    context = zmq.Context()
    sock = context.socket(zmq.REQ)
    sock.identity = ("%s-Worker%s" % (name, i)).encode("utf-8")
    sock.connect("ipc://%s-localbe.ipc" % name)

    logging.info("%s-Worker%s: send READY" % (name, i))
    sock.send(b"READY")

    while True:
        try:
            frames = sock.recv_multipart()
        except zmq.ZMQError:
            # Context terminated — shut the thread down.
            return
        logging.info("%s-Worker%s: received %s" % (name, i, frames))
        time.sleep(random.randint(0, 1))  # pretend to do some work
        logging.info("%s-Worker%s: send %s" % (name, i, frames))
        sock.send_multipart(frames)


def main(myself, peers):
    """Run one broker node of the peering network.

    Parameters:
        myself: this broker's name (str or bytes); used both for the ipc
            endpoint names and as the ROUTER identity on the cloud sockets.
        peers: names of the other brokers to federate with (may be empty).

    Binds the local frontend/backend, cloud frontend/backend, state PUB/SUB
    and monitor sockets, spawns the simulated client and worker threads,
    then brokers requests forever: local workers are preferred; overflow is
    routed to a random peer whenever the cloud reports spare capacity.
    Returns when the poll raises zmq.ZMQError (context terminated).
    """
    logging.info("I: preparing broker at %s…" % myself)
    ctx = zmq.Context()

    ident = myself if isinstance(myself, bytes) else myself.encode("utf-8")

    # Cloud frontend: peers connect here to send us their overflow requests.
    # The explicit IDENTITY lets peers address replies back to us by name.
    cloudfe = ctx.socket(zmq.ROUTER)
    cloudfe.setsockopt(zmq.IDENTITY, ident)
    cloudfe.bind("ipc://%s-cloud.ipc" % myself)
    # State backend: we publish our spare-worker count to interested peers.
    statebe = ctx.socket(zmq.PUB)
    statebe.bind("ipc://%s-state.ipc" % myself)

    # Cloud backend: we connect to every peer's cloud frontend.
    cloudbe = ctx.socket(zmq.ROUTER)
    cloudbe.setsockopt(zmq.IDENTITY, ident)
    # State frontend: subscribe to every peer's capacity broadcasts.
    statefe = ctx.socket(zmq.SUB)
    statefe.setsockopt(zmq.SUBSCRIBE, b'')

    for peer in peers:
        logging.info("I: connecting to cloud frontend at %s" % peer)
        cloudbe.connect("ipc://%s-cloud.ipc" % peer)
        logging.info("I: connecting to state backend at %s" % peer)
        statefe.connect("ipc://%s-state.ipc" % peer)

    # Normalize peer names to bytes for comparison against ROUTER addresses.
    # (A comprehension also handles the no-peers case, where peers[0] would
    # have raised IndexError.)
    peers = [p if isinstance(p, bytes) else p.encode("utf-8") for p in peers]

    localfe = ctx.socket(zmq.ROUTER)
    localfe.bind("ipc://%s-localfe.ipc" % myself)
    localbe = ctx.socket(zmq.ROUTER)
    localbe.bind("ipc://%s-localbe.ipc" % myself)

    # Monitor: clients may push diagnostic strings here.
    monitor = ctx.socket(zmq.PULL)
    monitor.bind("ipc://%s-monitor.ipc" % myself)

    for i in range(NBR_WORKERS):
        thread = threading.Thread(target=worker_task, args=(myself, i))
        thread.daemon = True
        thread.start()

    for i in range(NBR_CLIENTS):
        thread_c = threading.Thread(target=client_task, args=(myself, i))
        thread_c.daemon = True
        thread_c.start()

    local_capacity = 0   # number of idle local workers (== len(workers))
    cloud_capacity = 0   # most recent capacity reported by any peer
    workers = []         # queue of idle local worker addresses

    pollerbe = zmq.Poller()
    pollerbe.register(localbe, zmq.POLLIN)
    pollerbe.register(cloudbe, zmq.POLLIN)
    pollerbe.register(statefe, zmq.POLLIN)
    pollerbe.register(monitor, zmq.POLLIN)

    while True:
        try:
            # Only block indefinitely when we have no idle worker to offer;
            # otherwise wake at least once a second to advertise capacity.
            events = dict(pollerbe.poll(1000 if local_capacity else None))
        except zmq.ZMQError:
            break

        previous = local_capacity
        msg = None
        if localbe in events:
            # A local worker finished (or said READY): queue it as idle.
            msg = localbe.recv_multipart()
            (address, empty), msg = msg[:2], msg[2:]
            workers.append(address)
            local_capacity += 1

            logging.info("%s localbe recv msg: %s after workers queue: %s local_capacity: %d" % (myself, msg, workers,
                                                                                                 local_capacity))
            if msg[-1] == b"READY":
                msg = None  # READY carries no reply to route
        elif cloudbe in events:
            # A peer returned the reply for a request we forwarded to it.
            msg = cloudbe.recv_multipart()
            (address, empty), msg = msg[:2], msg[2:]
            logging.info("%s cloudbe recv msg: %s from: %s" % (myself, msg, address))

        if msg is not None:
            # Route the reply: to a peer if it originated in the cloud,
            # otherwise to one of our own clients.
            address = msg[0]
            if address in peers:
                logging.info("%s cloudfe send msg: %s to: %s" % (myself, msg[2:], address))
                cloudfe.send_multipart(msg)
            else:
                logging.info("%s localfe send msg: %s after workers queue: %s local_capacity: %d" % (myself, msg,
                                                                                                     workers,
                                                                                                     local_capacity))
                localfe.send_multipart(msg)

        if statefe in events:
            # A peer broadcast its spare-worker count.
            peer, s = statefe.recv_multipart()
            logging.info("%s statefe recv from: %s state: %s" % (myself, peer, s))
            cloud_capacity = int(s)

        if monitor in events:
            # Drain the monitor socket; leaving it unread would keep the
            # poller waking on the same message in a busy loop.
            logging.info("%s monitor recv: %s" % (myself, monitor.recv_string()))

        logging.info("%s local_capacity: %d cloud_capacity: %d" % (myself, local_capacity, cloud_capacity))
        # Dispatch as many pending requests as we have capacity for.
        while local_capacity + cloud_capacity:
            secondary = zmq.Poller()
            secondary.register(localfe, zmq.POLLIN)
            if local_capacity:
                # Only accept cloud requests when a local worker is free —
                # we never re-forward a peer's request to another peer.
                secondary.register(cloudfe, zmq.POLLIN)
            events = dict(secondary.poll(0))

            if cloudfe in events:
                msg = cloudfe.recv_multipart()
                logging.info("%s cloudfe recv msg: %s" % (myself, msg))
            elif localfe in events:
                msg = localfe.recv_multipart()
                logging.info("%s localfe recv msg: %s" % (myself, msg))
            else:
                logging.info('pollerfe.poll break')
                break  # no pending requests

            if local_capacity:
                msg = [workers.pop(0), b''] + msg
                localbe.send_multipart(msg)
                local_capacity -= 1
                logging.info("%s localbe send msg: %s after workers queue: %s local_capacity: %d" % (myself, msg,
                                                                                                     workers,
                                                                                                     local_capacity))
            else:
                # No local worker free: offload to a randomly chosen peer.
                msg = [random.choice(peers), b''] + msg
                cloudbe.send_multipart(msg)
                logging.info("%s cloudbe send msg: %s" % (myself, msg))

        logging.info('workers is empty local_capacity: %s previous: %s' % (local_capacity, previous))
        if local_capacity != previous:
            # Capacity changed this cycle: broadcast the new value to peers.
            state = str(local_capacity).encode("utf-8")
            logging.info('%s statebe send msg: %s' % (myself, [ident, state]))
            statebe.send_multipart([ident, state])


if __name__ == "__main__":
    if len(sys.argv) >= 2:
        # 第二步，创建一个handler，用于写入日志文件
        logfile = os.path.join(os.getcwd(), 'logs', sys.argv[1] + '.log')
        fh = logging.FileHandler(logfile, mode='w')
        fh.setLevel(logging.DEBUG)  # 输出到file的log等级的开关
        # 第三步，定义handler的输出格式
        formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
        fh.setFormatter(formatter)
        # 第四步，将logger添加到handler里面
        logger.addHandler(fh)

        main(myself=sys.argv[1], peers=sys.argv[2:])
    else:
        sys.exit(1)
