import pickle
import threading
from time import sleep

import progressbar

from ..bootnodes import MAINNET_BOOTNODES_GETH
from ..config import ADDED_POOL, PING_PENDING_POOL, TOTAL_NODES, NULL_CHECK_INTERVAL, NULL_EPS, NULL_MAX, \
    PING_WAITING_POOL, NEIGHBORS_PENDING_POOL, NEIGHBORS_WAITING_POOL, PING_ATTEMPT, DOWN_POOL, COMPLETED_DOWN_POOL, \
    PING_MAX, PING_TIMEOUT, PING_CHECK_INTERVAL, NPING_MAX
from .status import Status
from ..data_management import remote_to_key
from protocol_utils import from_enode_uri, formatted_date


def add_boostrap_nodes(redis_db):
    """Seed the crawl with the geth mainnet bootstrap nodes.

    For every bootnode URI not already tracked in ADDED_POOL, store its
    pickled metadata record under its remote key and queue it for an
    initial ping.

    :param redis_db: redis client holding the shared crawl state
    """
    for enode_uri in MAINNET_BOOTNODES_GETH:
        remote, pubkey = from_enode_uri(enode_uri)
        remote_key: bytes = remote_to_key(remote)
        # sadd returns the number of members actually added, so a truthy
        # result means the node was not yet known.  This replaces the
        # separate sismember round-trip and removes its check/act race.
        if redis_db.sadd(ADDED_POOL, remote_key):
            redis_db.set(remote_key, pickle.dumps({'IP address': remote[0],
                                                   'UDP port': remote[1],
                                                   'TCP port': remote[2],
                                                   'Pending': formatted_date(),
                                                   'Ping': 0,
                                                   'Ping_ng': 0,
                                                   'Node ID': pubkey.to_hex(),
                                                   'Seen node IDs': {pubkey.to_hex()}}))
            redis_db.sadd(PING_PENDING_POOL, remote_key)


def null_delta_check(conn, null_delta, quiet=False):
    """Monitor discovery progress and shut the crawl down once it stalls.

    Every NULL_CHECK_INTERVAL seconds, compare the current size of
    ADDED_POOL with the value persisted under TOTAL_NODES.  While
    neighbor collection is active, decrement ``null_delta`` each interval
    in which fewer than NULL_EPS new nodes appeared; when it reaches zero
    collection is switched off.  Once collection is off and every
    ping/neighbor pool is empty, the global stop flag is raised, all
    outstanding timers are cancelled and the connection is closed.

    :param conn: crawler connection exposing ``redis_db``, ``logger``,
        ``collect_neighbors`` and ``close()``
    :param null_delta: number of consecutive low-growth intervals to
        tolerate before stopping neighbor collection
    :param quiet: when True, no progress bar is displayed
    """
    bar = None if quiet else progressbar.ProgressBar(max_value=progressbar.UnknownLength)

    while not Status.get()['stopped']:
        old = conn.redis_db.get(TOTAL_NODES)
        current = conn.redis_db.scard(ADDED_POOL)
        conn.redis_db.set(TOTAL_NODES, str(current))
        old = 0 if old is None else int(old)
        conn.logger.info('{:d} new nodes discovered in {:.1f} seconds'.format(current - old, NULL_CHECK_INTERVAL))

        # bar is None exactly when quiet is True, so checking the bar
        # alone is sufficient (the original also re-tested ``not quiet``).
        if bar is not None:
            bar.update(conn.redis_db.scard(ADDED_POOL))

        if conn.collect_neighbors:
            if current <= old + NULL_EPS:  # not enough new nodes were added
                null_delta -= 1
                if null_delta == 0:  # we don't need more nodes
                    conn.collect_neighbors = False
                    conn.logger.info('Stopped neighbors collect')
            else:
                null_delta = NULL_MAX  # we reset the count

        if not conn.collect_neighbors and conn.redis_db.scard(PING_PENDING_POOL) == 0 and conn.redis_db.scard(
                PING_WAITING_POOL) == 0 and conn.redis_db.scard(NEIGHBORS_PENDING_POOL) == 0 and conn.redis_db.scard(
                NEIGHBORS_WAITING_POOL) == 0:
            Status.get()['stopped'] = True  # If the collect is done and pools are empty, the crawl is finished
        else:
            sleep(NULL_CHECK_INTERVAL)

    # Cancel any pending retry timers so the process can exit cleanly.
    for timer in Status.get()['timers']:
        if timer.is_alive():
            timer.cancel()
    conn.logger.info('Pending and waiting pools are now empty')
    conn.close()


def check_node_ping_timeout(redis_db, node_key):
    """Timer callback: requeue or retire a node whose ping went unanswered.

    If the node is still waiting for a pong when the timer fires, it is
    either put back into PING_PENDING_POOL for another attempt or, once
    PING_ATTEMPT pings have been sent, moved to DOWN_POOL.

    :param redis_db: redis client holding the shared crawl state
    :param node_key: redis key identifying the node record
    """
    # srem returns the number of members removed, so using its return
    # value (instead of sismember followed by srem) makes the
    # check-and-remove atomic: two concurrent timers for the same key
    # cannot both process the node.
    if redis_db.srem(PING_WAITING_POOL, node_key):  # node hasn't responded yet, we'll retry
        node = pickle.loads(redis_db.get(node_key))
        if node['Ping'] < PING_ATTEMPT:
            redis_db.sadd(PING_PENDING_POOL, node_key)
        else:
            redis_db.sadd(DOWN_POOL, node_key)

def check_node_neighbor_timeout(redis_db, node_key):
    """Timer callback: requeue or retire a node whose FindNode went unanswered.

    If the node is still waiting for a Neighbors reply when the timer
    fires, it is either put back into NEIGHBORS_PENDING_POOL for another
    attempt or, once PING_ATTEMPT requests have been sent, moved to
    COMPLETED_DOWN_POOL.

    :param redis_db: redis client holding the shared crawl state
    :param node_key: redis key identifying the node record
    """
    # Atomic check-and-remove via srem's return value, matching
    # check_node_ping_timeout: avoids the sismember/srem race between
    # concurrent timers firing for the same key.
    if redis_db.srem(NEIGHBORS_WAITING_POOL, node_key):  # node hasn't responded yet, we'll retry
        node = pickle.loads(redis_db.get(node_key))
        if node['Ping_ng'] < PING_ATTEMPT:
            redis_db.sadd(NEIGHBORS_PENDING_POOL, node_key)
        else:
            redis_db.sadd(COMPLETED_DOWN_POOL, node_key)

def process_pending_pings(conn):
    """Drain PING_PENDING_POOL, pinging nodes until the crawl stops.

    Each pass pulls at most ``PING_MAX - |PING_WAITING_POOL|`` nodes so
    no more than PING_MAX pings are in flight, sends a ping to each, and
    arms a timer that requeues or retires the node if no response
    arrives within PING_TIMEOUT seconds.

    :param conn: crawler connection exposing ``redis_db`` and ``ping()``
    """
    while not Status.get()['stopped']:
        node_keys = conn.redis_db.smembers(PING_PENDING_POOL)
        # Budget of new pings so the waiting pool never exceeds PING_MAX.
        to_pull = PING_MAX - conn.redis_db.scard(PING_WAITING_POOL)
        for node_key in node_keys:
            if to_pull < 1 or Status.get()['stopped']:
                break
            to_pull -= 1
            conn.redis_db.srem(PING_PENDING_POOL, node_key)
            node = pickle.loads(conn.redis_db.get(node_key))
            conn.ping([node['IP address'], node['UDP port'], node['TCP port']])
            # Don't arm new timers once shutdown has started: null_delta_check
            # cancels the registered timers and new ones would leak.
            if not Status.get()['stopped']:
                timer = threading.Timer(PING_TIMEOUT, check_node_ping_timeout, args=[conn.redis_db, node_key])
                Status.get()['timers'].append(timer)
                timer.start()
        sleep(PING_CHECK_INTERVAL)


def process_pending_neighbors(conn):
    """Drain NEIGHBORS_PENDING_POOL, requesting neighbors until the crawl stops.

    Each pass issues at most ``NPING_MAX - |NEIGHBORS_WAITING_POOL|``
    FindNode requests and arms a retry timer per request.

    :param conn: crawler connection exposing ``redis_db`` and ``find_nodes_fix()``
    """
    while not Status.get()['stopped']:
        pending = conn.redis_db.smembers(NEIGHBORS_PENDING_POOL)
        # Budget keeps the number of outstanding requests under NPING_MAX.
        budget = NPING_MAX - conn.redis_db.scard(NEIGHBORS_WAITING_POOL)
        for key in pending:
            if budget < 1 or Status.get()['stopped']:
                break
            budget -= 1
            conn.redis_db.srem(NEIGHBORS_PENDING_POOL, key)
            record = pickle.loads(conn.redis_db.get(key))
            endpoint = [record['IP address'], record['UDP port'], record['TCP port']]
            conn.find_nodes_fix(endpoint)
            if Status.get()['stopped']:
                continue  # shutting down: don't arm a new retry timer
            watchdog = threading.Timer(PING_TIMEOUT, check_node_neighbor_timeout, args=[conn.redis_db, key])
            Status.get()['timers'].append(watchdog)
            watchdog.start()
        sleep(PING_CHECK_INTERVAL)
