/*
 * Copyright (c) 2010 Vadim Zhukov <persgray@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


#include "common.h"

#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/nameser.h>
#include <arpa/inet.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <netdb.h>
#include <poll.h>
#include <resolv.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include "../assl/assl.h"
#include "peertask.h"
#include "peer.h"
#include "resolver.h"
#include "timestamp.h"


#define DNS_CHECK_IN		60	/* Check DNS for collectors list */
                                        /* changes each N sec */
#define DEF_PEER_DNS_PREFIX	"_overwatch._tcp."

/*
 * Bookkeeping for a request broadcast to several peers.
 * NOTE(review): not referenced anywhere in the visible part of this
 * file -- possibly used by code outside this view, or dead.
 */
TAILQ_HEAD(task_broadcast_list, task_broadcast)	task_broadcasts =
    TAILQ_HEAD_INITIALIZER(task_broadcasts);
struct task_broadcast {
	TAILQ_ENTRY(task_broadcast)	  entry;	/* list linkage */
	struct peer			 *monitor;	/* originating monitor */
	struct peer			**queried;	/* peers asked so far */
	struct peer_task		 *requests;	/* outstanding requests */
	size_t				  req_count;	/* size of requests[] */
};

extern char			*__progname;	/* program name from csu */
int				 debug = 0;	/* verbosity; bumped by -d */

/*
 * Known peers, grouped by role.  "filters" holds exactly one entry
 * (see peer_is_filter below and main()).  ls_requests queues pending
 * LAST_SESSION queries awaiting responses.
 */
static struct peer_list		 collectors = TAILQ_HEAD_INITIALIZER(collectors);
static struct peer_list		 filters = TAILQ_HEAD_INITIALIZER(filters);
static struct peer_list		 monitors = TAILQ_HEAD_INITIALIZER(monitors);
static struct peer_task_queue	 ls_requests =
    SIMPLEQ_HEAD_INITIALIZER(ls_requests);
/* NOTE(review): ncollectors and nfilters are never updated or read in
 * the visible code; nmonitors is only read in init_monitor_session(). */
static size_t			 ncollectors = 0;
static size_t			 nfilters = 0;
static size_t			 nmonitors = 0;
static volatile sig_atomic_t	 quit_requested = 0;	/* set by sighandler */

#define peer_is_filter(peer)	((peer) == TAILQ_FIRST(&filters))
#define peer_is_collector(peer)	(peer_is_in_list(&collectors, (peer)))
#define peer_is_monitor(peer)	(peer_is_in_list(&monitors, (peer)))


static struct pollfd 	*build_poll_pool(size_t *, size_t, struct peer_list *,
			    ...);
static int		 check_filter_connection(struct peer *);
static int		 peer_is_in_list(const struct peer_list *,
			    const struct peer *);
static struct peer	*find_peer_by_name(struct peer_list *, const char *);
static void		 sighandler(int);
static __dead void	 usage(const char *);

static int	handle_peer_hello(struct peer *, const struct peer_task *,
		    struct peer_list *);
static int	init_monitor_session(struct peer *, struct peer_task *);

/* Peer fail handlers */
static void	handle_monitor_fail(struct peer *);
static void	handle_coalegue_fail(struct peer *);
static void	handle_filter_fail(struct peer *);

/* Task handlers */
static int	forward_to_filter(struct peer *, struct peer_task *);
static int	handle_monitor_hello(struct peer *, struct peer_task *);
static int	handle_coalegue_hello(struct peer *, struct peer_task *);
static int	handle_lastsess_query(struct peer *, struct peer_task *);
static int	handle_lastsess_response(struct peer *, struct peer_task *);
static int	handle_conf_change(struct peer *, struct peer_task *);
static int	handle_filter_msg(struct peer *, struct peer_task *);
static int	handle_ctrl_msg(struct peer *, struct peer_task *);

/*
 * Per-role task dispatch tables, consumed by peer_handle() in main().
 * Each entry pairs a callback with the queue it watches, the command it
 * matches (PEER_CMD_UNKNOWN acting as a catch-all last entry), and
 * handler flags -- field order assumed from the initializer layout;
 * confirm against struct peer_task_handler in peertask.h.
 */
struct peer_task_handler	monitor_handlers[] = {
{ &forward_to_filter,
	PEER_QUEUE_INPUT,
		PEER_CMD_LINE,
			PEER_TH_F_SUCCESS },
{ &handle_monitor_hello,
	PEER_QUEUE_INPUT,
		PEER_CMD_HELLO,
			PEER_TH_F_SUCCESS },
};

struct peer_task_handler	filter_handlers[] = {
{ &handle_lastsess_response,
	PEER_QUEUE_INPUT,
		PEER_CMD_LAST_SESSION_RESPONSE,
			PEER_TH_F_SUCCESS },
{ &handle_conf_change,
	PEER_QUEUE_INPUT,
		PEER_CMD_CONF_CHANGED,
			PEER_TH_F_SUCCESS },
{ &handle_filter_msg,
	PEER_QUEUE_INPUT,
		PEER_CMD_UNKNOWN,
			PEER_TH_F_SUCCESS },
};

struct peer_task_handler	coalegue_handlers[] = {
{ &handle_coalegue_hello,
	PEER_QUEUE_INPUT,
		PEER_CMD_HELLO,
			PEER_TH_F_SUCCESS },
{ &handle_lastsess_query,
	PEER_QUEUE_INPUT,
		PEER_CMD_LAST_SESSION_QUERY,
			PEER_TH_F_SUCCESS },
{ &handle_lastsess_response,
	PEER_QUEUE_INPUT,
		PEER_CMD_LAST_SESSION_RESPONSE,
			PEER_TH_F_SUCCESS },
{ &handle_conf_change,
	PEER_QUEUE_INPUT,
		PEER_CMD_CONF_CHANGED,
			PEER_TH_F_SUCCESS },
{ &forward_to_filter,
	PEER_QUEUE_INPUT,
		PEER_CMD_UNKNOWN,
			PEER_TH_F_SUCCESS },
};

struct peer_task_handler	ctrl_handlers[] = {
{ &handle_ctrl_msg,
	PEER_QUEUE_INPUT,
		PEER_CMD_UNKNOWN,
			PEER_TH_F_SUCCESS },
};


/*
 * Handle situations when a peer gets status PEER_STATUS_FAILED.
 *
 * NOTE: handle_xxx_fail() routines may free the peer given.
 */

/* Monitor failed: unlink it from the monitors list and free it. */
void
handle_monitor_fail(struct peer *peer) {
	DPRINTFX(3, "monitor %s failed, removing", peer->name);
	TAILQ_REMOVE(&monitors, peer, entry);
	peer_free(peer);
}

/* Collector failed: drop the connection; the peer object stays listed. */
void
handle_coalegue_fail(struct peer *peer) {
	DPRINTFX(2, "collector %s failed, dropping connection", peer->name);
	peer_close(peer);
}

/* Filter failed: drop the connection; check_filter_connection() will
 * attempt to reconnect it from the main loop. */
void
handle_filter_fail(struct peer *peer) {
	DPRINTFX(1, "filter %s failed, dropping connection", peer->name);
	peer_close(peer);
}


/*
 * Handle peer tasks. Some routines used more than for one peer type, beware.
 */

/*
 * Hand a task over to the local filter, prefixing its data buffers with
 * the sending peer's name so the filter knows the origin.
 *
 * Returns PEER_TH_CB_CARED when the filter took ownership of the task;
 * on any failure marks the sender PEER_STATUS_FAILED and returns
 * PEER_TH_CB_FREE so the caller releases the task.
 */
int
forward_to_filter(struct peer *sender, struct peer_task *task) {
	struct peer_data_buffer	*namebuf;

	if (!peer_session_established(sender)) {
		DPRINTFX(1, "forward_to_filter: no session established");
		sender->status = PEER_STATUS_FAILED;
		return PEER_TH_CB_FREE;
	}
	if (task_buf_count(task) >= MAX_TASK_BUFFERS) {
		DPRINTFX(0, "forward_to_filter: not enough data buffers");
		sender->status = PEER_STATUS_FAILED;
		return PEER_TH_CB_FREE;
	}
	namebuf = peer_new_data_buffer(strlen(sender->name), sender->name);
	if (namebuf == NULL) {
		DPRINTF(0, "forward_to_filter: peer_new_data_buffer");
		sender->status = PEER_STATUS_FAILED;
		return PEER_TH_CB_FREE;
	}
	/* Insert the name as the first data buffer, then queue it. */
	peer_add_task_data(task, namebuf, 0);
	if (peer_add_task(TAILQ_FIRST(&filters), task) == -1) {
		DPRINTF(0, "forward_to_filter: peer_add_task");
		peer_free_data_buffer(namebuf);
		sender->status = PEER_STATUS_FAILED;
		return PEER_TH_CB_FREE;
	}
	return PEER_TH_CB_CARED;
}

/*
 * Common HELLO processing: record the peer's self-reported name and
 * evict any stale entry with the same name from "plist", inheriting the
 * highest session ID seen for that name.
 *
 * Returns 0 on success; -1 on failure with the sender marked
 * PEER_STATUS_FAILED.
 */
int
handle_peer_hello(struct peer *sender, const struct peer_task *task,
    struct peer_list *plist) {
	struct peer	*p, *p2;

	if (sender->status != PEER_STATUS_CONNECTED) {
		DPRINTFX(1, "handle_peer_hello: peer is not connected, "
		    "status is %s", peer_status_name(sender));
		goto err;
	}
	/* First data buffer carries the peer's name. */
	if (strlcpy(sender->name, task->databuf[0]->data, sizeof(sender->name))
	    >= sizeof(sender->name)) {
		DPRINTFX(1, "handle_peer_hello: too long name sent by peer: %s,"
		    " closing connection", sender->name);
		goto err;
	}
	sender->status = PEER_STATUS_HELLO_SENT;
	/* Safe-walk: p may be unlinked and freed inside the loop body. */
	for (p = TAILQ_FIRST(plist); p != NULL; p = p2) {
		p2 = TAILQ_NEXT(p, entry);
		if (p == sender)
			continue;
		if (strcmp(p->name, sender->name))
			continue;
		DPRINTFX(3, "closing previous session %u of peer %s",
		    (unsigned int)p->session, p->name);
		sender->session = max(sender->session, p->session);
		TAILQ_REMOVE(plist, p, entry);
		peer_free(p);
	}
	/*
	 * NOTE(review): this unconditionally overrides the HELLO_SENT
	 * status set above, yet handle_lastsess_response() expects the
	 * monitor to still be in HELLO_SENT when replies arrive --
	 * confirm this transition is intended here.
	 */
	sender->status = PEER_STATUS_SESSION;
	return 0;

err:
	sender->status = PEER_STATUS_FAILED;
	return -1;
}

/*
 * If needed, ask the local filter and the coalegues for the ID of the
 * last session with this peer.
 *
 * On success, return 0 and "lsquery" will be taken care of later.
 *
 * On error, return -1 and "lsquery" should be freed manually. NOTE: it
 * may be modified.
 */
int
init_monitor_session(struct peer *peer, struct peer_task *lsquery) {
	struct peer		*p;
	struct peer_task	*creqs;
	size_t			 cnt, i;
	int			 oerrno;

	creqs = NULL;
	cnt = 0;

	/*
	 * Fast path: we already know a session ID for this monitor
	 * (inherited from an evicted duplicate in handle_peer_hello).
	 * Bump it, avoiding the reserved value 0 on wrap, and reply
	 * immediately by reusing the query task as a HELLO response.
	 */
	if (peer->session) {
		peer->session++;
		if (peer->session == 0) {
			DPRINTFX(2, "session ID for %s wrapped", peer->name);
			peer->session = 1;
		}
		peer->status = PEER_STATUS_SESSION;
		peer_convert_task_to_hello_response(lsquery, peer->session);
		if (peer_add_task(peer, lsquery) == -1) {
			DPRINTF(0, "init_session(%s): peer_add_task",
			    peer->name);
			peer->status = PEER_STATUS_FAILED;
			goto err;
		}
		return 0;
	}

	/* Get number of working coalegues */
	TAILQ_FOREACH(p, &collectors, entry)
		if (p->status == PEER_STATUS_SESSION)
			cnt++;

	/* Create one request per active collector plus the local filter. */
	creqs = peer_new_task(cnt + 1, lsquery);
	if (creqs == NULL)
		goto err;
	/* Now we'll care about given task, or remove it back from queue. */
	for (i = 0; i < cnt + 1; i++)
		/* Will not fail: attach the monitor name to each request. */
		peer_add_task_data(&creqs[i], lsquery->databuf[0], 0);

	/* Send requests. Local filter should not fail, others may. */
	p = TAILQ_FIRST(&filters);
	if (peer_add_task(p, &creqs[0]) == -1) {
		DPRINTF(0, "init_monitor_session: peer_add_task");
		p->status = PEER_STATUS_FAILED;
		goto err;
	}
	/* Park the query; handle_lastsess_response() matches replies to it. */
	SIMPLEQ_INSERT_TAIL(&ls_requests, lsquery, entry);
	i = 1;
	TAILQ_FOREACH(p, &collectors, entry)
		if (p->status == PEER_STATUS_SESSION) {
			if (peer_add_task(p, &creqs[i]) == -1) {
				/* Mark this request failed but keep going. */
				creqs[i].error = errno;
				creqs[i].phase = PEER_TASK_PHASE_DONE;
				DPRINTF(0, "init_monitor_session: "
				    "peer_add_task(%s)", p->name);
				p->status = PEER_STATUS_FAILED;
			}
			i++;
		}
	return 0;

err:
	oerrno = errno;
	DPRINTF(0, "init_session(%s)", peer->name);
	if (creqs != NULL)
		/*
		 * creqs holds cnt + 1 tasks (one per active collector plus
		 * the local filter); the previous code freed nmonitors + 1
		 * here, over- or under-freeing whenever the counts differ.
		 */
		peer_free_task(creqs, cnt + 1);
	peer->status = PEER_STATUS_FAILED;
	errno = oerrno;
	return -1;
}

/*
 * HELLO from a collector peer: register it in the collectors list.
 * The task is always released afterwards; failure already marked the
 * sender PEER_STATUS_FAILED inside handle_peer_hello().
 */
int
handle_coalegue_hello(struct peer *sender, struct peer_task *task) {
	/* Result intentionally ignored -- nothing more to do either way. */
	handle_peer_hello(sender, task, &collectors);
	return PEER_TH_CB_FREE;
}

/*
 * HELLO from a monitor: register it, then kick off session ID
 * negotiation.  Returns PEER_TH_CB_CARED when init_monitor_session()
 * took ownership of the task, PEER_TH_CB_FREE on any failure.
 */
int
handle_monitor_hello(struct peer *sender, struct peer_task *task) {
	int	failed;

	/* Short-circuit keeps the original ordering: register first,
	 * and only start the session when registration succeeded. */
	failed = handle_peer_hello(sender, task, &monitors) != 0 ||
	    init_monitor_session(sender, task) != 0;
	/* See init_monitor_session() description for details on return code. */
	return failed ? PEER_TH_CB_FREE : PEER_TH_CB_CARED;
}

/*
 * Answer a LAST_SESSION query naming a monitor.
 *
 * If that monitor is connected here with a live session, notify it that
 * a duplicate exists and reply directly to the querier with its session
 * ID; otherwise forward the query to the local filter.
 */
int
handle_lastsess_query(struct peer *sender, struct peer_task *q) {
	struct peer				*p;
	struct peer_task			*notify;
	struct peer_data_buffer			*buf;
	struct peer_msg_last_session_resp	*r;

	if (task_buf_count(q) == 0) {
		DPRINTFX(1, "handle_lastsess_query: incorrect query");
		sender->status = PEER_STATUS_FAILED;
		return PEER_TH_CB_FREE;
	}
	/* First data buffer names the monitor being asked about. */
	p = find_peer_by_name(&monitors, q->databuf[0]->data);
	if (p != NULL && p->session) {
		/* Tell the connected monitor it has a duplicate elsewhere. */
		notify = peer_new_task(1, NULL);
		if (notify == NULL) {
			DPRINTF(1, "handle_lastsess_query: peer_new_task");
			/* Do at least what we can do there */
			peer_close(p);
			goto sendreply;
		}
		peer_init_error_task(notify, PEER_ERR_EXIST);
		peer_add_task(p, notify);

sendreply:
		buf = peer_new_data_buffer(
		    sizeof(struct peer_msg_last_session_resp), NULL);
		if (buf == NULL) {
			DPRINTF(1,
			    "handle_lastsess_query: peer_new_data_buffer");
			sender->status = PEER_STATUS_FAILED;
			return PEER_TH_CB_FREE;
		}
		r = (struct peer_msg_last_session_resp *)buf->data;
		r->lastsession = p->session;
		/* Reuse the query task as the response, appending the ID. */
		peer_convert_task_to_last_sess_response(q);
		peer_add_task_data(q, buf, -1);
		if (peer_add_task(sender, q)) {
			DPRINTF(1, "handle_lastsess_query: peer_add_task");
			sender->status = PEER_STATUS_FAILED;
			return PEER_TH_CB_FREE;
		}
		return PEER_TH_CB_CARED;
	}
	return forward_to_filter(sender, q);
}

/*
 * Merge a LAST_SESSION response into its pending query (queued on
 * ls_requests by init_monitor_session()).  Responses are linked to
 * their requests through provoker/followers; once every sibling
 * request is PEER_TASK_PHASE_DONE, the highest session ID reported is
 * bumped and sent back to the monitor as a HELLO response.
 */
int
handle_lastsess_response(struct peer *sender, struct peer_task *resp) {
	struct peer				*monitor;
	struct peer_task			*req, *t, *oldp;
	struct peer_msg_last_session_resp	*rd;
	size_t					 i, count;
	uint32_t				 sess_id;
	int					 rv;

	/* Need both the monitor name and the response payload buffers. */
	if (task_buf_count(resp) < 2)
		goto errr;
	/* Match the response to its outstanding request by monitor name. */
	SIMPLEQ_FOREACH(req, &ls_requests, entry) {
		if (!strcmp(resp->databuf[0]->data, req->databuf[0]->data))
			goto found;
	}
errr:
	DPRINTFX(1, "handle_lastsess_response: incorrect response");
	sender->status = PEER_STATUS_FAILED;
	return PEER_TH_CB_FREE;

found:
	assert(resp->provoker == NULL);
	assert(req->followers == NULL);
	assert(req->nfollowers == 0);
	assert(req->provoker != NULL);

	/* Attach the response to its request and mark it answered. */
	req->phase = PEER_TASK_PHASE_DONE;
	resp->provoker = req;
	req->followers = resp;
	req->nfollowers = 1;

	/* Scan all sibling requests of the original query; if any is
	 * still pending, keep waiting.  Failed requests contribute no
	 * session ID. */
	sess_id = 0;
	for (i = 0; i < req->provoker->nfollowers; i++) {
		t = &req->provoker->followers[i];
		if (t->phase != PEER_TASK_PHASE_DONE)
			/* Not all replies received yet */
			return PEER_TH_CB_CARED;
		if (peer_task_failed(t))
			continue;
		rd = (struct peer_msg_last_session_resp *)t->databuf[1]->data;
		sess_id = max(sess_id, rd->lastsession);
	}

	/*
	 * Now send reply to HELLO message.
	 */

	/* Save these because they'll be overriden below */
	rv = PEER_TH_CB_CARED;
	oldp = req->provoker;
	req = oldp->followers;
	count = oldp->nfollowers;

	/* Allow to be freed: detach the response array from the query. */
	for (i = 0; i < count; i++)
		oldp->followers[i].provoker = NULL;
	oldp->followers = NULL;
	oldp->nfollowers = 0;

	/* Next session is one past the highest seen; 0 is reserved. */
	sess_id++;
	if (sess_id == 0) {
		DPRINTFX(2, "handle_lastsess_response: session ID wrapped");
		sess_id = 1;
	}
	peer_convert_task_to_hello_response(oldp, sess_id);
	monitor = find_peer_by_name(&monitors, req->databuf[0]->data);
	if (monitor == NULL)
		goto cleanup;
	if (monitor->status != PEER_STATUS_HELLO_SENT) {
		DPRINTFX(1, "handle_lastsess_response: monitor status is %s",
		    peer_status_name(monitor));
		rv = PEER_TH_CB_FREE;
		monitor->status = PEER_STATUS_FAILED;
		goto cleanup;
	}
	if (peer_add_task(monitor, oldp)) {
		DPRINTF(1, "handle_lastsess_response: peer_add_task");
		rv = PEER_TH_CB_FREE;
		monitor->status = PEER_STATUS_FAILED;
		goto cleanup;
	}
	monitor->status = PEER_STATUS_SESSION;

cleanup:
	peer_free_task(req, count);
	return rv;
}

/*
 * Route a CONF_CHANGED notification.  The object path in the first
 * data buffer picks the destination: collector/filter objects go to
 * the matching collector, monitor objects to the matching monitor;
 * with no match the task is broadcast to every open collector,
 * reusing the original task for the first recipient and cloning it
 * for the rest.
 */
int
handle_conf_change(struct peer *sender, struct peer_task *task) {
	struct peer_msg_conf_changed	*pmcc;
	struct peer_task		*tdst;
	struct peer			*dst;
	int				 reused;

	if (task_buf_count(task) < 2) {
		DPRINTFX(0, "handle_conf_change: not enough data buffers: %d",
		    (int)task_buf_count(task));
		goto err;
	}
	pmcc = (struct peer_msg_conf_changed *)task->databuf[0]->data;
	dst = NULL;
	/* NOTE(review): the full "/collector/..." path is passed to
	 * find_peer_by_name() -- peer names presumably include the
	 * prefix; confirm against where peer->name is assigned. */
	if (strncmp(pmcc->objname, "/collector/", 11) == 0 ||
	    strncmp(pmcc->objname, "/filter/", 8) == 0)
		/* Filters are accessed through their collectors */
		dst = find_peer_by_name(&collectors, pmcc->objname);
	else if (strncmp(pmcc->objname, "/monitor/", 9) == 0)
		dst = find_peer_by_name(&monitors, pmcc->objname);
	if (dst != NULL) {
		peer_add_task(dst, task);
		return PEER_TH_CB_CARED;
	}
	/* Resend to all collectors */
	reused = 0;
	TAILQ_FOREACH(dst, &collectors, entry)
		if (!peer_is_closed(dst)) {
			if (!reused)
				tdst = task;
			else {
				tdst = peer_new_task(1, NULL);
				if (tdst == NULL) {
					/* XXX: fail more */
					DPRINTF(1, "peer_new_task");
					return PEER_TH_CB_CARED;
				}
				peer_init_task_from(tdst, task, 1);
			}
			peer_add_task(dst, tdst);
			reused = 1;
		}
	return (reused ? PEER_TH_CB_CARED : PEER_TH_CB_FREE);

err:
	sender->status = PEER_STATUS_FAILED;
	return PEER_TH_CB_FREE;
}

int
handle_filter_msg(struct peer *sender, struct peer_task *task) {
	peer_add_task(TAILQ_FIRST(&filters), task);
	return PEER_TH_CB_CARED;
}

/*
 * Hand a control-channel task to the local filter.  Returns
 * PEER_TH_CB_CARED when the filter accepted the task; otherwise marks
 * the filter failed and lets the caller free the task.
 */
int
handle_ctrl_msg(struct peer *sender, struct peer_task *task) {
	struct peer	*f = TAILQ_FIRST(&filters);

	if (peer_add_task(f, task) != -1)
		return PEER_TH_CB_CARED;
	DPRINTF(0, "handle_ctrl_msg: peer_add_task");
	f->status = PEER_STATUS_FAILED;
	return PEER_TH_CB_FREE;
}

int
peer_is_in_list(const struct peer_list *list, const struct peer *peer) {
	struct peer	*p;

	TAILQ_FOREACH(p, list, entry)
		if (p == peer)
			return 1;
	return 0;
}

/*
 * Look up a peer in "list" by exact name match.
 * Returns the first matching peer, or NULL if none.
 */
struct peer *
find_peer_by_name(struct peer_list *list, const char *name) {
	struct peer	*cur;

	TAILQ_FOREACH(cur, list, entry) {
		if (strcmp(cur->name, name) == 0)
			return cur;
	}
	return NULL;
}

/*
 * Print an optional error message followed by the usage text to stderr,
 * then exit with EXIT_FAILURE.  Never returns (__dead in prototype).
 *
 * NOTE(review): "msg" is printed without a trailing newline, so it runs
 * straight into the usage header unless the caller supplies one --
 * confirm whether that is intended.
 */
void
usage(const char *msg) {
	if (msg != NULL)
		fprintf(stderr, "%s: %s", __progname, msg);
	fprintf(stderr,
"usage: %s [-d] [-b addr] [-f filter] [-P peer] [-p peerdomain] [-t rrtype]\n"
"  -b listenaddr  Listen on given address instead of system selected one.\n"
"                 You can also specify port by appending :number to listenaddr.\n"
"  -d             Do not daemonize and increase verbosity.\n"
"  -f filteraddr  Connect to filter at the address:port specified instead of\n"
"                 defaults (localhost:" FILTER_PORT ")\n"
"  -p peerdomain  DNS name to resolve for peers. Defaults to\n"
"                 \"" DEF_PEER_DNS_PREFIX "\" plus current domain name\n"
"                 (i.e. full host name without first part). Cannot be combined\n"
"                 with -f option.\n"
"  -P collector   Force connection to specified collector peer. Can be specified\n"
"                 more than once, connections will be established with all\n"
"                 peers specified.\n"
"  -t rrtype      Resource record type to resolve (A or SRV). Note: SRV\n"
"                 records are not supported yet - defaults to A until.\n",
	    __progname);
	exit(EXIT_FAILURE);
}

/*
 * Signal handler: only sets an async-signal-safe flag; the main loop
 * checks quit_requested after poll(2) is interrupted.
 */
void
sighandler(int sig) {
	quit_requested = 1;
}

/*
 * Builds an array of pollfd structures based on the lists of peers
 * provided.  Last list pointer must be NULL.
 *
 * If "reserve" is not 0 then additional "reserve" pollfd structures are
 * allocated at the start of the array. They'll be just zeroed, further
 * initialization should be done elsewhere.
 *
 * Each connected peer gets its pollptr set to its slot; disconnected
 * peers get pollptr = NULL.
 *
 * Return an array built, or NULL if there are no fds to poll on; the
 * size of the array is placed in *count either way.
 */
struct pollfd *
build_poll_pool(size_t *count, size_t reserve, struct peer_list *peers, ...) {
	struct	peer_list *pqueue;
	struct	peer *peer;
	struct	pollfd *pool;
	va_list	ap;
	size_t	i;

	/* First pass: count connected peers across all lists. */
	i = reserve;
	va_start(ap, peers);
	for (pqueue = peers; pqueue != NULL;
	    pqueue = va_arg(ap, struct peer_list *))
		TAILQ_FOREACH(peer, pqueue, entry)
			if (peer_connected(peer))
				i++;
	va_end(ap);
	*count = i;
	if (i == 0)
		return NULL;
	pool = calloc(i, sizeof(struct pollfd));
	if (pool == NULL)
		fatalx("build_poll_pool: calloc");

	/* Second pass: fill in fds and back-pointers. */
	i = reserve;
	va_start(ap, peers);
	for (pqueue = peers; pqueue != NULL;
	    pqueue = va_arg(ap, struct peer_list *))
		/*
		 * Fix: iterate "pqueue", not "peers" -- the previous code
		 * walked the first list once per variadic argument, so
		 * peers on the later lists never got a pollfd slot.
		 */
		TAILQ_FOREACH(peer, pqueue, entry)
			if (peer_connected(peer)) {
				pool[i].fd = peer_get_fd(peer);
				pool[i].events = POLLIN | POLLOUT;
				peer->pollptr = &pool[i];
				i++;
			} else
				peer->pollptr = NULL;
	va_end(ap);
	return pool;
}

/*
 * The logic cut out from peer_update_connections().
 *
 * Close a failed filter connection and (re)connect when the peer is
 * failed or new.  Returns non-zero when the peer's fd set changed, so
 * the caller knows to rebuild the poll pool.
 */
int
check_filter_connection(struct peer *peer) {
	int	changed = 0;
	char	buf[MAXPEERADDRLEN];

	switch (peer->status) {
	case PEER_STATUS_FAILED:
		if (!inet_ntop(peer->af, &peer->addr, buf, sizeof(buf)))
			strlcpy(buf, "[inet_ntop failed]", sizeof(buf));
		DPRINTFX(2, "peer %s failed: %s", buf,
		    strerror(peer->fail_reason));
		assert(!peer_is_closed(peer));
		peer_close(peer);
		changed = 1;
		/* FALLTHROUGH */

	case PEER_STATUS_NEW:
		/* Not connected yet, retries here are infinite */
		assert(peer_is_closed(peer));
		if (peer_connect(peer) == 0)
			changed = 1;
		/* any other status: connection is healthy, nothing to do */
	}
	return changed;
}

int
main(int argc, char **argv) {
	struct   peer *filter, *peer, *npeer;
	struct	 sigaction siga;
	struct	 pollfd *pool;
	struct	 addrinfo *colladdrs, *tempaddrs;
	struct	 bindaddr_list boundto;
	struct	 bindaddr ba, *pba;
	struct	 ssl_paths ssl_paths;
	size_t	 i, poolsize, nboundaddrs;
	time_t	 t, lastdns;
	int	 c, n, fixedcollectors, rrtype, senschng;
	char	*s_bindaddr, *s_filteraddr, *peerdomain;

	strlcpy(ssl_paths.ca_crt, CERTPATH "/ca/ca.crt",
	    sizeof(ssl_paths.ca_crt));
	strlcpy(ssl_paths.my_crt, CERTPATH "/server/server.crt",
	    sizeof(ssl_paths.ca_crt));
	strlcpy(ssl_paths.my_key, CERTPATH "/server/private/server.key",
	    sizeof(ssl_paths.ca_crt));

	SLIST_INIT(&boundto);
	assl_initialize();

	filter = peer_new(0, NULL);
	if (filter == NULL)
		fatal("peer_new");
	filter->ssl_paths = &ssl_paths;
	TAILQ_INSERT_TAIL(&filters, filter, entry);

	peerdomain = NULL;
	rrtype = -1;
	fixedcollectors = 0;
	senschng = 0;
	s_filteraddr = s_bindaddr = NULL;
	tempaddrs = colladdrs = NULL;
	while ((c = getopt(argc, argv, "b:df:p:t:")) != -1) {
		switch (c) {
		case 'b':
			pba = malloc(sizeof(struct bindaddr));
			if (pba == NULL)
				fatalx("malloc");
			bzero(pba, sizeof(struct bindaddr));
			if (fill_sa_from_user(optarg, pba,
			    SASPEC_HOST_NUMERIC | SASPEC_FOR_BINDING,
			    PEER_PORT) == -1)
				fatal("setting up listen address %s", optarg);
			SLIST_INSERT_HEAD(&boundto, pba, entry);
			break;

		case 'd':
			debug++;
			break;

		case 'f':
			if (s_filteraddr != NULL)
				usage("-f cannot be specified twice");
			s_filteraddr = strdup(optarg);
			if (s_filteraddr == NULL)
				fatal("strdup");
			break;

		case 'P':
			fixedcollectors = 1;
			if (peerdomain != NULL)
				usage("-P and -p cannot be used together");
			if (rrtype != -1)
				usage("-P and -t cannot be used together");
			n = strlen(optarg);
			if (n == 0)
				fatalx("empty peer address supplied");
			peer = peer_new(0, NULL);
			if (peer == NULL)
				fatal("peer_new");
			if (fill_sa_from_user(optarg, &ba,
			    SASPEC_HOST_NUMERIC, PEER_PORT) == -1)
				fatal("setting up peer address %s", optarg);
			peer->addrlen = ba.slen;
			memcpy(&peer->addr, &ba.ss, sizeof(peer->addr));
			peer->af = peer->addr.ss_family;
			peer->ssl_paths = &ssl_paths;
			peer->status = PEER_STATUS_NEW;
			TAILQ_INSERT_TAIL(&collectors, peer, entry);
			break;

		case 'p':
			if (peerdomain != NULL)
				usage("-p cannot be used twice");
			if (fixedcollectors)
				usage("-P and -p cannot be used together");
			peerdomain = strdup(optarg);
			if (peerdomain == NULL)
				fatal("strdup");
			break;

		case 't':
			if (rrtype != -1)
				usage("-t cannot be specified twice");
			if (fixedcollectors)
				usage("-P and -t cannot be used together");
			if (strcmp(optarg, "A"))
				rrtype = T_A;
			else if (strcmp(optarg, "SRV"))
				rrtype = T_SRV;
			else
				usage("unsupported resource record type in -t");
			break;

		default:
			usage(NULL);
		}
	}
	argc -= optind;
	argv += optind;
	if (argc > 0)
		usage("extra parameters in command line");

	if (rrtype == -1)
		rrtype = T_A;

	/* set up filter connection parameters, will attempt to connect later */
	if (fill_sa_from_user(
	    s_filteraddr ? s_filteraddr : "localhost:" FILTER_PORT,
	    &ba, SASPEC_HOST_REQUIRED, FILTER_PORT) == -1)
		fatal("setting up filter address");
	if (s_filteraddr != NULL) {
		free(s_filteraddr);
		s_filteraddr = NULL;
	}
	filter->addrlen = ba.slen;
	memcpy(&filter->addr, &ba.ss, sizeof(filter->addr));
	filter->af = filter->addr.ss_family;

	/* set up listening socket(-s) */
	if (SLIST_EMPTY(&boundto)) {
		/*
		 * Do not use calloc(2, ) because we free bindaddr structures
		 * on cleanup one-by-one.
		 */
		pba = calloc(1, sizeof(struct bindaddr));
		if (pba == NULL)
			fatalx("calloc");
		fill_sa_from_user("0.0.0.0", pba,
		    SASPEC_HOST_NUMERIC | SASPEC_FOR_BINDING, PEER_PORT);
		SLIST_INSERT_HEAD(&boundto, pba, entry);

		pba = calloc(1, sizeof(struct bindaddr));
		if (pba == NULL)
			fatalx("calloc");
		fill_sa_from_user("::", pba,
		    SASPEC_HOST_NUMERIC | SASPEC_FOR_BINDING, PEER_PORT);
		SLIST_INSERT_HEAD(&boundto, pba, entry);
	}
	nboundaddrs = 0;
	SLIST_FOREACH(pba, &boundto, entry) {
		pba->fd = socket(pba->ss.ss_family, SOCK_STREAM, 0);
		if (pba->fd == -1)
			fatal("(listening) socket");
		if (fcntl(pba->fd, F_SETFL, O_NONBLOCK) == -1)
			fatal("fcntl");
		if (bind(pba->fd, (struct sockaddr *)&pba->ss, pba->slen))
			fatal("bind");
		if (listen(pba->fd, 20))
			fatal("listen");
		nboundaddrs++;
	}

	if (!fixedcollectors) {
		colladdrs = find_collectors(peerdomain, rrtype);
		if (colladdrs == NULL)
			DPRINTFX(1, "could not find any collectors");
	}
	peer_update_connections(&collectors, colladdrs, PEER_CONN_PLAIN, NULL,
	   NULL, &ssl_paths);
	check_filter_connection(filter);
	pool = build_poll_pool(&poolsize, nboundaddrs, &collectors, &filters,
	    &monitors, NULL);
	i = 0;
	SLIST_FOREACH(pba, &boundto, entry) {
		pool[i].fd = pba->fd;
		pool[i].events = POLLIN;
	}

	bzero(&siga, sizeof(struct sigaction));
	siga.sa_handler = sighandler;
	sigaction(SIGINT, &siga, NULL);
	sigaction(SIGTERM, &siga, NULL);
	sigaction(SIGUSR1, &siga, NULL);
	sigaction(SIGUSR2, &siga, NULL);
	siga.sa_handler = SIG_IGN;
	sigaction(SIGHUP, &siga, NULL);    /* XXX: re-read certificates? */

	if (!debug && daemon(0, 0))
		DPRINTF(0, "could not go daemon");

	lastdns = time(NULL);
	for (;;) {
		t = time(NULL);
		n = poll(pool, poolsize, DNS_CHECK_IN - (t - lastdns));
		switch (n) {
		case -1:
			if (errno == EINTR && quit_requested)
				goto cleanup;
			else
				DPRINTF(0, "poll");
			break;

		case 0:
			break;

		default:
			for (i = 0; i < nboundaddrs; i++) {
				if (pool[i].revents & POLLIN) {
					peer = peer_new(0, NULL);
					if (peer == NULL) {
						DPRINTF(0, "peer_new");
						continue;
					}
					peer->ssl_paths = &ssl_paths;
					if (peer_accept(peer,
					    pool[i].fd) == -1) {
						peer_free(peer);
						continue;
					}
					TAILQ_INSERT_TAIL(&monitors, peer,
					    entry);
					senschng++;
				} else {
					DPRINTF(0, "poll pool[%zu]", i);
					/* goto cleanup; */
				}
			}
			for (peer = TAILQ_FIRST(&monitors); peer != NULL;) {
				if (peer->pollptr == NULL)
					continue;
				if (peer->pollptr->revents)
					n--;
				peer_handle(peer, monitor_handlers,
				    nitems(monitor_handlers));
				npeer = TAILQ_NEXT(peer, entry);
				if (peer->status == PEER_STATUS_FAILED) {
					TAILQ_REMOVE(&monitors, peer, entry);
					peer_free(peer);
					senschng++;
				}
				peer = npeer;
				if (n == 0)
					break;
			}
			TAILQ_FOREACH(peer, &filters, entry) {
				if (peer->pollptr == NULL)
					continue;
				if (peer->pollptr->revents)
					n--;
				peer_handle(peer, filter_handlers,
				    nitems(filter_handlers));
				if (n == 0)
					break;
			}
			TAILQ_FOREACH(peer, &collectors, entry) {
				if (peer->pollptr == NULL)
					continue;
				if (peer->pollptr->revents)
					n--;
				peer_handle(peer, coalegue_handlers,
				    nitems(coalegue_handlers));
				if (n == 0)
					break;
			}
		}

		if (quit_requested)
			break;

		if (!fixedcollectors && t - lastdns >= DNS_CHECK_IN) {
			tempaddrs = find_collectors(peerdomain, rrtype);
			if (tempaddrs != NULL) {
				freeaddrinfo(colladdrs);
				colladdrs = tempaddrs;
			} else
				DPRINTFX(1, "could not find any collectors");
			lastdns = t;
		}
		if (senschng || check_filter_connection(filter) ||
		    peer_update_connections(&collectors, colladdrs,
		    PEER_CONN_PLAIN, NULL, NULL, &ssl_paths)) {
			free(pool);
			pool = build_poll_pool(&poolsize, nboundaddrs,
			    &collectors, &filters, &monitors, NULL);
			i = 0;
			SLIST_FOREACH(pba, &boundto, entry) {
				pool[i].fd = pba->fd;
				pool[i].events = POLLIN;
			}
			senschng = 0;
		}
	}

cleanup:
	free(pool);
	if (colladdrs != NULL)
		freeaddrinfo(colladdrs);
	return (0);
}
