/*
 * ivi_nl.c :
 *  IVI Netlink Kernel Communication Module
 *
 * Author :	Wentao Shang
 */

#include "ivi_nl.h"

/*
 * Attribute validation policy for the IVI generic netlink family.
 * V6ADDR and ROUTE carry a raw struct in6_addr payload; all other
 * attributes are fixed-width integers.
 */
static struct nla_policy ivi_genl_policy[IVI_NLA_MAX + 1] = {
	[IVI_NLA_V4ADDR]	= { .type = NLA_U32 },
	[IVI_NLA_PLEN4]		= { .type = NLA_U32 },
	[IVI_NLA_V6ADDR]	= { .len = sizeof(struct in6_addr) },
	[IVI_NLA_PLEN6]		= { .type = NLA_U32 },
	[IVI_NLA_RATIO]		= { .type = NLA_U16 },
	[IVI_NLA_OFFSET]	= { .type = NLA_U16 },
	[IVI_NLA_FORMAT]	= { .type = NLA_U8 },
	[IVI_NLA_ROUTE] 	= { .len = sizeof(struct in6_addr) },
};

/*
 * Generic netlink family used to talk to the user-space IVI daemon.
 * GENL_ID_GENERATE lets the kernel pick a free family id at
 * registration time (pre-3.13 genetlink API).
 */
static struct genl_family ivi_genl_family = {
	.id		= GENL_ID_GENERATE,
	.hdrsize	= 0,		/* no family-private header */
	.name		= IVI_FAMILY_NAME,
	.version	= IVI_NL_VERSION,
	.maxattr	= IVI_NLA_MAX,
};

/* Multicast group */
/* Multicast group: all kernel->daemon notifications are broadcast here. */
static struct genl_multicast_group ivi_genl_mc_group = {
	.name	= IVI_GROUP_NAME,
	.id	= GENL_ID_GENERATE,	/* id assigned at registration */
};


/* pending list definitions */

#define MAX_LOOKUP_RETRY 3

/*
 * One packet waiting for its mapping rule to be resolved.
 * Holds both the original (untranslated) skb and the pre-allocated
 * translation target skb; both are freed if the resolve times out.
 */
struct pending_packet {
	struct list_head node;		// link on pending_resolve::queue
	struct sk_buff *oldskb;		// original packet
	struct sk_buff *newskb;		// pre-allocated translated packet
};

/*
 * Per-address resolve state: all packets destined to the same
 * unresolved IPv4 address queue up on one of these nodes.
 */
struct pending_resolve {
	struct hlist_node node;		// link in pending_list hash chain
	u32 addr;  // unresolved ipv4 address
	struct list_head queue;  // pending packet queue
	struct timeval timer;  // last time when the node is accessed
	int retry_count;		// lookups issued so far; capped by MAX_LOOKUP_RETRY
};

/*
 * Hash table of pending resolves, guarded by a single spin lock.
 * Entries older than 'timeout' seconds are purged by
 * refresh_pending_list().
 */
struct pending_list {
	spinlock_t lock;			// protects chain[] and size
	struct hlist_head chain[IVI_HTABLE_SIZE];
	int size;				// number of pending_resolve entries
	time_t timeout;				// expiry threshold in seconds
};

static struct pending_list nl_list;

/* pending list operations */

/* 
 *  Release pending resolve and flush pending packets.
 *  'pnode' must be removed from list before calling this function.
 */
static void release_pending_resolve(struct pending_resolve *pnode) {
	struct pending_packet *iter, *temp;

	// flush pending queue
	list_for_each_entry_safe(iter, temp, &pnode->queue, node) {
		list_del(&iter->node);
		kfree_skb(iter->oldskb);
		kfree_skb(iter->newskb);
		kfree(iter);
	}
#ifdef IVI_DEBUG_NETLINK
	printk(KERN_DEBUG "release_pending_resolve: flush pending resolve for address " NIP4_FMT "\n", NIP4(pnode->addr));
#endif
	kfree(pnode);
}

// must be protected by spin lock when calling this function
/*
 *  Allocate a pending_packet holding the skb pair and append it to the
 *  tail of 'pnode''s queue.
 *  Returns the new entry, or NULL on allocation failure (the skbs then
 *  remain owned by the caller).
 *  Must be protected by spin lock when calling this function.
 */
static struct pending_packet* add_pending_packet(struct sk_buff *oskb, struct sk_buff *nskb, struct pending_resolve *pnode) {
	struct pending_packet *packet;

	// No cast on kmalloc(); sizeof(*packet) stays correct if the type changes.
	packet = kmalloc(sizeof(*packet), GFP_ATOMIC);
	if (packet == NULL) {
#ifdef IVI_DEBUG_NETLINK
		printk(KERN_DEBUG "add_pending_packet: kmalloc() failed.\n");
#endif
		return NULL;
	}

	packet->oldskb = oskb;
	packet->newskb = nskb;
	list_add_tail(&packet->node, &pnode->queue);
#ifdef IVI_DEBUG_NETLINK
	printk(KERN_DEBUG "add_pending_packet: oskb = %p, nskb = %p, pnode = %p\n", oskb, nskb, pnode);
#endif
	return packet;
}

// Init list
// Initialize an empty pending list with the given expiry timeout (seconds).
static void init_pending_list(struct pending_list *list, time_t timeout) {
	int bucket;

	spin_lock_init(&list->lock);
	for (bucket = 0; bucket < IVI_HTABLE_SIZE; bucket++)
		INIT_HLIST_HEAD(&list->chain[bucket]);
	list->timeout = timeout;
	list->size = 0;
}

// must be protected by spin lock when calling this function
/*
 *  Create a fresh resolve node for 'addr' (empty queue, zero retries,
 *  timer set to now) and insert it at the head of its hash chain.
 *  Returns the node, or NULL on allocation failure.
 *  Must be protected by spin lock when calling this function.
 */
static struct pending_resolve* add_new_pending_resolve(u32 addr, struct pending_list *list) {
	struct pending_resolve *pnode;
	int hash;

	// No cast on kmalloc(); sizeof(*pnode) stays correct if the type changes.
	pnode = kmalloc(sizeof(*pnode), GFP_ATOMIC);
	if (pnode == NULL) {
#ifdef IVI_DEBUG_NETLINK
		printk(KERN_DEBUG "add_new_pending_resolve: kmalloc() failed.\n");
#endif
		return NULL;
	}

	pnode->addr = addr;
	INIT_LIST_HEAD(&pnode->queue);
	do_gettimeofday(&pnode->timer);
	pnode->retry_count = 0;

	hash = addr_hashfn(addr);
	hlist_add_head(&pnode->node, &list->chain[hash]);
	list->size++;
#ifdef IVI_DEBUG_NETLINK
	printk(KERN_DEBUG "add_new_pending_resolve: add new pending resolve for address " NIP4_FMT " in hash bucket %d\n", NIP4(addr), hash);
#endif
	return pnode;
}

// must be protected by spin lock when calling this function
// Look up the pending resolve entry for 'addr'; NULL if none exists.
// Must be protected by spin lock when calling this function.
static struct pending_resolve* get_pending_resolve(u32 addr, struct pending_list *list) {
	struct pending_resolve *entry;
	struct hlist_node *pos;
	int bucket = addr_hashfn(addr);

	hlist_for_each_entry(entry, pos, &list->chain[bucket], node) {
		if (entry->addr == addr)
			return entry;
	}

	return NULL;
}

// must be protected by spin lock when calling this function
// Unlink 'iter' from the hash table; does NOT free it (see
// release_pending_resolve). Must be protected by spin lock when calling
// this function.
static inline void remove_pending_resolve(struct pending_resolve *iter, struct pending_list *list) {
	hlist_del(&iter->node);
	list->size--;
}

// Refresh the timer for each pending_resolve, must NOT acquire spin lock when calling this function
// Sweep the table and expire every pending_resolve whose last access is
// at least list->timeout seconds old. Takes the list lock itself, so the
// caller must NOT already hold it.
void refresh_pending_list(struct pending_list *list) {
	struct pending_resolve *entry;
	struct hlist_node *pos;
	struct hlist_node *next;
	struct timeval now;
	int bucket;

	do_gettimeofday(&now);

	spin_lock_bh(&list->lock);
	for (bucket = 0; bucket < IVI_HTABLE_SIZE; bucket++) {
		hlist_for_each_entry_safe(entry, pos, next, &list->chain[bucket], node) {
			if (now.tv_sec - entry->timer.tv_sec >= list->timeout) {
				// Expired: unlink, then drop the node and its queued skbs.
				hlist_del(&entry->node);
				list->size--;
				release_pending_resolve(entry);
			}
		}
	}
	spin_unlock_bh(&list->lock);
#ifdef IVI_DEBUG_NETLINK
	printk(KERN_DEBUG "refresh_pending_list: done.\n");
#endif
}

// Clear the entire list, must NOT acquire spin lock when calling this function
// Drop every entry in the table unconditionally (module teardown path).
// Takes the list lock itself, so the caller must NOT already hold it.
void free_pending_list(struct pending_list *list) {
	struct pending_resolve *entry;
	struct hlist_node *pos;
	struct hlist_node *next;
	int bucket;

	spin_lock_bh(&list->lock);
	for (bucket = 0; bucket < IVI_HTABLE_SIZE; bucket++) {
		hlist_for_each_entry_safe(entry, pos, next, &list->chain[bucket], node) {
			hlist_del(&entry->node);
			list->size--;
			release_pending_resolve(entry);
		}
	}
	spin_unlock_bh(&list->lock);
#ifdef IVI_DEBUG_NETLINK
	printk(KERN_DEBUG "free_pending_list: done.\n");
#endif
}


/*
 *  Request usr daemon to resolve the mapping rule (v4addr --> prefix + prefixlen + ratio + adjacent).
 *  Untranslated skb is cached to pending list.
 *  If -1 is returned, caller is responsible for freeing 'oskb' and 'nskb' memory.
 */
/*
 *  Request usr daemon to resolve the mapping rule (v4addr --> prefix + prefixlen + ratio + adjacent).
 *  Untranslated skb is cached to pending list.
 *  Returns 0 when this function has taken ownership of 'oskb'/'nskb'
 *  (queued, or freed after the retry limit was reached).
 *  If -1 is returned, caller is responsible for freeing 'oskb' and 'nskb' memory.
 */
int rule_lookup_request(u32 addr, u16 port, struct sk_buff *oskb, struct sk_buff *nskb) {
	struct pending_resolve *pnode = NULL;

	spin_lock_bh(&nl_list.lock);
	pnode = get_pending_resolve(addr, &nl_list);  // FIXME: port infor should be used in pending table

	if (pnode == NULL) {
		// First request for this address: create a resolve entry.
		pnode = add_new_pending_resolve(addr, &nl_list);
		if (pnode == NULL) {
			goto failed;
		}

		if (add_pending_packet(oskb, nskb, pnode) == NULL) {
			// Roll back the entry we just inserted. Leaving an empty
			// resolve node in the table would make it linger until the
			// timeout sweep and inflate retry_count on the next lookup.
			remove_pending_resolve(pnode, &nl_list);
			kfree(pnode);  // queue is empty, nothing else to flush
			goto failed;
		}
	} else {
		// pending resolve exists, update node state
		do_gettimeofday(&pnode->timer);
		pnode->retry_count++;
		if (pnode->retry_count > MAX_LOOKUP_RETRY) {
			goto max_reached;
		}

		if (add_pending_packet(oskb, nskb, pnode) == NULL) {
			goto failed;
		}
	}
	spin_unlock_bh(&nl_list.lock);

	// Notify daemon (outside the lock; sends a genetlink multicast).
	notify_daemon(addr, port);

	return 0;
	
max_reached:
	// We have reached max retry limit, give up now.
	remove_pending_resolve(pnode, &nl_list);
	spin_unlock_bh(&nl_list.lock);
	printk(KERN_DEBUG "rule_lookup_request: max retry reached for address " NIP4_FMT "\n", NIP4(addr));
	release_pending_resolve(pnode);
	// Free 'oskb' and 'nskb' for callers because they will not free them if we return 0 here.
	kfree_skb(oskb);
	kfree_skb(nskb);
	return 0;
	
failed:
	// 'oskb' and 'nskb' is freed by caller.
	spin_unlock_bh(&nl_list.lock);
	return -1;
}


/*
 *  Request usr daemon to update the mapping rule (addr + plen4 --> prefix + plen6) and network configuration (v6 addr, route) for local host.
 */
/*
 *  Request usr daemon to update the mapping rule (addr + plen4 --> prefix + plen6) and network configuration (v6 addr, route) for local host.
 *  Broadcasts an IVI_NLC_UPDATE message on the IVI multicast group.
 *  'router' may be NULL, in which case the ROUTE attribute is omitted.
 *  Returns 0 on success, -1 on allocation or message-build failure.
 */
int rule_update_request(u32 addr, u16 ratio, u16 offset, struct in6_addr *prefix, int plen6, struct in6_addr *router) {
	struct sk_buff *skb;
	void *msg;
	int len;

	// Worst-case payload: two in6_addr blobs, one u32, one int, two u16.
	len = nla_total_size(sizeof(struct in6_addr)) * 2 + nla_total_size(sizeof(u32)) 
		+ nla_total_size(sizeof(int)) + nla_total_size(sizeof(u16)) * 2;
	skb = genlmsg_new(len, GFP_ATOMIC);
	if (!skb) {
		printk("rule_update_request: genlmsg_new() failed.\n");
		goto out;
	}

	msg = genlmsg_put(skb, 0, 0, &ivi_genl_family, 0, IVI_NLC_UPDATE);
	if (!msg) {
		printk("rule_update_request: genlmsg_put() failed.\n");
		goto free_skb;
	}

	if (nla_put_u32(skb, IVI_NLA_V4ADDR, addr)) {
		printk("rule_update_request: nla_put_u32() failed for V4ADDR.\n");
		goto free_skb;
	}
	
	if (nla_put_u16(skb, IVI_NLA_RATIO, ratio)) {
		printk("rule_update_request: nla_put_u16() failed for RATIO.\n");
		goto free_skb;
	}

	if (nla_put_u16(skb, IVI_NLA_OFFSET, offset)) {
		printk("rule_update_request: nla_put_u16() failed for OFFSET.\n");
		goto free_skb;
	}

	if (nla_put(skb, IVI_NLA_V6ADDR, sizeof(struct in6_addr), prefix)) {
		printk("rule_update_request: nla_put() failed for V6ADDR.\n");
		goto free_skb;
	}

	if (nla_put_u32(skb, IVI_NLA_PLEN6, plen6)) {
		printk("rule_update_request: nla_put_u32() failed for PLEN6.\n");
		goto free_skb;
	}

	// Router address is optional.
	if (router != NULL) {
		if (nla_put(skb, IVI_NLA_ROUTE, sizeof(struct in6_addr), router)) {
			// Fixed: message previously said 'update_router'.
			printk("rule_update_request: nla_put() failed for ROUTE.\n");
			goto free_skb;
		}
	}
	
	genlmsg_end(skb, msg);

	// genlmsg_multicast() consumes the skb on both success and failure.
	genlmsg_multicast(skb, 0, ivi_genl_mc_group.id, GFP_ATOMIC);

	printk("rule_update_request: fired.\n");

	return 0;

free_skb:
	nlmsg_free(skb);
out:
	return -1;
}


/* 
 * handler invoked when a mapping rule is successfully resolved, 
 * return 0 on success or negative value on failure 
 */
/* 
 * handler invoked when a mapping rule is successfully resolved, 
 * return 0 on success or negative value on failure 
 *
 * On a complete attribute set: inserts the rule into the caches (when
 * rule_cache == 1), then dequeues and transmits every packet that was
 * waiting on this IPv4 address. A message missing any required
 * attribute is silently ignored (still returns 0).
 */
static int ivi_genl_rule(struct sk_buff *skb, struct genl_info *info) {
	struct nlattr *nla_v4a, *nla_v6a, *nla_plen6, *nla_ratio, *nla_off, *nla_fmt;
	struct pending_resolve *pnode;
	struct pending_packet *iter, *temp;
	struct rule_info rule;
	int count;
	
	nla_v4a = info->attrs[IVI_NLA_V4ADDR];
	nla_v6a = info->attrs[IVI_NLA_V6ADDR];
	nla_plen6 = info->attrs[IVI_NLA_PLEN6];
	nla_ratio = info->attrs[IVI_NLA_RATIO];
	nla_off = info->attrs[IVI_NLA_OFFSET];
	nla_fmt = info->attrs[IVI_NLA_FORMAT];
	
	// All six attributes must be present; otherwise the message is dropped.
	if (nla_v4a && nla_v6a && nla_plen6 && nla_ratio && nla_off && nla_fmt) {
		rule.prefix4 = nla_get_u32(nla_v4a);
		rule.plen4 = 32;	// daemon resolves host routes only; prefix len fixed at /32
		memcpy(&rule.prefix6, nla_data(nla_v6a), sizeof(struct in6_addr));
		rule.plen6 = nla_get_u32(nla_plen6);
		rule.ratio = nla_get_u16(nla_ratio);
		rule.adjacent = 1;	// NOTE(review): adjacency hard-coded to 1 — confirm intended
		rule.format = nla_get_u8(nla_fmt);
		
		printk(KERN_DEBUG "ivi_genl_rule: user from pid %d says " NIP4_FMT " offset %d -> " NIP6_FMT "/%d\n", 
			info->snd_pid, NIP4(rule.prefix4), nla_get_u16(nla_off), NIP6(rule.prefix6), rule.plen6);

		// Insert rule
		if (rule_cache == 1) {
			// If either insert fails, pending packets are left queued and
			// will be flushed by the timeout sweep in refresh_pending_list().
			if (ivi_rule_insert_dynamic(&rule) != 0) {
				printk(KERN_DEBUG "ivi_genl_rule: fail to insert " NIP4_FMT "/%d -> " NIP6_FMT "/%d\n", 
						NIP4(rule.prefix4), rule.plen4, NIP6(rule.prefix6), rule.plen6);
				return 0;
			}
			if (ivi_rule6_insert(&rule) != 0) {
				printk(KERN_DEBUG "ivi_genl_rule: fail to insert " NIP6_FMT " -> %d\n", NIP6(rule.prefix6), rule.plen6);
				return 0;
			}
		}

		// Send pending packets
		spin_lock_bh(&nl_list.lock);
		pnode = get_pending_resolve(rule.prefix4, &nl_list);
		if (pnode == NULL) {
			spin_unlock_bh(&nl_list.lock);
			printk(KERN_DEBUG "ivi_genl_rule: no pending resolve for address " NIP4_FMT "\n", NIP4(rule.prefix4));
			return 0;
		}
		// Unlink under the lock, then transmit outside it -- nobody else
		// can reach 'pnode' once it is out of the table.
		remove_pending_resolve(pnode, &nl_list);
		spin_unlock_bh(&nl_list.lock);

		count = 0;
		list_for_each_entry_safe(iter, temp, &pnode->queue, node) {
			list_del(&iter->node);
			// xmit2 takes ownership of both skbs; we free only the wrapper.
			ivi_v4v6_xmit2(iter->oldskb, iter->newskb, &rule);
			kfree(iter);
			count++;
		}
#ifdef IVI_DEBUG_NETLINK
		printk(KERN_DEBUG "ivi_genl_rule: send %d pending packets for address " NIP4_FMT "\n", count, NIP4(pnode->addr));
#endif
		kfree(pnode);
	}
	
	return 0;
}

/* Operation descriptor: daemon replies with IVI_NLC_RULE, handled by ivi_genl_rule(). */
static struct genl_ops ivi_genl_ops_rule = {
	.cmd	= IVI_NLC_RULE,
	.flags	= 0,
	.policy	= ivi_genl_policy,
	.doit	= ivi_genl_rule,
	.dumpit	= NULL,		/* no dump support */
};


static int close_daemon(void) {
	struct sk_buff *skb;
	void *msg;
	int ret;

	ret = -1;

	skb = genlmsg_new(0, GFP_ATOMIC);
	if (!skb) {
#ifdef IVI_DEBUG_NETLINK
		printk(KERN_ERR "close_daemon: genlmsg_new() failed.\n");
#endif
		goto out;
	}

	msg = genlmsg_put(skb, 0, 0, &ivi_genl_family, 0, IVI_NLC_CLOSE);
	if (!msg) {
#ifdef IVI_DEBUG_NETLINK
		printk(KERN_ERR "close_daemon: genlmsg_put() failed.\n");
#endif
		nlmsg_free(skb);
		goto out;
	}

	genlmsg_end(skb, msg);

	genlmsg_multicast(skb, 0, ivi_genl_mc_group.id, GFP_ATOMIC);
#ifdef IVI_DEBUG_NETLINK
	printk(KERN_DEBUG "close_daemon: multicast message fired.\n");
#endif
	ret = 0;
out:
	return ret;
}

/*
 * Ask the user-space daemon to resolve a mapping rule for 'addr'.
 * Returns 0 on success, -1 on failure.
 */
int notify_daemon(u32 addr, u16 port) {
	struct sk_buff *skb;
	void *hdr;
	int payload;

	payload = nla_total_size(sizeof(u32)) + nla_total_size(sizeof(u16));
	skb = genlmsg_new(payload, GFP_ATOMIC);
	if (skb == NULL) {
		printk("notify_daemon: genlmsg_new() failed.\n");
		return -1;
	}

	hdr = genlmsg_put(skb, 0, 0, &ivi_genl_family, 0, IVI_NLC_RULE);
	if (hdr == NULL) {
		printk("notify_daemon: genlmsg_put() failed.\n");
		goto drop;
	}

	if (nla_put_u32(skb, IVI_NLA_V4ADDR, addr)) {
		printk("notify_daemon: nla_put_u32() failed.\n");
		goto drop;
	}

	// NOTE(review): the port travels in the OFFSET attribute — presumably
	// the daemon derives the port-set offset from it; confirm with daemon code.
	if (nla_put_u16(skb, IVI_NLA_OFFSET, port)) {
		printk("notify_daemon: nla_put_u16() failed.\n");
		goto drop;
	}

	genlmsg_end(skb, hdr);

	genlmsg_multicast(skb, 0, ivi_genl_mc_group.id, GFP_ATOMIC);

	printk("notify_daemon: fired.\n");

	return 0;

drop:
	nlmsg_free(skb);
	return -1;
}

/*
 * Push the local IPv6 address to the user-space daemon over the IVI
 * multicast group. Returns 0 on success, -1 on failure.
 */
int notify_daemon_v6addr(struct in6_addr *addr) {
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(nla_total_size(sizeof(struct in6_addr)), GFP_ATOMIC);
	if (skb == NULL) {
		printk("notify_daemon_v6addr: genlmsg_new() failed.\n");
		return -1;
	}

	hdr = genlmsg_put(skb, 0, 0, &ivi_genl_family, 0, IVI_NLC_RULE);
	if (hdr == NULL) {
		printk("notify_daemon_v6addr: genlmsg_put() failed.\n");
		goto drop;
	}

	if (nla_put(skb, IVI_NLA_V6ADDR, sizeof(struct in6_addr), addr)) {
		printk("notify_daemon_v6addr: nla_put() failed.\n");
		goto drop;
	}

	genlmsg_end(skb, hdr);

	genlmsg_multicast(skb, 0, ivi_genl_mc_group.id, GFP_ATOMIC);

	printk("notify_daemon_v6addr: fired.\n");

	return 0;

drop:
	nlmsg_free(skb);
	return -1;
}

/*
 * Register the IVI generic netlink family, its RULE operation and the
 * multicast group, then initialize the pending list.
 * Returns 0 on success or the negative error from the failing
 * registration. On failure all earlier registrations are unwound, so a
 * failed init leaves no genetlink state behind.
 * (The original version returned 0 unconditionally and leaked earlier
 * registrations on partial failure.)
 */
int ivi_nl_init(void) {
	int ret;

	ret = genl_register_family(&ivi_genl_family);
	if (ret) {
#ifdef IVI_DEBUG
		printk(KERN_ERR "IVI: genl_register_family() failed.\n");
#endif
		goto out;
	}

	ret = genl_register_ops(&ivi_genl_family, &ivi_genl_ops_rule);
	if (ret) {
#ifdef IVI_DEBUG
		printk(KERN_ERR "IVI: genl_register_ops() failed.\n");
#endif
		goto unreg_family;
	}

	ret = genl_register_mc_group(&ivi_genl_family, &ivi_genl_mc_group);
	if (ret) {
#ifdef IVI_DEBUG
		printk(KERN_ERR "IVI: genl_register_mc_group() failed.\n");
#endif
		goto unreg_ops;
	}

#ifdef IVI_DEBUG
	printk(KERN_DEBUG "IVI: ivi netlink registered.\n");
#endif

	init_pending_list(&nl_list, 60);	// 60 second pending-resolve timeout

#ifdef IVI_DEBUG
	printk(KERN_DEBUG "IVI: ivi_nl loaded.\n");
#endif
	return 0;

unreg_ops:
	genl_unregister_ops(&ivi_genl_family, &ivi_genl_ops_rule);
unreg_family:
	genl_unregister_family(&ivi_genl_family);
out:
	return ret;
}

/*
 * Module teardown: tell the daemon we are closing, flush any packets
 * still waiting for a resolve, then unregister the genetlink pieces in
 * reverse order of registration.
 */
void ivi_nl_exit(void) {
	if (close_daemon()) {
#ifdef IVI_DEBUG
		printk(KERN_ERR "IVI: close_daemon() failed.\n");
#endif
	}

	// Frees every queued skb; safe because no new lookups arrive after this.
	free_pending_list(&nl_list);
	
	genl_unregister_mc_group(&ivi_genl_family, &ivi_genl_mc_group);
	genl_unregister_ops(&ivi_genl_family, &ivi_genl_ops_rule);
	genl_unregister_family(&ivi_genl_family);
#ifdef IVI_DEBUG
	printk(KERN_DEBUG "IVI: ivi netlink unregistered.\n");
	printk(KERN_DEBUG "IVI: ivi_nl unloaded.\n");
#endif
}
