/*
 * Copyright (C) 2014
 *
 * Brick Yang <printfxxx@163.com>
 *
 * This program is free software. You can redistribute it and/or
 * modify it as you like.
 */

/**
 * @file	worker.c
 * @brief	Worker thread implementation
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/udp.h>

#include <mtrace.h>

#include "worker.h"
#include "netdev.h"
#include "cmd.h"

typedef int (worker_op_handler_t)(worker_t *worker, worker_op_t *op);

/* A registered poll callback instance; one node is added to each worker's
 * poll_list and invoked on every iteration of the worker's main loop. */
typedef struct worker_poll {
	const char *name;	/* owner-supplied label; pointer stored, not copied */
	poll_fn_t *fn;		/* callback run by worker_fn() each loop pass */
	void *arg;		/* opaque argument handed back to fn */
	struct list_head node;	/* link in worker->poll_list */
} worker_poll_t;

static char *worker_cpus;
static DEFINE_PER_CPU(worker_t, all_worker);

cpumask_t __worker_cpumask;
EXPORT_SYMBOL(__worker_cpumask);

module_param_named(cpus, worker_cpus, charp, S_IRUGO);
MODULE_PARM_DESC(cpus, "Worker cpus list");

/* Report per-flow status (pool usage, queue config, counters, flow control)
 * for every flow with a non-empty pool on this worker. */
static int worker_op_handler_worker(worker_t *worker, worker_op_t *op)
{
	proto_handle_t *handle = op->args[0];
	flow_t *flow, *next;

	pr_debug("Worker %u: get status\n", worker->cpu);

	list_for_each_entry_safe(flow, next, &worker->flow_list, node) {
		if (!flow->pool_sz) {
			continue;
		}

		cmd_pr_info(handle, "[%s]\n", netdev_name(flow->netdev->ndev));
		cmd_pr_info(handle, "pool=%u/%u\n",
			    flow->pool_used, flow->pool_sz);
		cmd_pr_info(handle, "txq=%*pbl\n",
			    flow->netdev->ndev->real_num_tx_queues,
			    flow->txq_bitmap);
		cmd_pr_info(handle, "qlen/qth/qwt/q_used=%u/%u/%u/%u\n",
			    flow->qlen, flow->qth, flow->qwt, flow->q_used);

		/* Optional sections: only shown when configured. */
		if (flow->pkt_cnt) {
			cmd_pr_info(handle, "pkt_cnt=%llu, pkt_remain=%llu\n",
				    flow->pkt_cnt, flow->pkt_remain);
		}
		if (flow->fc_rate) {
			cmd_pr_info(handle, "fc_mode=%s, fc_burst=%u, fc_rate=%llu\n",
				    flow->fc_mode ? "byte" : "pkt",
				    flow->fc_burst, flow->fc_rate);
		}
		cmd_pr_info(handle, "\n");
	}

	return 0;
}

/* Register a poll callback on this worker. */
static int worker_op_handler_reg(worker_t *worker, worker_op_t *op)
{
	const char *name = op->args[0];
	worker_poll_t *poll;

	pr_debug("Worker %u: register poll instance of \"%s\"\n",
		 worker->cpu, name);

	poll = kzalloc(sizeof(*poll), GFP_KERNEL);
	if (!poll || MTRACE_KMEM_ADD(poll)) {
		pr_err("%s(): failed to alloc memory\n", __func__);
		return -ENOMEM;
	}

	/* NOTE(review): the name pointer is stored, not copied — callers
	 * must keep it alive for the registration's lifetime. */
	poll->name = name;
	poll->fn = op->args[1];
	poll->arg = op->args[2];
	INIT_LIST_HEAD(&poll->node);

	/* Publish with bottom halves off so the polling loop never
	 * observes a half-linked node. */
	local_bh_disable();
	list_add_tail(&poll->node, &worker->poll_list);
	local_bh_enable();

	return 0;
}

/* Unregister the first poll callback matching fn+arg; a no-op if absent. */
static int worker_op_handler_unreg(worker_t *worker, worker_op_t *op)
{
	const char *name = op->args[0];
	worker_poll_t *entry, *next;

	pr_debug("Worker %u: del poll instance of \"%s\"\n",
		 worker->cpu, name);

	list_for_each_entry_safe(entry, next, &worker->poll_list, node) {
		if ((entry->fn != (void *)op->args[1])
		||  (entry->arg != (void *)op->args[2])) {
			continue;
		}

		/* Unlink with bottom halves off so the polling loop never
		 * walks a node being removed. */
		local_bh_disable();
		list_del(&entry->node);
		local_bh_enable();

		MTRACE_KMEM_DEL(entry);
		kfree(entry);
		break;
	}

	return 0;
}

/* Bind a netdev's per-cpu flow to this worker: set queue defaults, allocate
 * the txq bitmap (default txq 0), and link the flow into the worker's list. */
static int worker_op_handler_bind(worker_t *worker, worker_op_t *op)
{
	int rc;
	flow_t *flow;
	netdev_t *netdev;

	netdev = op->args[0];

	pr_debug("Worker %u: \"%s\" bind\n",
		 worker->cpu, netdev_name(netdev->ndev));

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (flow->netdev) {
		/* Already bound: must NOT fall through to the error path,
		 * which would clear flow->netdev and corrupt the existing
		 * binding while the flow is still on the worker's list. */
		return -EBUSY;
	}

	flow->netdev = netdev;
	flow->qlen = 128;
	flow->qth = flow->qlen;
	flow->qwt = max(flow->qlen / 8u, 1u);
	INIT_LIST_HEAD(&flow->node);
	/* BITS_TO_LONGS() yields a count of longs, not bytes — scale by
	 * sizeof(unsigned long) or the bitmap is undersized. */
	flow->txq_bitmap = kzalloc(BITS_TO_LONGS(netdev->ndev->real_num_tx_queues) * sizeof(unsigned long),
				   GFP_KERNEL);
	if (!flow->txq_bitmap || MTRACE_KMEM_ADD(flow->txq_bitmap)) {
		pr_err("%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err_free;
	}
	__set_bit(0, flow->txq_bitmap);
	local_bh_disable();
	list_add_tail(&flow->node, &worker->flow_list);
	local_bh_enable();

	return 0;
err_free:
	/* kfree(NULL) is a no-op; also covers MTRACE_KMEM_ADD() failure. */
	kfree(flow->txq_bitmap);
	flow->txq_bitmap = NULL;
	flow->netdev = NULL;
	return rc;
}

/* Unbind a netdev's per-cpu flow from this worker: unlink it, free all
 * pooled skbs, release the pool and txq bitmap, and reset the flow state
 * so the netdev can be bound again later. Idempotent. */
static int worker_op_handler_unbind(worker_t *worker, worker_op_t *op)
{
	flow_t *flow;
	netdev_t *netdev;

	netdev = op->args[0];

	pr_debug("Worker %u: \"%s\" unbind\n",
		 worker->cpu, netdev_name(netdev->ndev));

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (!flow->netdev) {
		goto end;
	}

	local_bh_disable();
	list_del(&flow->node);
	local_bh_enable();

	while (flow->pool_used) {
		flow->pool_used--;
		MTRACE_SB_NETDEV_SKB_DEL(flow->pool[flow->pool_used]);
		sb_netdev_kfree_skb(netdev, flow->pool[flow->pool_used]);
	}

	MTRACE_KMEM_DEL(flow->pool);
	kfree(flow->pool);
	MTRACE_KMEM_DEL(flow->txq_bitmap);
	kfree(flow->txq_bitmap);

	/* Clear the freed pointers and the binding itself; the original left
	 * them dangling, which blocked any later re-bind (bind() sees
	 * flow->netdev set -> -EBUSY) and risked a double free on a second
	 * unbind. */
	flow->pool = NULL;
	flow->pool_sz = 0;
	flow->txq_bitmap = NULL;
	flow->start = false;
	flow->netdev = NULL;
end:
	return 0;
}

/* Arm a bound flow for transmission: reset the per-run counters and mark it
 * started so worker_fn() begins driving it. */
static int worker_op_handler_start(worker_t *worker, worker_op_t *op)
{
	int rc;
	flow_t *flow;
	netdev_t *netdev;

	netdev = op->args[0];

	pr_debug("Worker %u: \"%s\" start\n",
		 worker->cpu, netdev_name(netdev->ndev));

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (!flow->netdev) {
		rc = -ENODEV;
		goto err;
	}

	if (flow->start) {
		rc = -EBUSY;
		goto err;
	}

	/* pkt_cnt == 0 means "unlimited"; without flow control the burst
	 * budget is effectively infinite. */
	flow->pkt_remain = flow->pkt_cnt ? : U64_MAX;
	flow->burst_remain = flow->fc_rate ? flow->fc_burst : S64_MAX;
	flow->q_used = 0;
	flow->q_halt = 0;
	flow->tx_head = 0;

	flow->burst_prev = plat_time_get();
	flow->start = true;

	return 0;
err:
	/* Bug fix: do NOT clear flow->netdev here. The old code nulled it on
	 * every failure, so starting an already-running flow (-EBUSY)
	 * silently destroyed the binding of a flow still linked on
	 * worker->flow_list. */
	return rc;
}

/* Stop a flow: cut off new packets, then mark it stopped once its in-flight
 * queue has drained. Returns -EINPROGRESS while packets remain queued so the
 * caller re-posts until the drain completes. */
static int worker_op_handler_stop(worker_t *worker, worker_op_t *op)
{
	netdev_t *netdev = op->args[0];
	flow_t *flow;

	/* Suppress the debug line on retried (-EINPROGRESS) posts. */
	if (worker->op_resp != -EINPROGRESS) {
		pr_debug("Worker %u: %s stop\n",
			 worker->cpu, netdev_name(netdev->ndev));
	}

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (!flow->netdev) {
		return -ENODEV;
	}

	/* No further packets will be scheduled. */
	flow->pkt_remain = 0;

	if (flow->q_used) {
		return -EINPROGRESS;
	}

	flow->start = false;
	return 0;
}

/* Unbind every flow still attached to this worker (module teardown path). */
static int worker_op_handler_free(worker_t *worker, worker_op_t *op)
{
	flow_t *flow, *next;

	pr_debug("Worker %u: free\n", worker->cpu);

	list_for_each_entry_safe(flow, next, &worker->flow_list, node) {
		/* Reuse the op as the unbind argument carrier. */
		op->args[0] = flow->netdev;
		worker_op_handler_unbind(worker, op);
	}

	return 0;
}

/* Configure flow control: mode (0 = packets, non-zero = bytes), burst size,
 * and rate (units per second). rate == 0 disables flow control. */
static int worker_op_handler_fc(worker_t *worker, worker_op_t *op)
{
	int rc;
	u32 mode, burst;
	u64 rate, ns;
	flow_t *flow;
	netdev_t *netdev;

	netdev = op->args[0];
	get_arg(mode, op->args[1]);
	get_arg(burst, op->args[2]);
	get_arg(rate, op->args[3]);

	pr_debug("Worker %u: set \"%s\" flow control to %u:%u:%llu\n",
		 worker->cpu, netdev_name(netdev->ndev), mode, burst, rate);

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (!flow->netdev) {
		rc = -ENODEV;
		goto err;
	}

	if (flow->start) {
		rc = -EBUSY;
		goto err;
	}

	if (rate) {
		/* Clamp burst into [1, rate]. Compare in 64 bits: the old
		 * (u32)rate cast truncated large rates (e.g. 2^32 -> 0),
		 * clamping burst to 0 and permanently stalling the flow. */
		burst = max(burst, 1u);
		burst = min_t(u64, burst, rate);
		/* Burst interval in ns, rounded to nearest. */
		ns = (u64)NSEC_PER_SEC * burst;
		ns = div64_u64(ns + (rate >> 1), rate);
	} else {
		burst = 0;
		ns = 0;
	}

	flow->fc_rate = rate;
	flow->fc_mode = !!mode;
	flow->fc_burst = burst;
	flow->burst_invl = nsec_to_plat_time(ns);
	flow->burst_remain = rate ? burst : S64_MAX;

	return 0;
err:
	return rc;
}

/* Resize a flow's skb pool: allocate the new pool, migrate up to sz of the
 * already-prepared skbs, and free any that no longer fit. sz == 0 leaves the
 * flow with no pool. */
static int worker_op_handler_pool_sz(worker_t *worker, worker_op_t *op)
{
	int rc;
	u32 sz;
	unsigned int i, pool_used;
	flow_t *flow;
	netdev_t *netdev;
	struct sk_buff *skb, **pool = NULL;

	netdev = op->args[0];
	get_arg(sz, op->args[1]);

	pr_debug("Worker %u: set \"%s\" pool size to %u\n",
		 worker->cpu, netdev_name(netdev->ndev), sz);

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (!flow->netdev) {
		rc = -ENODEV;
		goto err;
	}

	/* Resizing a running flow would race with traffic_run(). */
	if (flow->start) {
		rc = -EBUSY;
		goto err;
	}

	if (sz) {
		if (!(pool = kzalloc(sizeof(*pool) * sz, GFP_KERNEL))
		||  MTRACE_KMEM_ADD(pool)) {
			pr_err("%s(): failed to alloc memory\n", __func__);
			rc = -ENOMEM;
			goto err;
		}
	}

	/* Carry existing skbs over; drop (and free) any overflow. When
	 * sz == 0, pool stays NULL and every existing skb is freed. */
	for (i = 0, pool_used = 0; i < flow->pool_used; i++) {
		skb = flow->pool[i];
		if (pool_used < sz) {
			pool[pool_used++] = skb;
		} else {
			MTRACE_SB_NETDEV_SKB_DEL(skb);
			sb_netdev_kfree_skb(netdev, skb);
		}
	}
	MTRACE_KMEM_DEL(flow->pool);
	kfree(flow->pool);
	flow->pool = pool;
	flow->pool_used = pool_used;
	flow->pool_sz = sz;

	return 0;
err:
	/* pool is still NULL on the pre-allocation error paths; kfree(NULL)
	 * is a no-op, so this only releases a freshly-allocated pool. */
	MTRACE_KMEM_DEL(pool);
	kfree(pool);
	return rc;
}

/* Set a flow's queue parameters: length, resume threshold, and per-pass
 * weight (budget). */
static int worker_op_handler_queue(worker_t *worker, worker_op_t *op)
{
	u32 qlen, qth, qwt;
	netdev_t *netdev = op->args[0];
	flow_t *flow;

	get_arg(qlen, op->args[1]);
	get_arg(qth, op->args[2]);
	get_arg(qwt, op->args[3]);

	pr_debug("Worker %u: set \"%s\" queue to %u:%u:%u\n",
		 worker->cpu, netdev_name(netdev->ndev), qlen, qth, qwt);

	/* Length and weight must be non-zero; neither threshold nor weight
	 * may exceed the queue length. */
	if (!qlen || !qwt || (qth > qlen) || (qwt > qlen)) {
		return -EINVAL;
	}

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (!flow->netdev) {
		return -ENODEV;
	}
	if (flow->start) {
		return -EBUSY;
	}

	flow->qlen = qlen;
	flow->qth = qth;
	flow->qwt = qwt;

	return 0;
}

/* Set how many packets the flow should send; 0 means unlimited. */
static int worker_op_handler_pkt_cnt(worker_t *worker, worker_op_t *op)
{
	netdev_t *netdev = op->args[0];
	flow_t *flow;
	u64 cnt;

	get_arg(cnt, op->args[1]);

	pr_debug("Worker %u: set \"%s\" packet count to %llu\n",
		 worker->cpu, netdev_name(netdev->ndev), cnt);

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (!flow->netdev) {
		return -ENODEV;
	}

	flow->pkt_cnt = cnt;
	/* An unlimited flow gets a practically inexhaustible remainder. */
	flow->pkt_remain = cnt ? : U64_MAX;

	return 0;
}

/* Receive packet payloads from the control channel and stage them into the
 * flow's skb pool, round-robining the tx queue mapping over the enabled
 * txqs. On any failure, the skbs added by this call are rolled back. */
static int worker_op_handler_add_pkt(worker_t *worker, worker_op_t *op)
{
	int rc;
	flow_t *flow;
	netdev_t *netdev;
	unsigned int txq, sz, n = 0;
	struct sk_buff *skb;
	proto_rxd_t *rxd;
	proto_handle_t *handle;
	struct net_device *ndev;

	netdev = op->args[0];
	ndev = netdev->ndev;
	handle = op->args[1];
	rxd = op->args[2];

	pr_debug("Worker %u: add pkts of \"%s\"\n",
		 worker->cpu, netdev_name(ndev));

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (!flow->netdev) {
		rc = -ENODEV;
		goto err;
	}

	if (flow->start) {
		rc = -EBUSY;
		goto err;
	}

	/* Resume the txq round-robin where the existing pool left off:
	 * advance (pool_used mod weight) set bits from the first one. The
	 * bitmap is never empty here (bind sets bit 0 and pkt_txq rejects an
	 * empty mask), so the bitmap_weight() divisor is non-zero. */
	n = flow->pool_used % bitmap_weight(flow->txq_bitmap, ndev->real_num_tx_queues);
	txq = find_first_bit(flow->txq_bitmap, ndev->real_num_tx_queues);
	while (n--) {
		txq = find_next_bit(flow->txq_bitmap, ndev->real_num_tx_queues, txq + 1);
	}

	/* n is reused as the count of packets added by THIS call — exactly
	 * how many pool slots the error path below must unwind. */
	for (n = 0; ; n++) {
		if ((rc = proto_recv(handle, rxd)) < 0) {
			goto err;
		}
		/* A zero-length descriptor marks the end of the stream. */
		if (!rxd->hdr.length) {
			break;
		}
		if (flow->pool_used == flow->pool_sz) {
			rc = -ENOMEM;
			goto err;
		}
		/* Pad short frames up to the minimum Ethernet frame size. */
		sz = max_t(unsigned int, ETH_ZLEN, rxd->hdr.length);
		if (!(skb = sb_netdev_alloc_skb(netdev, sz, GFP_KERNEL | GFP_DMA))
		||  MTRACE_SB_NETDEV_SKB_ADD(skb, netdev)) {
			pr_err("%s(): failed to alloc skb\n", __func__);
			rc = -ENOMEM;
			goto err;
		}
		skb_reset_mac_header(skb);
		skb_set_queue_mapping(skb, txq);
		memcpy(skb->data, rxd->buf, rxd->hdr.length);
		/* Zero the padding bytes beyond the received payload. */
		if (rxd->hdr.length < sz) {
			memset(skb->data + rxd->hdr.length, 0, sz - rxd->hdr.length);
		}
		skb_put(skb, sz);
		flow->pool[flow->pool_used++] = skb;
		/* Advance the round-robin, wrapping to the first enabled txq. */
		txq = find_next_bit(flow->txq_bitmap, ndev->real_num_tx_queues, txq + 1);
		if (txq >= ndev->real_num_tx_queues) {
			txq = find_first_bit(flow->txq_bitmap, ndev->real_num_tx_queues);
		}
	}

	return 0;
err:
	/* Roll back only the skbs appended during this call. */
	while (n--) {
		flow->pool_used--;
		MTRACE_SB_NETDEV_SKB_DEL(flow->pool[flow->pool_used]);
		sb_netdev_kfree_skb(netdev, flow->pool[flow->pool_used]);
		flow->pool[flow->pool_used] = NULL;
	}
	return rc;
}

/* Free every skb staged in the flow's pool. */
static int worker_op_handler_del_pkt(worker_t *worker, worker_op_t *op)
{
	netdev_t *netdev = op->args[0];
	struct sk_buff *skb;
	flow_t *flow;

	pr_debug("Worker %u: delete pkts of \"%s\"\n",
		 worker->cpu, netdev_name(netdev->ndev));

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (!flow->netdev) {
		return -ENODEV;
	}
	if (flow->start) {
		return -EBUSY;
	}

	/* Drain back-to-front, clearing each slot as it is released. */
	while (flow->pool_used) {
		skb = flow->pool[--flow->pool_used];
		MTRACE_SB_NETDEV_SKB_DEL(skb);
		sb_netdev_kfree_skb(netdev, skb);
		flow->pool[flow->pool_used] = NULL;
	}

	return 0;
}

/* Parse a txq list string, install it as the flow's txq bitmap, and re-map
 * every pooled skb round-robin over the newly enabled queues. */
static int worker_op_handler_pkt_txq(worker_t *worker, worker_op_t *op)
{
	int rc;
	flow_t *flow;
	netdev_t *netdev;
	const char *str;
	unsigned int i, txq;
	unsigned long *txq_bitmap = NULL;
	struct sk_buff *skb;
	struct net_device *ndev;

	netdev = op->args[0];
	ndev = netdev->ndev;
	str = op->args[1];

	pr_debug("Worker %u: set \"%s\" pkts txq mapping to %s\n",
		 worker->cpu, netdev_name(ndev), str);

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (!flow->netdev) {
		rc = -ENODEV;
		goto err;
	}

	if (!str) {
		rc = -EINVAL;
		goto err;
	}

	if (flow->start) {
		rc = -EBUSY;
		goto err;
	}

	/* BITS_TO_LONGS() yields a count of longs, not bytes — scale by
	 * sizeof(unsigned long) or the bitmap is undersized. */
	txq_bitmap = kzalloc(BITS_TO_LONGS(ndev->real_num_tx_queues) * sizeof(unsigned long),
			     GFP_KERNEL);
	if (!txq_bitmap || MTRACE_KMEM_ADD(txq_bitmap)) {
		pr_err("%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	if ((rc = bitmap_parselist(str, txq_bitmap, ndev->real_num_tx_queues))) {
		goto err;
	}
	/* At least one txq must remain enabled (add_pkt divides by the
	 * bitmap weight). */
	if (bitmap_empty(txq_bitmap, ndev->real_num_tx_queues)) {
		rc = -EINVAL;
		goto err;
	}
	MTRACE_KMEM_DEL(flow->txq_bitmap);
	kfree(flow->txq_bitmap);
	flow->txq_bitmap = txq_bitmap;

	/* Re-assign pooled skbs round-robin across the enabled txqs. */
	txq = find_first_bit(flow->txq_bitmap, ndev->real_num_tx_queues);
	for (i = 0; i < flow->pool_used; i++) {
		skb = flow->pool[i];
		skb_set_queue_mapping(skb, txq);
		txq = find_next_bit(flow->txq_bitmap, ndev->real_num_tx_queues, txq + 1);
		if (txq >= ndev->real_num_tx_queues) {
			txq = find_first_bit(flow->txq_bitmap, ndev->real_num_tx_queues);
		}
	}

	return 0;
err:
	/* txq_bitmap is NULL on the early error paths; kfree(NULL) is fine. */
	MTRACE_KMEM_DEL(txq_bitmap);
	kfree(txq_bitmap);
	return rc;
}

/* Send every pooled packet back over the control channel, one txd per skb. */
static int worker_op_handler_dump_pkt(worker_t *worker, worker_op_t *op)
{
	int rc;
	unsigned int i;
	netdev_t *netdev = op->args[0];
	proto_handle_t *handle = op->args[1];
	proto_txd_t txd;
	struct sk_buff *skb;
	flow_t *flow;

	pr_debug("Worker %u: dump pkts of \"%s\"\n",
		 worker->cpu, netdev_name(netdev->ndev));

	flow = per_cpu_ptr(netdev->pcpu_flow, worker->cpu);
	if (!flow->netdev) {
		return -ENODEV;
	}
	if (flow->start) {
		return -EBUSY;
	}

	for (i = 0; i < flow->pool_used; i++) {
		skb = flow->pool[i];
		/* Single-iovec descriptor covering the whole frame. */
		txd.hdr.magic = MAGIC_PRIV;
		txd.num = 1;
		txd.iov[0].iov_base = skb->data;
		txd.iov[0].iov_len = skb->len;
		rc = proto_send(handle, &txd);
		if (rc < 0) {
			return rc;
		}
	}

	return 0;
}

/* Opcode -> handler dispatch table. Unset slots (and out-of-range opcodes)
 * are rejected with -EINVAL by worker_op_process(). */
static worker_op_handler_t *worker_op_handler[WORKER_OP_MAXIMUM] = {
	[WORKER_OP_WORKER]    = worker_op_handler_worker,
	[WORKER_OP_REG]       = worker_op_handler_reg,
	[WORKER_OP_UNREG]     = worker_op_handler_unreg,
	[WORKER_OP_BIND]      = worker_op_handler_bind,
	[WORKER_OP_UNBIND]    = worker_op_handler_unbind,
	[WORKER_OP_START]     = worker_op_handler_start,
	[WORKER_OP_STOP]      = worker_op_handler_stop,
	[WORKER_OP_FREE]      = worker_op_handler_free,
	[WORKER_OP_FC]        = worker_op_handler_fc,
	[WORKER_OP_QUEUE]     = worker_op_handler_queue,
	[WORKER_OP_POOL_SZ]   = worker_op_handler_pool_sz,
	[WORKER_OP_PKT_CNT]   = worker_op_handler_pkt_cnt,
	[WORKER_OP_ADD_PKT]   = worker_op_handler_add_pkt,
	[WORKER_OP_DEL_PKT]   = worker_op_handler_del_pkt,
	[WORKER_OP_PKT_TXQ]   = worker_op_handler_pkt_txq,
	[WORKER_OP_DUMP_PKT]  = worker_op_handler_dump_pkt,
};

/* Dispatch a posted op on the worker thread and publish the result. An
 * -EINPROGRESS response keeps the op pending (the poster re-drives it);
 * anything else clears worker->op and wakes the poster. */
static void worker_op_process(worker_t *worker, worker_op_t *op)
{
	worker_op_handler_t *handler = NULL;

	/* Only log on the first attempt, not on -EINPROGRESS re-drives. */
	if (worker->op_resp != -EINPROGRESS) {
		pr_debug("Worker %u: receive op %x\n", worker->cpu, op->opcode);
	}

	if (op->opcode < WORKER_OP_MAXIMUM) {
		handler = worker_op_handler[op->opcode];
	}
	worker->op_resp = handler ? handler(worker, op) : -EINVAL;

	if (worker->op_resp != -EINPROGRESS) {
		pr_debug("Worker %u: post finish for op %u\n", worker->cpu, op->opcode);
		WRITE_ONCE(worker->op, NULL);
		wake_up(&worker->resp_wq);
	}
}

/**
 * @brief	Post a command to specified worker
 */
int worker_op_post(worker_op_t *op)
{
	int rc;
	bool parallel;
	worker_t *worker;
	unsigned int cpu;

	pr_debug("Post op 0x%x\n", op->opcode);

	/* Every targeted cpu must actually host a worker. */
	if (!cpumask_subset(&op->cpumask, worker_cpumask)) {
		rc = -ENODEV;
		goto err;
	}

	/* Exclusive ops may target a single worker only. */
	if ((op->opcode & WORKER_OP_F_EXCLUSIVE) && cpumask_weight(&op->cpumask) > 1) {
		rc = -EINVAL;
		goto err;
	}

	/* Strip the flag bits so handlers see the bare opcode. */
	parallel = !!(op->opcode & WORKER_OP_F_PARALLEL);
	op->opcode &= ~(WORKER_OP_F_PARALLEL | WORKER_OP_F_EXCLUSIVE);

	rc = 0;
	if (parallel) {
		/* Phase 1: hand the op to every targeted worker. The wmb
		 * pair orders op_resp before op, and op before the wake
		 * check, so the busy-polling worker sees a consistent op. */
		for_each_cpu(cpu, &op->cpumask) {
			worker = worker_get(cpu);
			mutex_lock(&worker->op_lock);
			worker->op_resp = 0;
			smp_wmb();
			WRITE_ONCE(worker->op, op);
			smp_wmb();
			/* NOTE(review): after the subset check above this
			 * condition can never be true, so wake_up_process()
			 * is unreachable — workers busy-poll in worker_fn().
			 * Confirm whether this branch is vestigial. */
			if (!cpumask_test_cpu(cpu, worker_cpumask)) {
				wake_up_process(worker->th);
			}
		}

		/* Phase 2: collect responses; the first error wins. */
		for_each_cpu(cpu, &op->cpumask) {
			worker = worker_get(cpu);
			wait_event(worker->resp_wq, !READ_ONCE(worker->op));
			rc = rc ? : worker->op_resp;
			mutex_unlock(&worker->op_lock);
		}
	} else {
		/* Serial mode: post and wait one worker at a time. */
		for_each_cpu(cpu, &op->cpumask) {
			worker = worker_get(cpu);
			mutex_lock(&worker->op_lock);
			worker->op_resp = 0;
			smp_wmb();
			WRITE_ONCE(worker->op, op);
			smp_wmb();
			if (!cpumask_test_cpu(cpu, worker_cpumask)) {
				wake_up_process(worker->th);
			}
			wait_event(worker->resp_wq, !READ_ONCE(worker->op));
			rc = rc ? : worker->op_resp;
			mutex_unlock(&worker->op_lock);
		}
	}

	if (rc) {
		goto err;
	}

	return 0;
err:
	return rc;
}

/* Parse a cpu list string ("all" or a cpulist) into cpumask; the result must
 * be a subset of the worker cpumask. On failure an error line is sent to the
 * requester when a handle is supplied. */
static int cmd_parse_cpumask(const char *str, cpumask_t *cpumask,
			     proto_handle_t *handle)
{
	if (!strcmp(str, "all")) {
		cpumask_copy(cpumask, worker_cpumask);
		return 0;
	}

	cpumask_clear(cpumask);
	if (!cpulist_parse(str, cpumask)
	&&  cpumask_subset(cpumask, worker_cpumask)) {
		return 0;
	}

	if (handle) {
		cmd_pr_err(handle, "ERR: invalid cpu \"%s\"\n", str);
	}
	return -EINVAL;
}

/* Resolve a netdev by name; reports the error to the requester when a
 * handle is supplied. */
static int cmd_parse_netdev(const char *str, netdev_t **netdev,
			    proto_handle_t *handle)
{
	*netdev = sb_netdev_get_by_name(str);
	if (*netdev) {
		return 0;
	}

	if (handle) {
		cmd_pr_err(handle, "ERR: invalid netdev \"%s\"\n", str);
	}
	return -ENODEV;
}

/* "worker" command: with a cpu list, forward a status op to those workers;
 * with no argument, list the cpus that have a live worker thread. */
static int worker_cmd_worker(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int rc;
	unsigned int cpu;
	const char *arg_cpus;
	worker_t *worker;
	worker_op_t op;

	rxd->len = rxd->hdr.length;
	arg_cpus = proto_get_str(&rxd->buf, &rxd->len);

	if (arg_cpus) {
		op.opcode = WORKER_OP_WORKER;
		op.args[0] = handle;
		rc = cmd_parse_cpumask(arg_cpus, &op.cpumask, handle);
		if (!rc) {
			rc = worker_op_post(&op);
		}
		return rc;
	}

	/* No argument: enumerate live workers locally. */
	rc = cmd_parse_cpumask("all", &op.cpumask, handle);
	if (rc) {
		return rc;
	}
	for_each_cpu(cpu, &op.cpumask) {
		worker = worker_get(cpu);
		if (IS_ERR_OR_NULL(worker->th)) {
			continue;
		}
		cmd_pr_info(handle, "worker %u\n", worker->cpu);
	}

	return 0;
}

/* Command shape: <cpus> <dev>. Peeks the arguments from a copy of the rx
 * descriptor so the handler still sees the original stream. */
static int worker_cmd_cpu_dev(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int rc;
	const char *arg_cpus, *arg_dev;
	proto_rxd_t r = *rxd;
	worker_op_t op;

	r.len = r.hdr.length;
	if (!(arg_cpus = proto_get_str(&r.buf, &r.len))
	||  !(arg_dev = proto_get_str(&r.buf, &r.len))) {
		return -EINVAL;
	}

	op.opcode = param;
	op.args[1] = handle;
	op.args[2] = rxd;

	rc = cmd_parse_cpumask(arg_cpus, &op.cpumask, handle);
	if (!rc) {
		rc = cmd_parse_netdev(arg_dev, (netdev_t **)&op.args[0], handle);
	}
	if (!rc) {
		rc = worker_op_post(&op);
	}

	return rc;
}

/* Command shape: <cpus> <dev> <u32>. */
static int worker_cmd_cpu_dev_u32(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int rc;
	u32 val;	/* renamed from "u32", which shadowed the type name */
	const char *arg_cpus, *arg_dev;
	proto_rxd_t r = *rxd;
	worker_op_t op;

	r.len = r.hdr.length;
	if (!(arg_cpus = proto_get_str(&r.buf, &r.len))
	||  !(arg_dev = proto_get_str(&r.buf, &r.len))
	||  (proto_get_u32(&r.buf, &r.len, &val) < 0)) {
		return -EINVAL;
	}

	op.opcode = param;
	set_arg(op.args[1], val);
	op.args[2] = handle;
	op.args[3] = rxd;

	rc = cmd_parse_cpumask(arg_cpus, &op.cpumask, handle);
	if (!rc) {
		rc = cmd_parse_netdev(arg_dev, (netdev_t **)&op.args[0], handle);
	}
	if (!rc) {
		rc = worker_op_post(&op);
	}

	return rc;
}

/* Command shape: <cpus> <dev> <u64>. */
static int worker_cmd_cpu_dev_u64(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int rc;
	u64 val;	/* renamed from "u64", which shadowed the type name */
	const char *arg_cpus, *arg_dev;
	proto_rxd_t r = *rxd;
	worker_op_t op;

	r.len = r.hdr.length;
	if (!(arg_cpus = proto_get_str(&r.buf, &r.len))
	||  !(arg_dev = proto_get_str(&r.buf, &r.len))
	||  (proto_get_u64(&r.buf, &r.len, &val) < 0)) {
		return -EINVAL;
	}

	op.opcode = param;
	set_arg(op.args[1], val);
	op.args[2] = handle;
	op.args[3] = rxd;

	rc = cmd_parse_cpumask(arg_cpus, &op.cpumask, handle);
	if (!rc) {
		rc = cmd_parse_netdev(arg_dev, (netdev_t **)&op.args[0], handle);
	}
	if (!rc) {
		rc = worker_op_post(&op);
	}

	return rc;
}

/* Command shape: <cpus> <dev> <string>. */
static int worker_cmd_cpu_dev_str(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int rc;
	char *arg_str;
	const char *arg_cpus, *arg_dev;
	proto_rxd_t r = *rxd;
	worker_op_t op;

	r.len = r.hdr.length;
	if (!(arg_cpus = proto_get_str(&r.buf, &r.len))
	||  !(arg_dev = proto_get_str(&r.buf, &r.len))
	||  !(arg_str = proto_get_str(&r.buf, &r.len))) {
		return -EINVAL;
	}

	op.opcode = param;
	op.args[1] = arg_str;
	op.args[2] = handle;
	op.args[3] = rxd;

	rc = cmd_parse_cpumask(arg_cpus, &op.cpumask, handle);
	if (!rc) {
		rc = cmd_parse_netdev(arg_dev, (netdev_t **)&op.args[0], handle);
	}
	if (!rc) {
		rc = worker_op_post(&op);
	}

	return rc;
}

/* Command shape: <cpus> <dev> <u32> <u32> <u32>. */
static int worker_cmd_cpu_dev_u32_u32_u32(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int rc;
	u32 vals[3];
	const char *arg_cpus, *arg_dev;
	proto_rxd_t r = *rxd;
	worker_op_t op;

	r.len = r.hdr.length;
	if (!(arg_cpus = proto_get_str(&r.buf, &r.len))
	||  !(arg_dev = proto_get_str(&r.buf, &r.len))
	||  (proto_get_u32(&r.buf, &r.len, &vals[0]) < 0)
	||  (proto_get_u32(&r.buf, &r.len, &vals[1]) < 0)
	||  (proto_get_u32(&r.buf, &r.len, &vals[2]) < 0)) {
		return -EINVAL;
	}

	op.opcode = param;
	set_arg(op.args[1], vals[0]);
	set_arg(op.args[2], vals[1]);
	set_arg(op.args[3], vals[2]);
	op.args[4] = handle;
	op.args[5] = rxd;

	rc = cmd_parse_cpumask(arg_cpus, &op.cpumask, handle);
	if (!rc) {
		rc = cmd_parse_netdev(arg_dev, (netdev_t **)&op.args[0], handle);
	}
	if (!rc) {
		rc = worker_op_post(&op);
	}

	return rc;
}

/* Command shape: <cpus> <dev> <u32> <u32> <u64>. */
static int worker_cmd_cpu_dev_u32_u32_u64(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int rc;
	u32 vals[2];
	u64 val64;
	const char *arg_cpus, *arg_dev;
	proto_rxd_t r = *rxd;
	worker_op_t op;

	r.len = r.hdr.length;
	if (!(arg_cpus = proto_get_str(&r.buf, &r.len))
	||  !(arg_dev = proto_get_str(&r.buf, &r.len))
	||  (proto_get_u32(&r.buf, &r.len, &vals[0]) < 0)
	||  (proto_get_u32(&r.buf, &r.len, &vals[1]) < 0)
	||  (proto_get_u64(&r.buf, &r.len, &val64) < 0)) {
		return -EINVAL;
	}

	op.opcode = param;
	set_arg(op.args[1], vals[0]);
	set_arg(op.args[2], vals[1]);
	set_arg(op.args[3], val64);
	op.args[4] = handle;
	op.args[5] = rxd;

	rc = cmd_parse_cpumask(arg_cpus, &op.cpumask, handle);
	if (!rc) {
		rc = cmd_parse_netdev(arg_dev, (netdev_t **)&op.args[0], handle);
	}
	if (!rc) {
		rc = worker_op_post(&op);
	}

	return rc;
}

/* Protocol layer indices.
 * NOTE(review): not referenced anywhere in this file — presumably consumed
 * by packet-construction code elsewhere; verify before removing. */
enum {
	PROTO_L1_MAC = 0,
	PROTO_L2_ETH,
	PROTO_L3_IPV4,
	PROTO_L4_UDP,
};

/* Command-id -> {parser, op-params} table used by worker_cmd_fn(). The
 * second field is the opcode (plus flag bits) the parser posts to workers. */
static cmd_t worker_cmd[WORKER_ID_MAX] = {
	[WORKER_ID_WORKER]    = {worker_cmd_worker,              0},
	[WORKER_ID_FC]        = {worker_cmd_cpu_dev_u32_u32_u64, WORKER_OP_FC},
	[WORKER_ID_QUEUE]     = {worker_cmd_cpu_dev_u32_u32_u32, WORKER_OP_QUEUE},
	[WORKER_ID_POOL_SZ]   = {worker_cmd_cpu_dev_u32,         WORKER_OP_POOL_SZ},
	[WORKER_ID_PKT_CNT]   = {worker_cmd_cpu_dev_u64,         WORKER_OP_PKT_CNT},
	[WORKER_ID_ADD_PKT]   = {worker_cmd_cpu_dev,             WORKER_OP_ADD_PKT | WORKER_OP_F_EXCLUSIVE},
	[WORKER_ID_DEL_PKT]   = {worker_cmd_cpu_dev,             WORKER_OP_DEL_PKT},
	[WORKER_ID_PKT_TXQ]   = {worker_cmd_cpu_dev_str,         WORKER_OP_PKT_TXQ},
	[WORKER_ID_DUMP_PKT]  = {worker_cmd_cpu_dev,             WORKER_OP_DUMP_PKT | WORKER_OP_F_EXCLUSIVE},
};

/* Entry point for MAGIC_WORKER commands: validate the id and dispatch to
 * the matching parser from the worker_cmd table. */
static int worker_cmd_fn(proto_handle_t *handle, proto_rxd_t *desc, unsigned long param)
{
	u8 id = param;
	cmd_fn_t *fn;

	if (id >= WORKER_ID_MAX) {
		return -EINVAL;
	}

	fn = worker_cmd[id].fn;
	if (!fn) {
		return -ENOSYS;
	}

	return fn(handle, desc, worker_cmd[id].param);
}

/* Look up the per-cpu worker state for the given cpu. */
worker_t *worker_get(unsigned int cpu)
{
	worker_t *w = per_cpu_ptr(&all_worker, cpu);

	return w;
}

/* Refill the flow-control budget once the current burst is spent and a full
 * burst interval has elapsed.
 * NOTE(review): only one fc_burst of credit is added per refill even when
 * several intervals elapsed, and burst_prev snaps to "now" (losing remainder
 * time) — long stalls are not back-credited. Confirm this is the intended
 * shaping behavior. */
static void calc_burst_remain(flow_t *flow)
{
	plat_time_t now;

	if (flow->burst_remain <= 0) {
		now = plat_time_get();
		if (unlikely(now - flow->burst_prev >= flow->burst_invl)) {
			flow->burst_remain += flow->fc_burst;
			flow->burst_prev = now;
		}
	}
}

/* Default transmit burst: push skbs straight into the driver's xmit hook,
 * stopping at the first frozen/stopped queue or non-OK return. Returns the
 * number of skbs actually handed to the driver. */
static unsigned int traffic_burst(netdev_t *netdev, struct sk_buff **pool, unsigned int nr)
{
	struct net_device *ndev = netdev->ndev;
	struct netdev_queue *tx_queue;
	struct sk_buff *skb;
	unsigned int sent = 0;

	while (sent < nr) {
		skb = pool[sent];
		tx_queue = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
		if (netif_xmit_frozen_or_stopped(tx_queue)) {
			break;
		}
		if (netdev->netdev_ops->ndo_start_xmit(skb, ndev) != NETDEV_TX_OK) {
			break;
		}
		sent++;
	}

	return sent;
}

/* Drive one transmit pass for a started flow: take all enabled tx queue
 * locks, send up to qwt packets from the pool (honoring the packet count
 * and flow-control budgets), update the seqcount-protected tx statistics,
 * then release the locks. Bails out entirely if any queue lock is busy. */
static void traffic_run(flow_t *flow)
{
	u64 pkt_remain, pkts, bytes;
	s64 burst_remain;
	netdev_t *netdev;
	unsigned int i, n, nr, budget, txq, head, q_used;
	struct sk_buff *skbs[32];
	struct net_device *ndev;
	struct netdev_queue *tx_queue;
	void (*poll)(flow_t *);
	unsigned int (*burst)(struct net_device *, struct sk_buff **, unsigned int nr);

	if (!flow->pool_used) {
		goto end;
	}

	/* Top up the flow-control budget if the interval elapsed. */
	calc_burst_remain(flow);

	budget = flow->qwt;
	netdev = flow->netdev;
	ndev = netdev->ndev;
	/* Optional driver-private hooks; fall back to generic xmit below. */
	poll = netdev->priv_ops ? netdev->priv_ops->pcpu_flow_poll : NULL;
	burst = netdev->priv_ops ? netdev->priv_ops->netdev_tx_burst : NULL;

	/* Try-lock every enabled txq; on any contention undo and retry on
	 * the next worker loop pass rather than spinning. */
	local_bh_disable();
	for_each_set_bit(txq, flow->txq_bitmap, ndev->real_num_tx_queues) {
		tx_queue = netdev_get_tx_queue(ndev, txq);
		if (!HARD_TX_TRYLOCK(ndev, tx_queue)) {
			goto err;
		}
	}

	while (budget) {
		/* Work on local copies; committed state is only advanced
		 * below for packets the driver actually accepted. */
		pkt_remain = flow->pkt_remain;
		burst_remain = flow->burst_remain;
		head = flow->tx_head;
		q_used = flow->q_used;
		/* Hysteresis: once halted, stay halted until the queue
		 * drains to qth; otherwise halt only when completely full. */
		if ((flow->q_halt = flow->q_halt ? (q_used > flow->qth) : (q_used == flow->qlen))) {
			break;
		}
		n = min(budget, (unsigned int)ARRAY_SIZE(skbs));
		n = min(n, flow->qlen - q_used);
		/* Gather candidates from the pool ring, charging the local
		 * packet and flow-control budgets. */
		for (nr = 0; nr < n; nr++) {
			if (!pkt_remain || (burst_remain <= 0)) {
				break;
			}
			skbs[nr] = flow->pool[head++];
			head = (head == flow->pool_used) ? 0 : head;
			pkt_remain--;
			burst_remain -= flow->fc_mode ? skbs[nr]->len : 1;
		}
		if (!nr) {
			break;
		}
		/* Without a driver poll hook, completion is tracked via skb
		 * refcounts (see pcpu_flow_poll()): hold one extra ref per
		 * send and drop it again for rejected packets. */
		if (!poll) {
			for (i = 0; i < nr; i++) {
				skb_get(skbs[i]);
			}
		}
		n = burst ? (*burst)(ndev, skbs, nr) : traffic_burst(netdev, skbs, nr);
		if (!poll) {
			for (i = n; i < nr; i++) {
				dev_kfree_skb(skbs[i]);
			}
		}
		/* Commit state only for the n packets the driver accepted. */
		pkts = flow->tx_pkts;
		bytes = flow->tx_bytes;
		for (i = 0; i < n; i++) {
			flow->tx_head++;
			flow->tx_head = (flow->tx_head == flow->pool_used) ? 0 : flow->tx_head;
			if (flow->pkt_cnt) {
				flow->pkt_remain--;
			}
			if (flow->fc_rate) {
				flow->burst_remain -= flow->fc_mode ? skbs[i]->len : 1;
			}
			flow->q_used++;
			pkts++;
			bytes += skbs[i]->len + ETH_FCS_LEN;
		}
		/* Publish stats under the seqcount so readers see a
		 * consistent pkts/bytes pair. */
		raw_write_seqcount_begin(&flow->tx_stats_seq);
		flow->tx_pkts = pkts;
		flow->tx_bytes = bytes;
		raw_write_seqcount_end(&flow->tx_stats_seq);

		if (n < nr) {
			break;
		}
		budget -= n;
	}

	for_each_set_bit(txq, flow->txq_bitmap, ndev->real_num_tx_queues) {
		tx_queue = netdev_get_tx_queue(ndev, txq);
		txq_trans_update(ndev, tx_queue);
		HARD_TX_UNLOCK(ndev, tx_queue);
	}
	local_bh_enable();

	return;
err:
	/* Unlock only the queues locked before the failed try-lock (txq
	 * still holds the bit that failed). */
	for_each_set_bit(i, flow->txq_bitmap, ndev->real_num_tx_queues) {
		if (i >= txq) {
			break;
		}
		tx_queue = netdev_get_tx_queue(ndev, i);
		HARD_TX_UNLOCK(ndev, tx_queue);
	}
	local_bh_enable();
end:
	return;
}

/* Recompute how many of a flow's packets are still in flight. A driver-
 * private poll hook does its own accounting; otherwise infer in-flight
 * count from each pooled skb's refcount (traffic_run() takes one extra
 * reference per send when no hook is present). */
static void pcpu_flow_poll(flow_t *flow)
{
	unsigned int i, q_used;
	struct sk_buff *skb;
	void (*poll)(flow_t *);

	poll = flow->netdev->priv_ops ? flow->netdev->priv_ops->pcpu_flow_poll : NULL;

	if (poll) {
		return poll(flow);
	}

	for (i = 0, q_used = 0; i < flow->pool_used; i++) {
		skb = flow->pool[i];
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
		/* skb->users was a plain atomic_t before 4.13. */
		q_used += atomic_read(&skb->users) - 1;
#else
		q_used += refcount_read(&skb->users) - 1;
#endif
	}
	flow->q_used = q_used;
}

/* Worker thread main loop: busy-polls registered callbacks, drives every
 * started flow, and services posted ops. It never sleeps waiting for work;
 * it only yields the cpu when need_resched() is set. */
static int worker_fn(void *arg)
{
	flow_t *flow, *flow_tmp;
	worker_t *worker = arg;
	worker_op_t *op;
	worker_poll_t *poll, *poll_tmp;

	/* Clear the poison op pointer seeded by worker_init(); this is the
	 * "thread is alive" handshake worker_init() spins on. */
	WRITE_ONCE(worker->op, NULL);

	while (1) {
		/* Exit only after all flows are unbound, so teardown can
		 * still post WORKER_OP_FREE and have it processed. */
		if (kthread_should_stop() && list_empty(&worker->flow_list)) {
			break;
		}
		list_for_each_entry_safe(poll, poll_tmp, &worker->poll_list, node) {
			poll->fn(poll->arg);
		}
		list_for_each_entry_safe(flow, flow_tmp, &worker->flow_list, node) {
			if (!flow->start) {
				continue;
			}
			traffic_run(flow);
			pcpu_flow_poll(flow);
		}

		/* Ops are picked up opportunistically between tx passes. */
		if (unlikely(op = READ_ONCE(worker->op))) {
			worker_op_process(worker, op);
		}

		if (unlikely(need_resched())) {
			schedule();
		}
	}

	return 0;
}

/* Per-cpu worker bring-up: initialize the worker state and, when the cpu is
 * in the configured worker cpumask, spawn, pin, and start its kthread. */
static int worker_init(unsigned int cpu)
{
	int rc;
	worker_t *worker;

	worker = per_cpu_ptr(&all_worker, cpu);
	worker->cpu = cpu;
	/* Seed op with a poison (non-NULL) pointer; worker_fn() clears it as
	 * its first action, which is how startup is detected below. */
	WRITE_ONCE(worker->op, (void *)0x1 + POISON_POINTER_DELTA);
	init_waitqueue_head(&worker->op_wq);
	init_waitqueue_head(&worker->resp_wq);
	mutex_init(&worker->op_lock);
	INIT_LIST_HEAD(&worker->poll_list);
	INIT_LIST_HEAD(&worker->flow_list);

	/* Cpus outside the configured mask get state but no thread; this is
	 * a successful no-op (rc == 0), not an error. */
	if (!cpumask_test_cpu(cpu, worker_cpumask)) {
		rc = 0;
		goto err;
	}

	worker->th = kthread_create(worker_fn, worker, "worker%u", worker->cpu);
	if (IS_ERR(worker->th)) {
		pr_err("Failed to create \"worker %u\"\n", worker->cpu);
		rc = PTR_ERR(worker->th);
		goto err;
	}
	set_user_nice(worker->th, -5);
	kthread_bind(worker->th, worker->cpu);
	wake_up_process(worker->th);

	/* Spin until worker_fn() clears the poison op pointer. */
	while (READ_ONCE(worker->op)) {
		schedule();
	}

	return 0;
err:
	worker->th = NULL;
	return rc;
}

/* Stop and forget the worker thread of one cpu; safe when no thread was
 * ever started there. */
static void worker_cleanup(unsigned int cpu)
{
	worker_t *worker = per_cpu_ptr(&all_worker, cpu);

	if (IS_ERR_OR_NULL(worker->th)) {
		return;
	}

	set_tsk_thread_flag(worker->th, TIF_SIGPENDING);
	kthread_stop(worker->th);
	worker->th = NULL;
}

/* Module init: resolve the worker cpumask (module parameter or all online
 * cpus), initialize every online cpu's worker, and register the command
 * handler. On failure, tears down any workers already started. */
__init int worker_init_all(void)
{
	int rc;
	unsigned int cpu;

	if (worker_cpus) {
		/* The parameter must parse, be non-empty, and only name
		 * online cpus. */
		if (cpulist_parse(worker_cpus, &__worker_cpumask)
		||  cpumask_empty(&__worker_cpumask)
		||  !cpumask_subset(&__worker_cpumask, cpu_online_mask)) {
			pr_err("Invalid parameter of \"worker_cpus\"\n");
			rc = -EINVAL;
			goto err;
		}
	} else {
		cpumask_copy(&__worker_cpumask, cpu_online_mask);
	}

	/* Every online cpu gets worker state; only cpus in the mask get a
	 * thread (see worker_init()). */
	for_each_online_cpu(cpu) {
		if ((rc = worker_init(cpu))) {
			goto err;
		}
	}

	if ((rc = cmd_fn_register(MAGIC_WORKER, worker_cmd_fn))) {
		pr_err("Failed to register command functions\n");
		goto err;
	}

	return 0;
err:
	/* worker_cleanup() tolerates cpus whose thread was never started. */
	for_each_cpu(cpu, worker_cpumask) {
		worker_cleanup(cpu);
	}
	return rc;
}

/* Module exit: unbind every flow on every worker, then stop the threads. */
void worker_cleanup_all(void)
{
	unsigned int cpu;
	worker_op_t op;

	/* Drain all flows first so the threads' exit condition
	 * (empty flow_list) can be met. */
	op.opcode = WORKER_OP_FREE;
	cpumask_copy(&op.cpumask, worker_cpumask);
	worker_op_post(&op);

	for_each_cpu(cpu, worker_cpumask) {
		worker_cleanup(cpu);
	}
}

/**
 * @brief	register a poll instance
 */
int sb_worker_reg_poll(const char *name, poll_fn_t *fn, void *arg)
{
	worker_op_t op;

	/* Register the callback on every worker cpu. */
	op.opcode = WORKER_OP_REG;
	cpumask_copy(&op.cpumask, worker_cpumask);
	op.args[0] = (void *)name;
	op.args[1] = fn;
	op.args[2] = arg;

	return worker_op_post(&op);
}
EXPORT_SYMBOL(sb_worker_reg_poll);

/**
 * @brief	delete a poll instance
 */
void sb_worker_unreg_poll(const char *name, poll_fn_t *fn, void *arg)
{
	worker_op_t op;

	/* Remove the callback from every worker cpu; best-effort, so the
	 * post result is intentionally ignored. */
	op.opcode = WORKER_OP_UNREG;
	cpumask_copy(&op.cpumask, worker_cpumask);
	op.args[0] = (void *)name;
	op.args[1] = fn;
	op.args[2] = arg;
	worker_op_post(&op);
}
EXPORT_SYMBOL(sb_worker_unreg_poll);
