/*
 * Copyright (C) 2014
 *
 * Brick Yang <printfxxx@163.com>
 *
 * This program is free software. You can redistribute it and/or
 * modify it as you like.
 */

/**
 * @file	netdev.c
 * @brief	Net device control and result show
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>

#include <mtrace.h>

#include "netdev.h"
#include "cmd.h"

#define SB_SKB_TBL_HASH_BITS	14

/*
 * Linear-probe walk over the skb hash table: starts at the slot for hash 'i'
 * and visits every slot exactly once; 'e' becomes NULL when the whole table
 * has been scanned without an early break.
 */
#define sb_skb_table_for_each_entry(e, i, tmp)						\
	for (tmp = 0, e = &sb_skb_table[(i) & jhash_mask(SB_SKB_TBL_HASH_BITS)];	\
	     (tmp < jhash_size(SB_SKB_TBL_HASH_BITS)) || (e = NULL, 0);			\
	     tmp++, e = &sb_skb_table[((i) + tmp) & jhash_mask(SB_SKB_TBL_HASH_BITS)])

/* One slot of the skb lookup table, keyed by (net device, dma address). */
typedef struct sb_skb_entry {
	atomic_t used;			/* slot occupancy marker */
	seqcount_t seq;			/* keeps key/skb consistent for lockless readers */
	struct net_device *key_ndev;	/* lookup key, part 1: owning device */
	dma_addr_t key_skbh_baddr;	/* lookup key, part 2: skb header bus address */
	struct sk_buff *skb;		/* tracked skb */
} sb_skb_entry_t;

static char *netdev_filter;	/* "devs" module parameter: device name filter */
static sb_skb_entry_t sb_skb_table[jhash_size(SB_SKB_TBL_HASH_BITS)];
/* All managed devices; writers hold netdev_mutex, rx path reads under RCU. */
static LIST_HEAD(netdev_list);
static DEFINE_MUTEX(netdev_mutex);
static DEFINE_SPINLOCK(netdev_rxdump_lock);	/* serializes rx hex-dump output */

module_param_named(devs, netdev_filter, charp, S_IRUGO);
MODULE_PARM_DESC(devs, "Net device filter");

/* Resolved via kallsyms; these PHY symbols are not exported on all kernels. */
static DEFINE_KSYM_PTR(phy_start_machine);
static DEFINE_KSYM_PTR(phy_stop_machine);

/*
 * Convert a count accumulated over @ns nanoseconds into a per-second rate,
 * rounding to nearest.  To avoid overflowing val * scale, the multiplier is
 * down-graded (ns -> us -> ms -> s resolution) as @val grows; returns 0 when
 * the effective interval is not positive.
 */
static u64 calc_per_sec_value(u64 val, s64 ns)
{
	u64 scale;

	if (val < (U64_MAX / NSEC_PER_SEC)) {
		scale = NSEC_PER_SEC;
	} else if (val < (U64_MAX / USEC_PER_SEC)) {
		scale = USEC_PER_SEC;
		ns = div64_u64(ns, NSEC_PER_SEC / USEC_PER_SEC);
	} else if (val < (U64_MAX / MSEC_PER_SEC)) {
		scale = MSEC_PER_SEC;
		ns = div64_u64(ns, NSEC_PER_SEC / MSEC_PER_SEC);
	} else {
		scale = 1;
		ns = div64_u64(ns, NSEC_PER_SEC);
	}

	/* (ns >> 1) implements round-to-nearest in the division below. */
	return (ns > 0) ? div64_u64((val * scale) + (ns >> 1), ns) : 0;
}

/*
 * Recompute per-second TX/RX rates for @netdev over the last @ns nanoseconds
 * and publish them, plus the fresh counter snapshot, under stats_seq.
 * Called from the per-device delayed work roughly once a second.
 */
static void netdev_update_stats(netdev_t *netdev, s64 ns)
{
	u64 pkts, bytes, tx_pps, tx_Bps, tx_bps, rx_pps, rx_Bps, rx_bps;
	unsigned seq;
	unsigned int cpu;
	const flow_t *flow;
	netdev_stats_t tx = {}, rx = {};

	/* A non-positive interval would make the rate math meaningless. */
	if (ns <= 0) {
		return;
	}

	/* Sum TX counters across CPUs; the seqcount retry keeps pkts/bytes paired. */
	for_each_online_cpu(cpu) {
		flow = per_cpu_ptr(netdev->pcpu_flow, cpu);
		do {
			seq = raw_read_seqcount_begin(&flow->tx_stats_seq);
			pkts = flow->tx_pkts;
			bytes = flow->tx_bytes;
		} while (read_seqcount_retry(&flow->tx_stats_seq, seq));
		tx.pkts += pkts;
		tx.bytes += bytes;
	}

	/* RX side: either the driver's own stats (devstats mode) or our counters. */
	if (test_bit(NETDEV_F_DEVSTATS, &netdev->flags)) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
		struct rtnl_link_stats64 stats64;

		if (netdev->netdev_ops->ndo_get_stats64) {
			/* Pre-zero the fields we read; drivers may not fill them all. */
			stats64.rx_packets = stats64.rx_bytes = stats64.rx_errors = stats64.rx_dropped = 0;
			netdev->netdev_ops->ndo_get_stats64(netdev->ndev, &stats64);
			rx.pkts = stats64.rx_packets + stats64.rx_errors + stats64.rx_dropped;
			rx.bytes = stats64.rx_bytes;
		} else
#endif
		{
			const struct net_device_stats *stats;

			if (netdev->netdev_ops->ndo_get_stats) {
				stats = netdev->netdev_ops->ndo_get_stats(netdev->ndev);
			} else {
				stats = &netdev->ndev->stats;
			}
			rx.pkts = stats->rx_packets + stats->rx_errors + stats->rx_dropped;
			rx.bytes = stats->rx_bytes;
		}
		/* Clamp so device counter resets never make totals go backwards. */
		rx.pkts = max(rx.pkts, netdev->rx_prev.pkts);
		rx.bytes = max(rx.bytes, netdev->rx_prev.bytes);
	} else for_each_online_cpu(cpu) {
		flow = per_cpu_ptr(netdev->pcpu_flow, cpu);
		do {
			seq = raw_read_seqcount_begin(&flow->rx_stats_seq);
			pkts = flow->rx_pkts;
			bytes = flow->rx_bytes;
		} while (read_seqcount_retry(&flow->rx_stats_seq, seq));
		rx.pkts += pkts;
		rx.bytes += bytes;
	}

	/* Deltas since the previous tick become per-second rates. */
	pkts = tx.pkts - netdev->tx_prev.pkts;
	bytes = tx.bytes - netdev->tx_prev.bytes;
	tx_pps = calc_per_sec_value(pkts, ns);
	tx_Bps = calc_per_sec_value(bytes, ns);
	/* 20 extra bytes per frame — presumably preamble + inter-frame gap; TODO confirm. */
	tx_bps = calc_per_sec_value(pkts * 20 * 8 + bytes * 8, ns);
	pkts = rx.pkts - netdev->rx_prev.pkts;
	bytes = rx.bytes - netdev->rx_prev.bytes;
	rx_pps = calc_per_sec_value(pkts, ns);
	rx_Bps = calc_per_sec_value(bytes, ns);
	rx_bps = (pkts && bytes) ? calc_per_sec_value(pkts * 20 * 8 + bytes * 8, ns) : 0;

	/* Publish snapshot and rates atomically with respect to stats readers. */
	raw_write_seqcount_begin(&netdev->stats_seq);
	netdev->tx_prev = tx;
	netdev->rx_prev = rx;
	netdev->tx_pps_rt = tx_pps;
	netdev->tx_Bps_rt = tx_Bps;
	netdev->tx_bps_rt = tx_bps;
	netdev->rx_pps_rt = rx_pps;
	netdev->rx_Bps_rt = rx_Bps;
	netdev->rx_bps_rt = rx_bps;
	raw_write_seqcount_end(&netdev->stats_seq);
}

/*
 * Per-device housekeeping work, self-rescheduled every second: refreshes
 * rate statistics and services the one-shot requests posted by command
 * handlers through dwork_flags (start/stop/clear checkpointing, force-stop).
 */
static void netdev_delayed_work(struct work_struct *work)
{
	s64 ns;
	ktime_t now;
	netdev_t *netdev;

	netdev = container_of(to_delayed_work(work), netdev_t, dwork);
	now = ktime_get();
	ns = ktime_to_ns(ktime_sub(now, netdev->time_prev));
	netdev_update_stats(netdev, ns);
	netdev->time_prev = now;

	/* Latch current counters as the "start" checkpoint. */
	if (test_and_clear_bit(DW_F_SET_START_STATS, &netdev->dwork_flags)) {
		raw_write_seqcount_begin(&netdev->stats_seq);
		netdev->tx_start = netdev->tx_prev;
		netdev->rx_start = netdev->rx_prev;
		netdev->time_start = now;
		raw_write_seqcount_end(&netdev->stats_seq);
	}

	/* Latch current counters as the "stop" checkpoint and disarm force-stop. */
	if (test_and_clear_bit(DW_F_SET_STOP_STATS, &netdev->dwork_flags)) {
		raw_write_seqcount_begin(&netdev->stats_seq);
		netdev->tx_stop = netdev->tx_prev;
		netdev->rx_stop = netdev->rx_prev;
		netdev->time_stop = now;
		netdev->time_force_stop = ktime_set(0, 0);
		raw_write_seqcount_end(&netdev->stats_seq);
	}

	/* Reset every checkpoint and the base so displayed totals restart at 0. */
	if (test_and_clear_bit(DW_F_CLR_STATS, &netdev->dwork_flags)) {
		raw_write_seqcount_begin(&netdev->stats_seq);
		netdev->tx_start = netdev->tx_stop = netdev->tx_base = netdev->tx_prev;
		netdev->rx_start = netdev->rx_stop = netdev->rx_base = netdev->rx_prev;
		netdev->time_start = netdev->time_stop = now;
		raw_write_seqcount_end(&netdev->stats_seq);
	}

	/*
	 * Force-stop: first tick arms a 3 s deadline; on later ticks, once the
	 * deadline has passed, the driver's force-stop hook (if any) is invoked
	 * and the deadline re-armed.  The flag stays set until the requester
	 * clears it.
	 */
	if (test_bit(DW_F_SET_FORCE_STOP, &netdev->dwork_flags)) {
		raw_write_seqcount_begin(&netdev->stats_seq);
		if (!ktime_to_ns(netdev->time_force_stop)) {
			netdev->time_force_stop = ktime_add_ms(now, MSEC_PER_SEC * 3);
		} else {
			int (*force_stop)(struct net_device *);
			force_stop = netdev->priv_ops ? netdev->priv_ops->netdev_force_stop : NULL;
			if (force_stop && ktime_after(now, netdev->time_force_stop)) {
				netdev_info(netdev->ndev, "start force stop ...\n");
				force_stop(netdev->ndev);
				netdev->time_force_stop = ktime_add_ms(now, MSEC_PER_SEC * 3);
			}
		}
		raw_write_seqcount_end(&netdev->stats_seq);
	}

	schedule_delayed_work(&netdev->dwork, msecs_to_jiffies(MSEC_PER_SEC));
}

/*
 * Look up the managed entry for @ndev, or NULL if it is not managed.
 * Caller must hold netdev_mutex.
 */
static netdev_t *netdev_find(const struct net_device *ndev)
{
	netdev_t *entry;

	BUG_ON(!mutex_is_locked(&netdev_mutex));

	list_for_each_entry(entry, &netdev_list, node) {
		if (entry->ndev == ndev) {
			return entry;
		}
	}

	return NULL;
}

/*
 * Look up a managed entry by interface name, or NULL if none matches.
 * Caller must hold netdev_mutex.
 */
static netdev_t *netdev_find_by_name(const char *name)
{
	netdev_t *entry;

	BUG_ON(!mutex_is_locked(&netdev_mutex));

	list_for_each_entry(entry, &netdev_list, node) {
		if (strcmp(netdev_name(entry->ndev), name) == 0) {
			return entry;
		}
	}

	return NULL;
}

/*
 * Allocate, initialize and register a managed netdev_t for @add.
 * @add: net device to start managing
 * @ops: optional private ops; when set, @add's drvdata is claimed
 *
 * Caller must hold netdev_mutex.  Returns 0 on success, negative errno on
 * allocation or worker-bind failure (everything is rolled back on error).
 */
static int netdev_add(struct net_device *add, netdev_priv_ops_t *ops)
{
	int rc;
	flow_t *flow;
	netdev_t *netdev = NULL;
	worker_op_t op;
	unsigned int cpu;

	BUG_ON(!mutex_is_locked(&netdev_mutex));

	if (!(netdev = kzalloc(sizeof(*netdev), GFP_KERNEL)) || MTRACE_KMEM_ADD(netdev)
	||  !(netdev->pcpu_flow = alloc_percpu(flow_t)) || MTRACE_PERCPU_ADD(netdev->pcpu_flow)) {
		netdev_err(add, "%s(): failed to alloc memory\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	netdev->ndev = add;
	netdev->priv_ops = ops;
	if (ops) {
		BUG_ON(dev_get_drvdata(&add->dev));
		dev_set_drvdata(&add->dev, netdev);
	}
	INIT_DELAYED_WORK(&netdev->dwork, netdev_delayed_work);
	seqcount_init(&netdev->stats_seq);

	for_each_online_cpu(cpu) {
		flow = per_cpu_ptr(netdev->pcpu_flow, cpu);
		seqcount_init(&flow->tx_stats_seq);
		seqcount_init(&flow->rx_stats_seq);
	}
	op.opcode = WORKER_OP_BIND;
	cpumask_copy(&op.cpumask, worker_cpumask);
	op.args[0] = netdev;
	if ((rc = worker_op_post(&op))) {
		goto err;
	}

	/*
	 * netdev_list is traversed with list_for_each_entry_rcu() in the
	 * packet rx path (netdev_pt_recv) and unlinked with list_del_rcu() in
	 * netdev_del(), so insertion must use the RCU variant as well to
	 * publish the fully-initialized entry safely to lockless readers.
	 */
	list_add_tail_rcu(&netdev->node, &netdev_list);

	return 0;
err:
	if (netdev) {
		MTRACE_PERCPU_DEL(netdev->pcpu_flow);
		free_percpu(netdev->pcpu_flow);
		MTRACE_KMEM_DEL(netdev);
		kfree(netdev);
	}
	return rc;
}

/*
 * Unlink @netdev from the managed list, unbind its workers and free it.
 * Caller must hold netdev_mutex and must have detached the device first.
 */
static void netdev_del(netdev_t *netdev)
{
	int rc;
	worker_op_t op;

	BUG_ON(!mutex_is_locked(&netdev_mutex));

	/* RCU unlink, then wait for in-flight rx-path readers to drain. */
	list_del_rcu(&netdev->node);
	synchronize_net();

	op.opcode = WORKER_OP_UNBIND | WORKER_OP_F_PARALLEL;
	cpumask_copy(&op.cpumask, worker_cpumask);
	op.args[0] = netdev;
	rc = worker_op_post(&op);
	WARN_ON(rc);

	MTRACE_PERCPU_DEL(netdev->pcpu_flow);
	free_percpu(netdev->pcpu_flow);
	MTRACE_KMEM_DEL(netdev);
	kfree(netdev);
}

/*
 * Replacement ndo_start_xmit installed while a device is attached: frames
 * handed down by the regular network stack are silently discarded so they
 * cannot interfere with generated traffic.
 */
static netdev_tx_t netdev_hook_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/*
 * Per-packet RX accounting for a managed device: optionally hex-dumps the
 * frame when rxdump mode is on; software counters are maintained only when
 * devstats mode (driver-provided RX stats) is off.
 */
static void netdev_receive(netdev_t *netdev, struct sk_buff *skb)
{
	flow_t *flow;

	if (unlikely(test_bit(NETDEV_F_RXDUMP, &netdev->flags))) {
		/* Serialize dump output so frames from different CPUs don't interleave. */
		spin_lock_bh(&netdev_rxdump_lock);
		pr_info("%s:\n", netdev_name(netdev->ndev));
		/* skb->data is past the MAC header here; back up to dump it too. */
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, skb->data - ETH_HLEN, skb->len + ETH_HLEN, false);
		spin_unlock_bh(&netdev_rxdump_lock);
	}

	if (!test_bit(NETDEV_F_DEVSTATS, &netdev->flags)) {
		flow = this_cpu_ptr(netdev->pcpu_flow);
		raw_write_seqcount_begin(&flow->rx_stats_seq);
		flow->rx_pkts++;
		/* Account the on-wire size: payload + MAC header + FCS. */
		flow->rx_bytes += skb->len + ETH_HLEN + ETH_FCS_LEN;
		raw_write_seqcount_end(&flow->rx_stats_seq);
	}
}

/*
 * ETH_P_ALL packet_type hook: account every frame that arrived on a managed
 * device, then drop it so it never reaches the regular protocol stack.
 */
static int netdev_pt_recv(struct sk_buff *skb, struct net_device *ndev,
			  struct packet_type *pt, struct net_device *orig_dev)
{
	netdev_t *entry;

	skb->protocol = htons(ETH_P_LOOP);
	/* Skip our own loopback/outgoing copies; count genuine ingress only. */
	if (likely(skb->pkt_type != PACKET_LOOPBACK)
	&&  likely(skb->pkt_type != PACKET_OUTGOING)) {
		list_for_each_entry_rcu(entry, &netdev_list, node) {
			if (entry->ndev == ndev) {
				netdev_receive(entry, skb);
			}
		}
	}
	dev_kfree_skb(skb);

	return 0;
}

/*
 * Take over @netdev->ndev for traffic generation.  For devices without
 * private ops this swaps in stubbed netdev_ops (xmit dropped, XDP disabled),
 * strips offload features, installs an ETH_P_ALL packet tap and forces
 * promiscuous mode; devices with private ops only have their original ops
 * recorded.  Must be called under RTNL.  Returns 0 or a negative errno.
 */
static int netdev_attach(netdev_t *netdev)
{
	int rc;
	unsigned int flags, flags_new;
	netdev_features_t features, features_new;
	struct net_device *ndev = NULL;

	ASSERT_RTNL();

	if (test_bit(NETDEV_F_ATTACHED, &netdev->flags)) {
		netdev_err(netdev->ndev, "already attached\n");
		rc = -EBUSY;
		goto err;
	}

	ndev = netdev->ndev;

	if (!netdev->priv_ops) {
		/* Install a copy of the driver's ops with xmit (and XDP) neutered. */
		netif_tx_lock_bh(ndev);
		netdev->hook = *ndev->netdev_ops;
		netdev->hook.ndo_start_xmit = netdev_hook_xmit;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
#if LINUX_VERSION_CODE == KERNEL_VERSION(4, 14, 0)
		netdev->hook.ndo_xdp = NULL;
#else
		netdev->hook.ndo_bpf = NULL;
#endif
		netdev->hook.ndo_xdp_xmit = NULL;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(4, 18, 0)
		netdev->hook.ndo_xdp_flush = NULL;
#endif
#endif
		netdev->netdev_ops = ndev->netdev_ops;
		ndev->netdev_ops = &netdev->hook;
		netif_tx_unlock_bh(ndev);
		/* Disable offloads that would mangle generated or observed frames. */
		features_new = features = ndev->features;
		features_new &= ~(NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_RXCSUM |
				  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM);
		if (features != features_new) {
			SET_NETDEV_FEATURES(ndev, features_new);
			netdev_update_features(ndev);
			/* Remember which bits we flipped so detach can restore them.
			 * NOTE(review): these masks are not reset when nothing changed,
			 * so values from a previous attach cycle could linger — confirm. */
			netdev->ndev_features_set = (features ^ features_new) & features_new;
			netdev->ndev_features_clr = (features ^ features_new) & ~features_new;
		}
		netdev->pt.type = htons(ETH_P_ALL);
		netdev->pt.func = netdev_pt_recv;
		netdev->pt.dev = ndev;
		dev_add_pack(&netdev->pt);
		flags_new = flags = netif_get_flags(ndev);
		flags_new |= IFF_PROMISC;
		if (flags != flags_new) {
			if ((rc = dev_change_flags(ndev, flags_new, NULL))) {
				netdev_err(ndev, "failed to set flags to 0x%x\n", flags_new);
				goto err_change;
			}
			netdev->ndev_flags_set = (flags ^ flags_new) & flags_new;
			netdev->ndev_flags_clr = (flags ^ flags_new) & ~flags_new;
		}
	} else {
		netdev->netdev_ops = ndev->netdev_ops;
	}

	/* Fresh session: default modes off, statistics baseline zeroed. */
	clear_bit(NETDEV_F_RXDUMP, &netdev->flags);
	clear_bit(NETDEV_F_DEVSTATS, &netdev->flags);
	set_bit(DW_F_CLR_STATS, &netdev->dwork_flags);
	schedule_delayed_work(&netdev->dwork, 0);
	set_bit(NETDEV_F_ATTACHED, &netdev->flags);
	return 0;

err_change:
	/* Roll back the packet tap and feature changes made above. */
	dev_remove_pack(&netdev->pt);
	if (netdev->ndev_features_set || netdev->ndev_features_clr) {
		features = ndev->features;
		features &= ~netdev->ndev_features_set;
		features |= netdev->ndev_features_clr;
		SET_NETDEV_FEATURES(ndev, features);
		netdev_update_features(ndev);
	}
err:
	return rc;
}

/*
 * Undo netdev_attach(): stop the stats work and, for devices without private
 * ops, restore the driver's netdev_ops, interface flags, packet handlers and
 * offload features.  Must be called under RTNL; a no-op when not attached.
 */
static void netdev_detach(netdev_t *netdev)
{
	unsigned int flags;
	netdev_features_t features;
	struct net_device *ndev;

	ASSERT_RTNL();

	if (!test_bit(NETDEV_F_ATTACHED, &netdev->flags)) {
		return;
	}

	cancel_delayed_work_sync(&netdev->dwork);

	if (!netdev->priv_ops) {
		ndev = netdev->ndev;
		/* Put the driver's original ops back before undoing anything else. */
		netif_tx_lock_bh(ndev);
		ndev->netdev_ops = netdev->netdev_ops;
		netif_tx_unlock_bh(ndev);
		if (netdev->ndev_flags_set || netdev->ndev_flags_clr) {
			flags = netif_get_flags(ndev);
			flags &= ~netdev->ndev_flags_set;
			flags |= netdev->ndev_flags_clr;
			dev_change_flags(ndev, flags, NULL);
		}
		dev_remove_pack(&netdev->pt);
		if (netdev->ndev_features_set || netdev->ndev_features_clr) {
			features = ndev->features;
			features &= ~netdev->ndev_features_set;
			features |= netdev->ndev_features_clr;
			SET_NETDEV_FEATURES(ndev, features);
			netdev_update_features(ndev);
		}
	}

	clear_bit(NETDEV_F_ATTACHED, &netdev->flags);
}

/*
 * Core netdevice event handler, shared by the notifier chain and internal
 * callers (which pass priv ops through @arg).  Drives the netdev_t
 * lifecycle: created on NETDEV_UP, traffic halted on NETDEV_GOING_DOWN,
 * detached and freed on NETDEV_DOWN.  Only init_net devices matching the
 * module's name filter are considered.
 */
static int __netdev_notifier(struct net_device *ndev, unsigned long event,
			     void *arg)
{
	int rc;
	netdev_t *netdev;
	worker_op_t op;

	if (!net_eq(dev_net(ndev), &init_net)
	||  !sb_netdev_in_filter(netdev_name(ndev))) {
		goto ok;
	}

	switch (event) {
	case NETDEV_UP:
		mutex_lock(&netdev_mutex);
		rc = 0;
		if (!(netdev = netdev_find(ndev))) {
			/* @arg carries the priv ops pointer on internal calls. */
			rc = netdev_add(ndev, arg);
		}
		mutex_unlock(&netdev_mutex);
		if (rc) {
			goto err;
		}
		break;

	case NETDEV_GOING_DOWN:
		/* Same stop sequence as netdev_cmd_stop(): checkpoint, then halt. */
		mutex_lock(&netdev_mutex);
		if ((netdev = netdev_find(ndev))) {
			set_bit(DW_F_SET_STOP_STATS, &netdev->dwork_flags);
			flush_delayed_work(&netdev->dwork);
			set_bit(DW_F_SET_FORCE_STOP, &netdev->dwork_flags);
			flush_delayed_work(&netdev->dwork);
			op.opcode = WORKER_OP_STOP | WORKER_OP_F_PARALLEL;
			cpumask_copy(&op.cpumask, worker_cpumask);
			op.args[0] = netdev;
			rc = worker_op_post(&op);
			WARN_ON(rc);
			clear_bit(DW_F_SET_FORCE_STOP, &netdev->dwork_flags);
			clear_bit(NETDEV_F_STARTED, &netdev->flags);
		}
		mutex_unlock(&netdev_mutex);
		break;

	case NETDEV_DOWN:
		mutex_lock(&netdev_mutex);
		if ((netdev = netdev_find(ndev))) {
			netdev_detach(netdev);
			netdev_del(netdev);
		}
		mutex_unlock(&netdev_mutex);
		break;

	default:
		break;
	}
ok:
	return notifier_from_errno(0);
err:
	return notifier_from_errno(rc);
}

/*
 * Netdevice notifier entry point; forwards to the common handler.  The
 * notifier path never carries private ops, hence the NULL argument.
 */
static int netdev_notifier(struct notifier_block *block,
			   unsigned long event, void *ptr)
{
	return __netdev_notifier(netdev_notifier_info_to_dev(ptr), event, NULL);
}

/* Registered on the netdevice notifier chain (presumably at module init). */
static struct notifier_block netdev_notifier_block = {
	.notifier_call = netdev_notifier
};

/*
 * "netdev" command: with no argument, list all managed devices; with a
 * device name, print its state plus (when ethtool allows) link settings,
 * advertised modes and the resolved flow-control configuration.
 */
static int netdev_cmd_netdev(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	u8 fc;
	u16 lcladv, rmtadv;
	int rc;
	netdev_t *netdev, *tmp;
	const char *arg_netdev;
	struct net_device *ndev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
	struct ethtool_link_ksettings ks;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)
	unsigned long supported, advertising, lp_advertising;
	struct ethtool_cmd cmd;
#endif
	rxd->len = rxd->hdr.length;
	arg_netdev = proto_get_str(&rxd->buf, &rxd->len);

	mutex_lock(&netdev_mutex);

	if (!arg_netdev) {
		/* No argument: one summary line per managed device. */
		list_for_each_entry_safe(netdev, tmp, &netdev_list, node) {
			ndev = netdev->ndev;
			cmd_pr_info(handle, "%s%s%s%s%s\n", netdev_name(ndev),
				    ndev->phydev ? " <---> " : "", ndev->phydev ? phydev_name(ndev->phydev) : "",
				    netdev->priv_ops ? " [priv]": "",
				    test_bit(NETDEV_F_ATTACHED, &netdev->flags) ? " [attach]" : "");
		}
	} else {
		if (!(netdev = netdev_find_by_name(arg_netdev))) {
			cmd_pr_err(handle, "ERR: netdev \"%s\" not found\n", arg_netdev);
			rc = -ENODEV;
			mutex_unlock(&netdev_mutex);
			goto err;
		}
		rc = 0;
		if (!test_bit(NETDEV_F_ATTACHED, &netdev->flags)) {
			cmd_pr_err(handle, "ERR: netdev \"%s\" not attached\n", arg_netdev);
			rc = -ENODEV;
			mutex_unlock(&netdev_mutex);
			goto err;
		}
		ndev = netdev->ndev;

		rtnl_lock();
		cmd_pr_info(handle, "state=%s, rxdump=%s, devstats=%s, txq_nr=%d\n",
			    test_bit(NETDEV_F_STARTED, &netdev->flags) ? "started" : "stopped",
			    test_bit(NETDEV_F_RXDUMP, &netdev->flags) ? "on" : "off",
			    test_bit(NETDEV_F_DEVSTATS, &netdev->flags) ? "on" : "off",
			    ndev->real_num_tx_queues);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
		if (ndev->ethtool_ops->get_link_ksettings) {
			memset(&ks, 0, sizeof(ks));
			/* On failure rc stays set: the detail print below is skipped,
			 * but the command itself still returns 0. */
			if ((rc = ndev->ethtool_ops->get_link_ksettings(ndev, &ks))) {
				netdev_err(ndev, "failed to get link setting\n");
			}
			/* Translate advertised pause bits into MII advertisement form. */
			lcladv = 0;
			lcladv |= linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
						    ks.link_modes.advertising) ? ADVERTISE_PAUSE_CAP : 0;
			lcladv |= linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
						    ks.link_modes.advertising) ? ADVERTISE_PAUSE_ASYM : 0;
			rmtadv = 0;
			rmtadv |= linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
						    ks.link_modes.lp_advertising) ? ADVERTISE_PAUSE_CAP : 0;
			rmtadv |= linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
						    ks.link_modes.lp_advertising) ? ADVERTISE_PAUSE_ASYM : 0;
			/* Without autoneg, resolve flow control from local bits alone. */
			fc = (ks.base.autoneg == AUTONEG_ENABLE) ? mii_resolve_flowctrl_fdx(lcladv, rmtadv) :
								   mii_resolve_flowctrl_fdx(lcladv, lcladv);
		} else
#endif
		{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)
			/* Legacy ethtool get_settings path for older kernels. */
			memset(&cmd, 0, sizeof(cmd));
			if ((rc = (ndev->ethtool_ops->get_settings ?
				   ndev->ethtool_ops->get_settings(ndev, &cmd) : -EOPNOTSUPP))) {
				netdev_err(ndev, "failed to get link setting\n");
			}
			supported = cmd.supported;
			advertising = cmd.advertising;
			lp_advertising = cmd.lp_advertising;
			lcladv = 0;
			lcladv |= (advertising & ADVERTISED_Pause) ? ADVERTISE_PAUSE_CAP : 0;
			lcladv |= (advertising & ADVERTISED_Asym_Pause) ? ADVERTISE_PAUSE_ASYM : 0;
			rmtadv = 0;
			rmtadv |= (lp_advertising & ADVERTISED_Pause) ? ADVERTISE_PAUSE_CAP : 0;
			rmtadv |= (lp_advertising & ADVERTISED_Asym_Pause) ? ADVERTISE_PAUSE_ASYM : 0;
			fc = (cmd.autoneg == AUTONEG_ENABLE) ? mii_resolve_flowctrl_fdx(lcladv, rmtadv) :
							       mii_resolve_flowctrl_fdx(lcladv, lcladv);
#else
			rc = -EOPNOTSUPP;
#endif
		}
		if (!rc) {
			/* Link settings were obtained: print them in full. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
			if (ndev->ethtool_ops->get_link_ksettings) {
				cmd_pr_info(handle, "link=%s, autoneg=%s, speed=%s, duplex=%s, flowctrl=%s\n",
					    netif_carrier_ok(ndev) ? "Up" : "Down",
					    (ks.base.autoneg == AUTONEG_ENABLE) ? "Enable" : "Disable",
					    phy_speed_to_str(ks.base.speed), phy_duplex_to_str(ks.base.duplex),
					    (fc == (FLOW_CTRL_TX | FLOW_CTRL_RX)) ? "tx/rx" :
					    (fc == FLOW_CTRL_TX) ? "tx" : (fc == FLOW_CTRL_RX) ? "rx" : "off");
				cmd_pr_info(handle, "supported=%*pb (%*pbl)\nadvertising=%*pb (%*pbl)\nlp_advertising=%*pb (%*pbl)\n",
					    __ETHTOOL_LINK_MODE_MASK_NBITS, ks.link_modes.supported,
					    __ETHTOOL_LINK_MODE_MASK_NBITS, ks.link_modes.supported,
					    __ETHTOOL_LINK_MODE_MASK_NBITS, ks.link_modes.advertising,
					    __ETHTOOL_LINK_MODE_MASK_NBITS, ks.link_modes.advertising,
					    __ETHTOOL_LINK_MODE_MASK_NBITS, ks.link_modes.lp_advertising,
					    __ETHTOOL_LINK_MODE_MASK_NBITS, ks.link_modes.lp_advertising);
			} else
#endif
			{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)
				cmd_pr_info(handle, "link=%s, autoneg=%s, speed=%s, duplex=%s, flowctrl=%s\n",
					    netif_carrier_ok(ndev) ? "Up" : "Down", (cmd.autoneg == AUTONEG_ENABLE) ? "Enable" : "Disable",
					    phy_speed_to_str(cmd.speed), phy_duplex_to_str(cmd.duplex),
					    (fc == (FLOW_CTRL_TX | FLOW_CTRL_RX)) ? "tx/rx" :
					    (fc == FLOW_CTRL_TX) ? "tx" : (fc == FLOW_CTRL_RX) ? "rx" : "off");
				cmd_pr_info(handle, "supported=%*pb (%*pbl)\nadvertising=%*pb (%*pbl)\nlp_advertising=%*pb (%*pbl)\n",
					    BITS_PER_LONG, &supported,
					    BITS_PER_LONG, &supported,
					    BITS_PER_LONG, &advertising,
					    BITS_PER_LONG, &advertising,
					    BITS_PER_LONG, &lp_advertising,
					    BITS_PER_LONG, &lp_advertising);
#endif
			}
		}
		rtnl_unlock();
	}

	mutex_unlock(&netdev_mutex);
	return 0;
err:
	return rc;
}

/*
 * "pause" command: update the ethtool pause parameters (autoneg/rx/tx) of an
 * attached, stopped device.  @opts is a bitmask of NETDEV_PAUSE_OPT_*
 * selecting which of the three payload values are applied; unselected
 * parameters keep their current value.  Returns 0 or a negative errno.
 */
static int netdev_cmd_pause(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int rc;
	u32 opts, vals[3];	/* renamed from 'u32[3]', which shadowed the u32 type */
	netdev_t *netdev;
	const char *arg_netdev;
	proto_rxd_t r;
	struct net_device *ndev;
	struct ethtool_pauseparam pp;

	/* Parse: device name, option mask, then one value per option slot. */
	r = *rxd;
	r.len = r.hdr.length;
	if (!(arg_netdev = proto_get_str(&r.buf, &r.len))
	||  (proto_get_u32(&r.buf, &r.len, &opts) < 0)
	||  (proto_get_u32(&r.buf, &r.len, &vals[0]) < 0)
	||  (proto_get_u32(&r.buf, &r.len, &vals[1]) < 0)
	||  (proto_get_u32(&r.buf, &r.len, &vals[2]) < 0)) {
		rc = -EINVAL;
		goto err;
	}

	mutex_lock(&netdev_mutex);

	if (!(netdev = netdev_find_by_name(arg_netdev))) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not found\n", arg_netdev);
		rc = -ENODEV;
		goto err_unlock_dev;
	}

	if (!test_bit(NETDEV_F_ATTACHED, &netdev->flags)) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not attached\n", arg_netdev);
		rc = -ENODEV;
		goto err_unlock_dev;
	}

	/* Refuse to reconfigure while traffic generation is running. */
	if (test_bit(NETDEV_F_STARTED, &netdev->flags)) {
		rc = -EBUSY;
		goto err_unlock_dev;
	}

	ndev = netdev->ndev;

	rtnl_lock();
	/* Read-modify-write so unselected parameters are preserved. */
	memset(&pp, 0, sizeof(pp));
	if ((rc = (ndev->ethtool_ops->get_pauseparam ?
		   (ndev->ethtool_ops->get_pauseparam(ndev, &pp), 0) : -EOPNOTSUPP))) {
		netdev_err(ndev, "failed to get pause param\n");
		goto err_unlock;
	}
	if (opts & BIT(NETDEV_PAUSE_OPT_AUTONEG)) {
		pp.autoneg = vals[NETDEV_PAUSE_OPT_AUTONEG];
	}
	if (opts & BIT(NETDEV_PAUSE_OPT_RX)) {
		pp.rx_pause = vals[NETDEV_PAUSE_OPT_RX];
	}
	if (opts & BIT(NETDEV_PAUSE_OPT_TX)) {
		pp.tx_pause = vals[NETDEV_PAUSE_OPT_TX];
	}
	if ((rc = (ndev->ethtool_ops->set_pauseparam ?
		   ndev->ethtool_ops->set_pauseparam(ndev, &pp) : -EOPNOTSUPP))) {
		netdev_err(ndev, "failed to set pause param\n");
		goto err_unlock;
	}
	rtnl_unlock();
	mutex_unlock(&netdev_mutex);
	return 0;

err_unlock:
	rtnl_unlock();
err_unlock_dev:
	mutex_unlock(&netdev_mutex);
err:
	return rc;
}

/*
 * "link" command: update link settings (autoneg/duplex/speed plus advertised
 * mode set/clear lists) of an attached, stopped device.  @opts is a bitmask
 * of NETDEV_LINK_OPT_* selecting which payload values apply; advertised-mode
 * edits are masked against the device's supported modes.  Returns 0 or a
 * negative errno.
 */
static int netdev_cmd_link(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int rc;
	u32 opts, vals[3];	/* renamed from 'u32[3]', which shadowed the u32 type */
	netdev_t *netdev;
	const char *arg_netdev, *arg_advset, *arg_advclr;
	proto_rxd_t r;
	struct net_device *ndev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
	struct ethtool_link_ksettings ks;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(lmadv);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)
	unsigned long adv;
	struct ethtool_cmd cmd;
#endif
	/* Parse: device name, option mask, three values, set/clear mode lists. */
	r = *rxd;
	r.len = r.hdr.length;
	if (!(arg_netdev = proto_get_str(&r.buf, &r.len))
	||  (proto_get_u32(&r.buf, &r.len, &opts) < 0)
	||  (proto_get_u32(&r.buf, &r.len, &vals[0]) < 0)
	||  (proto_get_u32(&r.buf, &r.len, &vals[1]) < 0)
	||  (proto_get_u32(&r.buf, &r.len, &vals[2]) < 0)
	||  !(arg_advset = proto_get_str(&r.buf, &r.len))
	||  !(arg_advclr = proto_get_str(&r.buf, &r.len))) {
		rc = -EINVAL;
		goto err;
	}

	mutex_lock(&netdev_mutex);

	if (!(netdev = netdev_find_by_name(arg_netdev))) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not found\n", arg_netdev);
		rc = -ENODEV;
		goto err_unlock_dev;
	}

	if (!test_bit(NETDEV_F_ATTACHED, &netdev->flags)) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not attached\n", arg_netdev);
		rc = -ENODEV;
		goto err_unlock_dev;
	}

	/* Refuse to reconfigure while traffic generation is running. */
	if (test_bit(NETDEV_F_STARTED, &netdev->flags)) {
		rc = -EBUSY;
		goto err_unlock_dev;
	}

	ndev = netdev->ndev;

	rtnl_lock();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
	if (ndev->ethtool_ops->get_link_ksettings) {
		/* Read-modify-write of the full ksettings structure. */
		memset(&ks, 0, sizeof(ks));
		if ((rc = ndev->ethtool_ops->get_link_ksettings(ndev, &ks))) {
			netdev_err(ndev, "failed to get link setting\n");
			goto err_unlock;
		}
		if (opts & BIT(NETDEV_LINK_OPT_AUTONEG)) {
			ks.base.autoneg = vals[NETDEV_LINK_OPT_AUTONEG] ? AUTONEG_ENABLE : AUTONEG_DISABLE;
		}
		if (opts & BIT(NETDEV_LINK_OPT_DUPLEX)) {
			ks.base.duplex = vals[NETDEV_LINK_OPT_DUPLEX] ? DUPLEX_FULL : DUPLEX_HALF;
		}
		if (opts & BIT(NETDEV_LINK_OPT_SPEED)) {
			ks.base.speed = vals[NETDEV_LINK_OPT_SPEED];
		}
		if (opts & BIT(NETDEV_LINK_OPT_ADVSET)) {
			if ((rc = bitmap_parselist(arg_advset, lmadv, __ETHTOOL_LINK_MODE_MASK_NBITS))) {
				rc = -EINVAL;
				goto err_unlock;
			}
			/* Only advertise modes the hardware actually supports. */
			linkmode_and(lmadv, lmadv, ks.link_modes.supported);
			linkmode_or(ks.link_modes.advertising, ks.link_modes.advertising, lmadv);
		}
		if (opts & BIT(NETDEV_LINK_OPT_ADVCLR)) {
			if ((rc = bitmap_parselist(arg_advclr, lmadv, __ETHTOOL_LINK_MODE_MASK_NBITS))) {
				rc = -EINVAL;
				goto err_unlock;
			}
			linkmode_and(lmadv, lmadv, ks.link_modes.supported);
			linkmode_andnot(ks.link_modes.advertising, ks.link_modes.advertising, lmadv);
		}

		if ((rc = (ndev->ethtool_ops->set_link_ksettings ?
			   ndev->ethtool_ops->set_link_ksettings(ndev, &ks) : -EOPNOTSUPP))) {
			netdev_err(ndev, "failed to set link setting\n");
			goto err_unlock;
		}
	} else
#endif
	{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)
		/* Legacy ethtool get/set_settings path for older kernels. */
		memset(&cmd, 0, sizeof(cmd));
		if ((rc = (ndev->ethtool_ops->get_settings ?
			   ndev->ethtool_ops->get_settings(ndev, &cmd) : -EOPNOTSUPP))) {
			netdev_err(ndev, "failed to get link setting\n");
			goto err_unlock;
		}
		if (opts & BIT(NETDEV_LINK_OPT_AUTONEG)) {
			cmd.autoneg = vals[NETDEV_LINK_OPT_AUTONEG] ? AUTONEG_ENABLE : AUTONEG_DISABLE;
		}
		if (opts & BIT(NETDEV_LINK_OPT_DUPLEX)) {
			cmd.duplex = vals[NETDEV_LINK_OPT_DUPLEX] ? DUPLEX_FULL : DUPLEX_HALF;
		}
		if (opts & BIT(NETDEV_LINK_OPT_SPEED)) {
			cmd.speed = vals[NETDEV_LINK_OPT_SPEED];
		}
		if (opts & BIT(NETDEV_LINK_OPT_ADVSET)) {
			if ((rc = bitmap_parselist(arg_advset, &adv, BITS_PER_LONG))) {
				rc = -EINVAL;
				goto err_unlock;
			}
			adv &= cmd.supported;
			cmd.advertising |= adv;
		}
		if (opts & BIT(NETDEV_LINK_OPT_ADVCLR)) {
			if ((rc = bitmap_parselist(arg_advclr, &adv, BITS_PER_LONG))) {
				rc = -EINVAL;
				goto err_unlock;
			}
			adv &= cmd.supported;
			cmd.advertising &= ~adv;
		}

		if ((rc = (ndev->ethtool_ops->set_settings ?
			   ndev->ethtool_ops->set_settings(ndev, &cmd) : -EOPNOTSUPP))) {
			netdev_err(ndev, "failed to set link setting\n");
			goto err_unlock;
		}
#else
		rc = -EOPNOTSUPP;
		goto err_unlock;
#endif
	}

	rtnl_unlock();
	mutex_unlock(&netdev_mutex);
	return 0;

err_unlock:
	rtnl_unlock();
err_unlock_dev:
	mutex_unlock(&netdev_mutex);
err:
	return rc;
}

/*
 * "start" command: begin traffic generation on an attached device.  Waits up
 * to 5 s (10 x 500 ms) for link, detaches the PHY state machine and its
 * interrupt so the benchmark owns the link, then starts the bound workers
 * and latches the "start" statistics checkpoint.
 */
static int netdev_cmd_start(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int i, rc;
	netdev_t *netdev;
	const char *arg_netdev;
	worker_op_t op;
	struct phy_device *phydev;

	rxd->len = rxd->hdr.length;
	if (!(arg_netdev = proto_get_str(&rxd->buf, &rxd->len))) {
		rc = -EINVAL;
		goto err;
	}

	mutex_lock(&netdev_mutex);

	if (!(netdev = netdev_find_by_name(arg_netdev))) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not found\n", arg_netdev);
		rc = -ENODEV;
		goto err_unlock;
	}

	if (!test_bit(NETDEV_F_ATTACHED, &netdev->flags)) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not attached\n", arg_netdev);
		rc = -ENODEV;
		goto err_unlock;
	}

	if (test_bit(NETDEV_F_STARTED, &netdev->flags)) {
		rc = -EBUSY;
		goto err_unlock;
	}

/* Poll for carrier: up to LINK_CHECK_NR tries, LINK_CHECK_INVL ms apart. */
#define LINK_CHECK_NR	10
#define LINK_CHECK_INVL	500
	for (i = 0; i < LINK_CHECK_NR; i++) {
		if (netif_carrier_ok(netdev->ndev)) {
			break;
		}
		msleep(LINK_CHECK_INVL);
	}

	if (!netif_carrier_ok(netdev->ndev)) {
		netdev_err(netdev->ndev, "no link detect\n");
		rc = -EIO;
		goto err_unlock;
	}

	/* Quiesce the kernel PHY machinery; netdev_cmd_stop() re-enables it. */
	if ((phydev = netdev->ndev->phydev)) {
		if (phy_interrupt_is_valid(phydev)) {
			disable_irq(phydev->irq);
		}
		CALL_KSYM_PTR(phy_stop_machine, phydev);
	}

	op.opcode = WORKER_OP_START | WORKER_OP_F_PARALLEL;
	cpumask_copy(&op.cpumask, worker_cpumask);
	op.args[0] = netdev;
	rc = worker_op_post(&op);
	WARN_ON(rc);
	set_bit(NETDEV_F_STARTED, &netdev->flags);
	set_bit(DW_F_SET_START_STATS, &netdev->dwork_flags);
	flush_delayed_work(&netdev->dwork);

	mutex_unlock(&netdev_mutex);
	return 0;

err_unlock:
	mutex_unlock(&netdev_mutex);
err:
	return rc;
}

/*
 * "stop" command: halt traffic generation on an attached device, latch the
 * final statistics checkpoint, and hand the PHY (irq + state machine) back
 * to the kernel.  Stopping an already-stopped device succeeds silently.
 */
static int netdev_cmd_stop(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int rc;
	netdev_t *netdev;
	const char *arg_netdev;
	worker_op_t op;
	struct phy_device *phydev;

	rxd->len = rxd->hdr.length;
	if (!(arg_netdev = proto_get_str(&rxd->buf, &rxd->len))) {
		rc = -EINVAL;
		goto err;
	}

	mutex_lock(&netdev_mutex);

	if (!(netdev = netdev_find_by_name(arg_netdev))) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not found\n", arg_netdev);
		rc = -ENODEV;
		goto err_unlock;
	}

	if (!test_bit(NETDEV_F_ATTACHED, &netdev->flags)) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not attached\n", arg_netdev);
		rc = -ENODEV;
		goto err_unlock;
	}

	if (!test_bit(NETDEV_F_STARTED, &netdev->flags)) {
		goto ok;
	}

	/* Checkpoint final counters, arm force-stop, then halt the workers. */
	set_bit(DW_F_SET_STOP_STATS, &netdev->dwork_flags);
	flush_delayed_work(&netdev->dwork);
	set_bit(DW_F_SET_FORCE_STOP, &netdev->dwork_flags);
	flush_delayed_work(&netdev->dwork);
	op.opcode = WORKER_OP_STOP | WORKER_OP_F_PARALLEL;
	cpumask_copy(&op.cpumask, worker_cpumask);
	op.args[0] = netdev;
	rc = worker_op_post(&op);
	WARN_ON(rc);
	clear_bit(DW_F_SET_FORCE_STOP, &netdev->dwork_flags);
	clear_bit(NETDEV_F_STARTED, &netdev->flags);

	/* Undo netdev_cmd_start()'s PHY quiescing. */
	if ((phydev = netdev->ndev->phydev)) {
		if (phy_interrupt_is_valid(phydev)) {
			enable_irq(phydev->irq);
		}
		CALL_KSYM_PTR(phy_start_machine, phydev);
	}
ok:
	mutex_unlock(&netdev_mutex);
	return 0;

err_unlock:
	mutex_unlock(&netdev_mutex);
err:
	return rc;
}

/*
 * "clear" command: reset the statistics baseline of an attached, stopped
 * device by letting the delayed work latch the current counters.
 */
static int netdev_cmd_clear(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int ret = 0;
	netdev_t *netdev;
	const char *arg_netdev;

	rxd->len = rxd->hdr.length;
	arg_netdev = proto_get_str(&rxd->buf, &rxd->len);
	if (!arg_netdev) {
		return -EINVAL;
	}

	mutex_lock(&netdev_mutex);

	netdev = netdev_find_by_name(arg_netdev);
	if (!netdev) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not found\n", arg_netdev);
		ret = -ENODEV;
	} else if (!test_bit(NETDEV_F_ATTACHED, &netdev->flags)) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not attached\n", arg_netdev);
		ret = -ENODEV;
	} else if (test_bit(NETDEV_F_STARTED, &netdev->flags)) {
		/* Clearing mid-run would corrupt the perf checkpoints. */
		ret = -EBUSY;
	} else {
		set_bit(DW_F_CLR_STATS, &netdev->dwork_flags);
		flush_delayed_work(&netdev->dwork);
	}

	mutex_unlock(&netdev_mutex);
	return ret;
}

/*
 * Print @val through cmd_pr_info() with thousands separators
 * (e.g. 1,234,567), covering the full u64 range.
 */
static void thousands_grouping_ull(proto_handle_t *handle, u64 val)
{
	u32 ones;
	u64 billions, millions, thousands;

	billions = div_u64_rem(val, 1000000000u, &ones);
	millions = div_u64_rem(ones, 1000000u, &ones);
	thousands = div_u64_rem(ones, 1000u, &ones);

	if (billions) {
		cmd_pr_info(handle, "%llu,%03llu,%03llu,%03u", billions, millions, thousands, ones);
	} else if (millions) {
		cmd_pr_info(handle, "%llu,%03llu,%03u", millions, thousands, ones);
	} else if (thousands) {
		cmd_pr_info(handle, "%llu,%03u", thousands, ones);
	} else {
		cmd_pr_info(handle, "%u", ones);
	}
}

/*
 * Print one netdev's statistics: cumulative counters, real-time rates and
 * (when the device is stopped) run-average performance numbers.
 */
static void netdev_cmd_stats_single(proto_handle_t *handle, netdev_t *netdev)
{
	s64 ns;
	u64 tx_pps, tx_Bps, tx_bps, rx_pps, rx_Bps, rx_bps;
	unsigned seq;
	netdev_stats_t tx, rx;

	cmd_pr_info(handle, "{%s}\n", netdev_name(netdev->ndev));

	/* Take a consistent snapshot of the cumulative counters (relative to
	 * the last clear) and the real-time rates under the stats seqcount. */
	do {
		seq = raw_read_seqcount_begin(&netdev->stats_seq);
		tx.pkts = netdev->tx_prev.pkts - netdev->tx_base.pkts;
		tx.bytes = netdev->tx_prev.bytes - netdev->tx_base.bytes;
		rx.pkts = netdev->rx_prev.pkts - netdev->rx_base.pkts;
		rx.bytes = netdev->rx_prev.bytes - netdev->rx_base.bytes;
		tx_pps = netdev->tx_pps_rt;
		tx_Bps = netdev->tx_Bps_rt;
		tx_bps = netdev->tx_bps_rt;
		rx_pps = netdev->rx_pps_rt;
		rx_Bps = netdev->rx_Bps_rt;
		rx_bps = netdev->rx_bps_rt;
	} while (read_seqcount_retry(&netdev->stats_seq, seq));

	cmd_pr_info(handle, "[stats]\n<tx> pkts=");
	thousands_grouping_ull(handle, tx.pkts);
	cmd_pr_info(handle, ", bytes=");
	thousands_grouping_ull(handle, tx.bytes);
	cmd_pr_info(handle, "\n<rx> pkts=");
	thousands_grouping_ull(handle, rx.pkts);
	cmd_pr_info(handle, ", bytes=");
	thousands_grouping_ull(handle, rx.bytes);
	cmd_pr_info(handle, "\n");

	cmd_pr_info(handle, "[rt]\n<tx> pps=");
	thousands_grouping_ull(handle, tx_pps);
	cmd_pr_info(handle, ", Bps=");
	thousands_grouping_ull(handle, tx_Bps);
	cmd_pr_info(handle, ", bps=");
	thousands_grouping_ull(handle, tx_bps);
	cmd_pr_info(handle, "\n<rx> pps=");
	thousands_grouping_ull(handle, rx_pps);
	cmd_pr_info(handle, ", Bps=");
	thousands_grouping_ull(handle, rx_Bps);
	cmd_pr_info(handle, ", bps=");
	thousands_grouping_ull(handle, rx_bps);
	cmd_pr_info(handle, "\n");

	cmd_pr_info(handle, "[perf]\n");
	if (!test_bit(NETDEV_F_STARTED, &netdev->flags)) {
		/* Device stopped: report averages over the whole run window. */
		ns = ktime_to_ns(ktime_sub(netdev->time_stop, netdev->time_start));
		do {
			seq = raw_read_seqcount_begin(&netdev->stats_seq);
			tx.pkts = netdev->tx_stop.pkts - netdev->tx_start.pkts;
			tx.bytes = netdev->tx_stop.bytes - netdev->tx_start.bytes;
			rx.pkts = netdev->rx_stop.pkts - netdev->rx_start.pkts;
			rx.bytes = netdev->rx_stop.bytes - netdev->rx_start.bytes;
		} while (read_seqcount_retry(&netdev->stats_seq, seq));
		/* bps adds 20 octets of per-frame wire overhead (preamble/SFD
		 * plus inter-frame gap) on top of the payload bytes. */
		tx_pps = calc_per_sec_value(tx.pkts, ns);
		tx_Bps = calc_per_sec_value(tx.bytes, ns);
		tx_bps = calc_per_sec_value(tx.pkts * 20 * 8 + tx.bytes * 8, ns);
		rx_pps = calc_per_sec_value(rx.pkts, ns);
		rx_Bps = calc_per_sec_value(rx.bytes, ns);
		/* Compute rx_bps the same way as tx_bps.  The former
		 * "(rx.pkts && rx.bytes)" guard was redundant (the formula
		 * already yields 0 when both counters are 0) and wrongly
		 * forced 0 when only one of them was non-zero. */
		rx_bps = calc_per_sec_value(rx.pkts * 20 * 8 + rx.bytes * 8, ns);
		cmd_pr_info(handle, "Time=");
		thousands_grouping_ull(handle, ns);
		cmd_pr_info(handle, " ns\n<tx>\npkts=");
		thousands_grouping_ull(handle, tx.pkts);
		cmd_pr_info(handle, ", bytes=");
		thousands_grouping_ull(handle, tx.bytes);
		cmd_pr_info(handle, "\npps=");
		thousands_grouping_ull(handle, tx_pps);
		cmd_pr_info(handle, ", Bps=");
		thousands_grouping_ull(handle, tx_Bps);
		cmd_pr_info(handle, ", bps=");
		thousands_grouping_ull(handle, tx_bps);
		cmd_pr_info(handle, "\n<rx>\npkts=");
		thousands_grouping_ull(handle, rx.pkts);
		cmd_pr_info(handle, ", bytes=");
		thousands_grouping_ull(handle, rx.bytes);
		cmd_pr_info(handle, "\npps=");
		thousands_grouping_ull(handle, rx_pps);
		cmd_pr_info(handle, ", Bps=");
		thousands_grouping_ull(handle, rx_Bps);
		cmd_pr_info(handle, ", bps=");
		thousands_grouping_ull(handle, rx_bps);
		cmd_pr_info(handle, "\n");
	} else {
		/* Still running: averages are not meaningful yet. */
		cmd_pr_info(handle, "<tx> ...\n<rx> ...\n");
	}
}

/* Handle the "stats" command: print statistics for every attached netdev. */
static int netdev_cmd_stats(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	netdev_t *netdev;

	mutex_lock(&netdev_mutex);

	/* Nothing is removed from netdev_list while netdev_mutex is held,
	 * so the plain iterator suffices (the _safe variant was unneeded). */
	list_for_each_entry(netdev, &netdev_list, node) {
		if (test_bit(NETDEV_F_ATTACHED, &netdev->flags)) {
			netdev_cmd_stats_single(handle, netdev);
		}
	}

	mutex_unlock(&netdev_mutex);

	return 0;
}

/* Handle the "attach" command: attach the named netdev under RTNL. */
static int netdev_cmd_attach(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	int rc;
	netdev_t *netdev;
	const char *arg_netdev;

	rxd->len = rxd->hdr.length;
	if (!(arg_netdev = proto_get_str(&rxd->buf, &rxd->len))) {
		rc = -EINVAL;
		goto err;
	}

	mutex_lock(&netdev_mutex);

	if (!(netdev = netdev_find_by_name(arg_netdev))) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not found\n", arg_netdev);
		rc = -ENODEV;
		goto err_unlock;
	}

	rtnl_lock();
	rc = netdev_attach(netdev);
	rtnl_unlock();
	/* Bug fix: propagate attach failures instead of discarding rc and
	 * unconditionally reporting success. */
	if (rc) {
		goto err_unlock;
	}

	mutex_unlock(&netdev_mutex);
	return 0;

err_unlock:
	mutex_unlock(&netdev_mutex);
err:
	return rc;
}

/* Handle the "detach" command: stop the named netdev first, then detach
 * it under RTNL. */
static int netdev_cmd_detach(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	const char *arg_netdev;
	proto_rxd_t stop_rxd;
	netdev_t *netdev;
	int rc;

	/* Run the stop command on a copy of the descriptor so the original
	 * can still be parsed below. */
	stop_rxd = *rxd;
	rc = netdev_cmd_stop(handle, &stop_rxd, 0);
	if (rc) {
		return rc;
	}

	rxd->len = rxd->hdr.length;
	arg_netdev = proto_get_str(&rxd->buf, &rxd->len);
	if (!arg_netdev) {
		return -EINVAL;
	}

	mutex_lock(&netdev_mutex);

	netdev = netdev_find_by_name(arg_netdev);
	if (!netdev) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not found\n", arg_netdev);
		mutex_unlock(&netdev_mutex);
		return -ENODEV;
	}

	rtnl_lock();
	netdev_detach(netdev);
	rtnl_unlock();

	mutex_unlock(&netdev_mutex);
	return 0;
}

/* Handle the "rxdump" command: toggle RX frame dumping for a stopped,
 * attached netdev. */
static int netdev_cmd_rxdump(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	const char *arg_netdev;
	proto_rxd_t parse;
	netdev_t *netdev;
	u32 enable;	/* renamed from "u32", which shadowed the type name */
	int rc = 0;

	/* Parse from a copy so the caller's descriptor stays untouched. */
	parse = *rxd;
	parse.len = parse.hdr.length;
	arg_netdev = proto_get_str(&parse.buf, &parse.len);
	if (!arg_netdev || (proto_get_u32(&parse.buf, &parse.len, &enable) < 0)) {
		return -EINVAL;
	}

	mutex_lock(&netdev_mutex);

	netdev = netdev_find_by_name(arg_netdev);
	if (!netdev) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not found\n", arg_netdev);
		rc = -ENODEV;
	} else if (!test_bit(NETDEV_F_ATTACHED, &netdev->flags)) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not attached\n", arg_netdev);
		rc = -ENODEV;
	} else if (test_bit(NETDEV_F_STARTED, &netdev->flags)) {
		rc = -EBUSY;
	} else if (enable) {
		set_bit(NETDEV_F_RXDUMP, &netdev->flags);
	} else {
		clear_bit(NETDEV_F_RXDUMP, &netdev->flags);
	}

	mutex_unlock(&netdev_mutex);
	return rc;
}

/* Handle the "devstats" command: switch a stopped, attached netdev between
 * driver-provided and locally-counted statistics, clearing stats whenever
 * the mode actually changes. */
static int netdev_cmd_devstats(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	const char *arg_netdev;
	proto_rxd_t parse;
	netdev_t *netdev;
	bool was_set;
	u32 enable;	/* renamed from "u32", which shadowed the type name */
	int rc;

	parse = *rxd;
	parse.len = parse.hdr.length;
	arg_netdev = proto_get_str(&parse.buf, &parse.len);
	if (!arg_netdev || (proto_get_u32(&parse.buf, &parse.len, &enable) < 0)) {
		return -EINVAL;
	}

	mutex_lock(&netdev_mutex);

	rc = -ENODEV;
	if (!(netdev = netdev_find_by_name(arg_netdev))) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not found\n", arg_netdev);
		goto out;
	}
	if (!test_bit(NETDEV_F_ATTACHED, &netdev->flags)) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not attached\n", arg_netdev);
		goto out;
	}
	rc = -EBUSY;
	if (test_bit(NETDEV_F_STARTED, &netdev->flags)) {
		goto out;
	}

	if (enable) {
		was_set = test_and_set_bit(NETDEV_F_DEVSTATS, &netdev->flags);
	} else {
		was_set = test_and_clear_bit(NETDEV_F_DEVSTATS, &netdev->flags);
	}

	/* Only reset the statistics when the flag actually flipped. */
	if (!!enable != was_set) {
		set_bit(DW_F_CLR_STATS, &netdev->dwork_flags);
		flush_delayed_work(&netdev->dwork);
	}

	rc = 0;
out:
	mutex_unlock(&netdev_mutex);
	return rc;
}

/* Handle the "ioctl" command: forward the request to the private driver's
 * netdev_ioctl hook, if it provides one. */
static int netdev_cmd_ioctl(proto_handle_t *handle, proto_rxd_t *rxd, unsigned long param)
{
	const char *arg_netdev;
	netdev_t *netdev;
	int rc;

	rxd->len = rxd->hdr.length;
	arg_netdev = proto_get_str(&rxd->buf, &rxd->len);
	if (!arg_netdev) {
		return -EINVAL;
	}

	mutex_lock(&netdev_mutex);

	netdev = netdev_find_by_name(arg_netdev);
	if (!netdev) {
		cmd_pr_err(handle, "ERR: netdev \"%s\" not found\n", arg_netdev);
		rc = -ENODEV;
		goto out;
	}

	/* Only devices with a private ioctl hook support this command. */
	if (!netdev->priv_ops || !netdev->priv_ops->netdev_ioctl) {
		rc = -ENODEV;
		goto out;
	}

	rc = netdev->priv_ops->netdev_ioctl(netdev->ndev, handle, rxd);
out:
	mutex_unlock(&netdev_mutex);
	return rc;
}

/* Dispatch table for MAGIC_NETDEV commands, indexed by command id.
 * Each entry carries the handler and an opaque parameter (unused, 0).
 * Unlisted ids stay NULL and are rejected with -ENOSYS by netdev_cmd_fn(). */
static cmd_t netdev_cmd[NETDEV_ID_MAX] = {
	[NETDEV_ID_NETDEV]   = {netdev_cmd_netdev,   0},
	[NETDEV_ID_PAUSE]    = {netdev_cmd_pause,    0},
	[NETDEV_ID_LINK]     = {netdev_cmd_link,     0},
	[NETDEV_ID_START]    = {netdev_cmd_start,    0},
	[NETDEV_ID_STOP]     = {netdev_cmd_stop,     0},
	[NETDEV_ID_CLEAR]    = {netdev_cmd_clear,    0},
	[NETDEV_ID_STATS]    = {netdev_cmd_stats,    0},
	[NETDEV_ID_ATTACH]   = {netdev_cmd_attach,   0},
	[NETDEV_ID_DETACH]   = {netdev_cmd_detach,   0},
	[NETDEV_ID_RXDUMP]   = {netdev_cmd_rxdump,   0},
	[NETDEV_ID_DEVSTATS] = {netdev_cmd_devstats, 0},
	[NETDEV_ID_IOCTL]    = {netdev_cmd_ioctl,    0},
};

/* Entry point registered for MAGIC_NETDEV: validate the command id and
 * dispatch through the netdev_cmd table. */
static int netdev_cmd_fn(proto_handle_t *handle, proto_rxd_t *desc, unsigned long param)
{
	u8 id = param;
	cmd_fn_t *fn;

	if (id >= NETDEV_ID_MAX) {
		return -EINVAL;
	}

	fn = netdev_cmd[id].fn;
	if (!fn) {
		/* Valid id but no handler registered for it. */
		return -ENOSYS;
	}

	return fn(handle, desc, netdev_cmd[id].param);
}

/*
 * Module init: initialize the skb lookup table, resolve non-exported
 * kernel symbols, and register the netdevice notifier plus the command
 * handler.  On failure, everything already set up is torn down again.
 */
int __init netdev_add_all(void)
{
	int rc;
	netdev_t *netdev, *tmp;
	unsigned int i;
	sb_skb_entry_t *e;
	struct {
		void **fn;
		const char *name;
	} ksyms_table[] = {
#define KSYM_TBL_ENTRY(x)	{(void **)&ksym_##x, #x}
		KSYM_TBL_ENTRY(phy_start_machine),
		KSYM_TBL_ENTRY(phy_stop_machine),
	};

	/* Reset every slot of the skb lookup table before first use. */
	sb_skb_table_for_each_entry(e, 0, i) {
		seqcount_init(&e->seq);
		e->key_ndev = NULL;
		e->key_skbh_baddr = 0;
		e->skb = NULL;
		atomic_set_release(&e->used, 0);
	}

	/* Resolve the non-exported PHY symbols via kallsyms. */
	for (i = 0; i < ARRAY_SIZE(ksyms_table); i++) {
		if (!(*ksyms_table[i].fn = (void *)kallsyms_lookup_name(ksyms_table[i].name))) {
			pr_err("Failed to get address of \"%s\"\n", ksyms_table[i].name);
			rc = -EFAULT;
			goto err;
		}
	}

	if ((rc = register_netdevice_notifier(&netdev_notifier_block))) {
		pr_err("Failed to register netdevice notifier\n");
		goto err;
	}

	if ((rc = cmd_fn_register(MAGIC_NETDEV, netdev_cmd_fn))) {
		pr_err("Failed to register command functions\n");
		/* Bug fix: the notifier registered above must be torn down
		 * on this path, or a stale callback would outlive init. */
		goto err_unreg;
	}

	return 0;
err_unreg:
	unregister_netdevice_notifier(&netdev_notifier_block);
err:
	/* Drop any netdevs the notifier may already have added. */
	rtnl_lock();
	mutex_lock(&netdev_mutex);

	list_for_each_entry_safe(netdev, tmp, &netdev_list, node) {
		netdev_detach(netdev);
		netdev_del(netdev);
	}

	mutex_unlock(&netdev_mutex);
	rtnl_unlock();
	return rc;
}

/*
 * Module exit: unregister the notifier, then force-stop, detach and delete
 * every tracked netdev.
 */
void netdev_del_all(void)
{
	int rc;
	netdev_t *netdev, *tmp;
	worker_op_t op;

	/* Stop receiving hotplug events before tearing the list down. */
	unregister_netdevice_notifier(&netdev_notifier_block);

	rtnl_lock();
	mutex_lock(&netdev_mutex);

	list_for_each_entry_safe(netdev, tmp, &netdev_list, node) {
		/* Force-stop sequence (mirrors the stop command path above):
		 * flush the delayed work once for a stats snapshot, then
		 * again with the force-stop flag, and stop the workers on
		 * all configured CPUs in parallel. */
		set_bit(DW_F_SET_STOP_STATS, &netdev->dwork_flags);
		flush_delayed_work(&netdev->dwork);
		set_bit(DW_F_SET_FORCE_STOP, &netdev->dwork_flags);
		flush_delayed_work(&netdev->dwork);
		op.opcode = WORKER_OP_STOP | WORKER_OP_F_PARALLEL;
		cpumask_copy(&op.cpumask, worker_cpumask);
		op.args[0] = netdev;
		rc = worker_op_post(&op);
		WARN_ON(rc);
		clear_bit(DW_F_SET_FORCE_STOP, &netdev->dwork_flags);
		clear_bit(NETDEV_F_STARTED, &netdev->flags);
		netdev_detach(netdev);
		netdev_del(netdev);
	}

	mutex_unlock(&netdev_mutex);
	rtnl_unlock();
}

/**
 * @brief	Find a tracked netdev by name, serialized by netdev_mutex
 */
netdev_t *sb_netdev_get_by_name(const char *name)
{
	netdev_t *found;

	mutex_lock(&netdev_mutex);
	found = netdev_find_by_name(name);
	mutex_unlock(&netdev_mutex);

	return found;
}

/**
 * @brief	Allocate a skb for specified netdev
 *
 * Returns the skb with SB_SKB_PAD bytes of headroom reserved, or NULL on
 * allocation failure.
 */
struct sk_buff *sb_netdev_alloc_skb(netdev_t *netdev, unsigned int len, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	if (netdev->priv_ops) {
		/* Private devices use DMA-coherent skbs from sb_alloc_skb(). */
		skb = sb_alloc_skb(netdev->ndev, len + SB_SKB_PAD, gfp_mask);
	} else {
		BUILD_BUG_ON(SB_SKB_PAD < NET_SKB_PAD);
		skb = __netdev_alloc_skb(netdev->ndev, len + SB_SKB_PAD - NET_SKB_PAD, gfp_mask);
	}
	/* Bug fix: both allocators can return NULL; calling skb_reserve()
	 * on a NULL skb would dereference it and oops. */
	if (!skb) {
		return NULL;
	}
	/* Top up the headroom to exactly SB_SKB_PAD. */
	skb_reserve(skb, SB_SKB_PAD - skb_headroom(skb));

	return skb;
}

/**
 * @brief	Free a skb belonging to the specified netdev
 *
 * (Header comment previously said "Allocate" — copy-paste error fixed.)
 */
void sb_netdev_kfree_skb(netdev_t *netdev, struct sk_buff *skb)
{
	/* Private devices own DMA-coherent skbs; return those through
	 * sb_kfree_skb(), everything else through the regular stack path. */
	if (!netdev->priv_ops) {
		dev_kfree_skb(skb);
		return;
	}
	sb_kfree_skb(skb);
}

/**
 * @brief	Return whether a device name matches the "devs" module filter
 *
 * An unset filter matches everything.  Allocation failure counts as no
 * match (best effort).
 */
bool sb_netdev_in_filter(const char *name)
{
	char *token, *cursor, *dup = NULL;
	bool match = false;
	size_t len;

	/* No filter configured: every device passes. */
	if (!netdev_filter) {
		match = true;
		goto out;
	}

	/* strsep() modifies its input, so work on a private copy. */
	len = strlen(netdev_filter) + 1;
	dup = kmalloc(len, GFP_KERNEL);
	if (!dup || MTRACE_KMEM_ADD(dup)) {
		pr_err("%s(): failed to alloc memory\n", __func__);
		goto out;
	}
	memcpy(dup, netdev_filter, len);

	for (cursor = dup; cursor && !match; ) {
		token = strsep(&cursor, ",");
		if (!strcmp(token, name)) {
			match = true;
		}
	}
out:
	MTRACE_KMEM_DEL(dup);
	kfree(dup);
	return match;
}
EXPORT_SYMBOL(sb_netdev_in_filter);

/**
 * @brief	Allocate a net_device instance for private device
 *
 * Thin wrapper around alloc_etherdev_mqs() with the requested private-data
 * size and TX/RX queue counts.
 */
struct net_device *sb_netdev_alloc(int sizeof_priv, unsigned int txqs, unsigned int rxqs)
{
	struct net_device *ndev;

	ndev = alloc_etherdev_mqs(sizeof_priv, txqs, rxqs);
	return ndev;
}
EXPORT_SYMBOL(sb_netdev_alloc);

/**
 * @brief	Free dummy net_device instance
 *
 * Counterpart of sb_netdev_alloc(); releases the net_device via
 * free_netdev().
 */
void sb_netdev_free(struct net_device *ndev)
{
	free_netdev(ndev);
}
EXPORT_SYMBOL(sb_netdev_free);

/**
 * @brief	Register a private net_device instance
 *
 * Adds the embedded device manually under an "sb-" prefixed name instead
 * of going through register_netdev(), and starts with carrier off.
 */
int sb_netdev_register(struct net_device *ndev)
{
	int rc;

	device_initialize(&ndev->dev);
	dev_set_name(&ndev->dev, "sb-%s", ndev->name);
	if ((rc = device_add(&ndev->dev))) {
		/* NOTE(review): driver-core docs say a failed device_add()
		 * should be followed by put_device() to drop the reference
		 * taken by device_initialize().  Whether that is safe here
		 * depends on how the device release interacts with
		 * sb_netdev_free()/free_netdev() — TODO confirm before
		 * changing. */
		goto err;
	}

	/* Carrier stays off until the private driver reports link up. */
	netif_carrier_off(ndev);
	return 0;
err:
	return rc;
}
EXPORT_SYMBOL(sb_netdev_register);

/**
 * @brief	Unregister a private net_device instance
 *
 * Removes the embedded device added by sb_netdev_register().
 */
void sb_netdev_unregister(struct net_device *ndev)
{
	device_del(&ndev->dev);
}
EXPORT_SYMBOL(sb_netdev_unregister);

/**
 * @brief	Receive handler. This routine should be called in private driver after receive frames
 *
 * The netdev_t context is recovered from the embedded device's driver data
 * and the frame is handed to the common receive path.
 */
void sb_netdev_receive(struct sk_buff *skb, struct net_device *ndev)
{
	netdev_receive(dev_get_drvdata(&ndev->dev), skb);
}
EXPORT_SYMBOL(sb_netdev_receive);

/**
 * @brief	Post a netdev event
 *
 * Calls the notifier under RTNL and converts the notifier return value to
 * a regular errno.
 */
int sb_netdev_notify(struct net_device *ndev, unsigned long event, void *arg)
{
	int ret;

	rtnl_lock();
	ret = __netdev_notifier(ndev, event, arg);
	rtnl_unlock();

	return notifier_to_errno(ret);
}
EXPORT_SYMBOL(sb_netdev_notify);

/**
 * @brief	Allocate a skb for specified private netdev
 *
 * Buffer layout in one DMA-coherent allocation:
 *   [dma_addr_t slot][struct sk_buff][skb data area]
 * The bus address of the skb head is stashed in the leading slot and the
 * (ndev, baddr) pair is recorded in sb_skb_table so sb_baddr_to_skbh()
 * and sb_kfree_skb() can find the skb again.  Returns NULL on failure.
 */
struct sk_buff *sb_alloc_skb(struct net_device *ndev, unsigned int len, gfp_t gfp_mask)
{
	u32 hash;
	void *vaddr;
	size_t sz, __sz;
	dma_addr_t baddr, __baddr;
	unsigned int i, tmp;
	struct device *dev;
	struct sk_buff *skb = NULL;
	sb_skb_entry_t *e;

	/* Total size: skb truesize plus the leading dma_addr_t slot. */
	sz = SKB_TRUESIZE(SKB_DATA_ALIGN(len));
	__sz = sz + SKB_DATA_ALIGN(sizeof(baddr));
	dev = ndev->dev.parent;
	vaddr = dma_alloc_coherent(dev, __sz, &__baddr, gfp_mask);
	if (!vaddr || MTRACE_DMACOH_ADD(vaddr, dev, __sz, __baddr)) {
		goto err;
	}
	/* Build the sk_buff in place, right after the dma_addr_t slot. */
	skb = vaddr + SKB_DATA_ALIGN(sizeof(baddr));
	if (!(skb = build_skb_around(skb, (void *)skb + SKB_DATA_ALIGN(sizeof(*skb)),
				     sz - SKB_DATA_ALIGN(sizeof(*skb))))) {
		goto err;
	}
	/* Bus address of the skb head (start of the data area), mirrored
	 * into the leading slot for sb_kfree_skb() to read back. */
	baddr = __baddr + SKB_DATA_ALIGN(sizeof(baddr)) + SKB_DATA_ALIGN(sizeof(*skb));
	skb->dev = ndev;
	*(dma_addr_t *)vaddr = baddr;

	/* Claim a free lookup-table slot by open addressing; the loop macro
	 * sets e to NULL when the whole table was scanned without one. */
	hash = jhash(&ndev, sizeof(ndev), 0);
	i = jhash(&baddr, sizeof(baddr), hash);
	sb_skb_table_for_each_entry(e, i, tmp) {
		if (atomic_cmpxchg_acquire(&e->used, 0, 1)) {
			continue;
		}
		/* Publish the keys under the entry's seqcount so readers
		 * never observe a torn (ndev, baddr, skb) triple. */
		raw_write_seqcount_begin(&e->seq);
		e->key_ndev = ndev;
		e->key_skbh_baddr = baddr;
		e->skb = skb;
		raw_write_seqcount_end(&e->seq);
		break;
	}

	if (!e) {
		goto err;
	}
	return skb;
err:
	if (vaddr) {
		MTRACE_DMACOH_DEL(vaddr);
		dma_free_coherent(dev, __sz, vaddr, __baddr);
	}
	return NULL;
}
EXPORT_SYMBOL(sb_alloc_skb);

/**
 * @brief	Free a skb for specified private netdev
 *
 * Drops one reference; when it was the last one, removes the (ndev, baddr)
 * entry from sb_skb_table and releases the DMA-coherent buffer that
 * sb_alloc_skb() created.
 */
void sb_kfree_skb(struct sk_buff *skb)
{
	u32 hash;
	void *vaddr;
	size_t sz;
	unsigned seq;
	dma_addr_t baddr, __baddr, key_skbh_baddr;
	unsigned int i, tmp;
	struct device *dev;
	struct net_device *ndev, *key_ndev;
	struct sb_skb_entry *e;

	if (skb_unref(skb)) {
		/* Recover the layout laid down by sb_alloc_skb(): the
		 * dma_addr_t slot sits immediately before the sk_buff. */
		vaddr = (void *)skb - SKB_DATA_ALIGN(sizeof(baddr));
		sz = SKB_DATA_ALIGN(sizeof(baddr)) + skb->truesize;
		ndev = skb->dev;
		dev = ndev->dev.parent;
		baddr = *(dma_addr_t *)vaddr;
		__baddr = baddr - SKB_DATA_ALIGN(sizeof(baddr)) - SKB_DATA_ALIGN(sizeof(*skb));

		/* Locate and release the matching lookup-table slot. */
		hash = jhash(&ndev, sizeof(ndev), 0);
		i = jhash(&baddr, sizeof(baddr), hash);
		sb_skb_table_for_each_entry(e, i, tmp) {
			if (!atomic_read_acquire(&e->used)) {
				continue;
			}
			/* Read the keys under the seqcount so a concurrent
			 * writer cannot hand us a torn pair. */
			do {
				seq = raw_read_seqcount_begin(&e->seq);
				key_ndev = e->key_ndev;
				key_skbh_baddr = e->key_skbh_baddr;
			} while (read_seqcount_retry(&e->seq, seq));
			if ((key_ndev == ndev) && (key_skbh_baddr == baddr)) {
				raw_write_seqcount_begin(&e->seq);
				e->key_ndev = NULL;
				e->key_skbh_baddr = 0;
				e->skb = NULL;
				raw_write_seqcount_end(&e->seq);
				atomic_set_release(&e->used, 0);
				break;
			}
		}

		/* e is NULL when the scan wrapped without a match (set by
		 * the loop macro); the buffer is freed regardless. */
		if (!e) {
			netdev_err(ndev, "%s(): failed to find skb %lx\n", __func__, (unsigned long)skb);
		}
		MTRACE_DMACOH_DEL(vaddr);
		dma_free_coherent(dev, sz, vaddr, __baddr);
	}
}
EXPORT_SYMBOL(sb_kfree_skb);

/**
 * @brief	Get skb head from baddr of skb head for specified private netdev
 *
 * Looks the (ndev, baddr) pair up in sb_skb_table and returns the matching
 * skb's head pointer, or NULL when no entry matches.
 */
void *sb_baddr_to_skbh(struct net_device *ndev, dma_addr_t baddr)
{
	u32 hash;
	unsigned seq;
	dma_addr_t key_skbh_baddr;
	unsigned int i, tmp;
	struct sk_buff *skb;
	struct net_device *key_ndev;
	struct sb_skb_entry *e;

	/* Probe the open-addressed table starting at the hashed index;
	 * the loop macro sets e to NULL after a full unsuccessful scan. */
	hash = jhash(&ndev, sizeof(ndev), 0);
	i = jhash(&baddr, sizeof(baddr), hash);
	sb_skb_table_for_each_entry(e, i, tmp) {
		if (!atomic_read_acquire(&e->used)) {
			continue;
		}
		/* Snapshot keys and skb consistently under the seqcount.
		 * NOTE(review): the skb pointer is only guaranteed valid if
		 * the caller otherwise keeps the skb alive; the entry could
		 * be released by sb_kfree_skb() right after this read —
		 * presumably callers hold a reference; verify. */
		do {
			seq = raw_read_seqcount_begin(&e->seq);
			key_ndev = e->key_ndev;
			key_skbh_baddr = e->key_skbh_baddr;
			skb = e->skb;
		} while (read_seqcount_retry(&e->seq, seq));
		if ((key_ndev == ndev) && (key_skbh_baddr == baddr)) {
			break;
		}
	}

	if (!e) {
		goto err;
	}
	return skb->head;
err:
	return NULL;
}
EXPORT_SYMBOL(sb_baddr_to_skbh);
