/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2023. All rights reserved.
 * Description   : ether device interface
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "securec.h"

#include "udk_log.h"
#include "udk_common.h"
#include "udk_spinlock.h"
#include "udk_ether.h"
#include "udk_vdev.h"
#include "udk_malloc.h"
#include "udk_ethdev.h"

/*
 * Process-shared ethdev state: the next free owner id, the spinlock that
 * serializes ownership/name changes, and one data slot per possible port.
 * Placed in a memzone so primary and secondary processes see the same
 * ports (see udk_eth_dev_shared_data_prepare()).
 */
static struct {
    uint64_t next_owner_id;
    udk_spinlock_t ownership_lock;
    struct udk_eth_dev_data data[UDK_MAX_ETHPORTS];
} *udk_eth_dev_shared_data;

/* Serializes the lazy initialization of udk_eth_dev_shared_data. */
static udk_spinlock_t udk_eth_shared_data_lock = UDK_SPINLOCK_INITIALIZER;
/* Per-process device table; entry i is paired with shared data slot i. */
struct udk_eth_dev udk_eth_devices[UDK_MAX_ETHPORTS];

/*
 * Maps an extended-statistic display name to the byte offset of its
 * counter inside struct udk_eth_stats.
 */
struct udk_eth_xstats_name_off {
    char name[UDK_ETH_XSTATS_NAME_SIZE];
    uint32_t offset;
};

/* Device-wide basic statistics exposed through the xstats interface. */
static const struct udk_eth_xstats_name_off udk_stats_strings[] = {
    {"rx_good_packets", offsetof(struct udk_eth_stats, ipackets)},
    {"tx_good_packets", offsetof(struct udk_eth_stats, opackets)},
    {"rx_good_bytes", offsetof(struct udk_eth_stats, ibytes)},
    {"tx_good_bytes", offsetof(struct udk_eth_stats, obytes)},
    {"rx_missed_errors", offsetof(struct udk_eth_stats, imissed)},
    {"rx_errors", offsetof(struct udk_eth_stats, ierrors)},
    {"tx_errors", offsetof(struct udk_eth_stats, oerrors)},
    {"rx_mbuf_allocation_errors", offsetof(struct udk_eth_stats, rx_nombuf)},
};

/* Number of device-wide basic statistics entries above. */
#define UDK_NB_STATS (sizeof(udk_stats_strings) / sizeof(udk_stats_strings[0]))

/* Per-RX-queue statistics; names are prefixed with "rx_q<N>" when emitted. */
static const struct udk_eth_xstats_name_off udk_rxq_stats_strings[] = {
    {"packets", offsetof(struct udk_eth_stats, q_ipackets)},
    {"bytes", offsetof(struct udk_eth_stats, q_ibytes)},
    {"errors", offsetof(struct udk_eth_stats, q_errors)},
};

/* Number of per-RX-queue statistics entries above. */
#define UDK_NB_RXQ_STATS (sizeof(udk_rxq_stats_strings) / sizeof(udk_rxq_stats_strings[0]))

/* Per-TX-queue statistics; names are prefixed with "tx_q<N>" when emitted. */
static const struct udk_eth_xstats_name_off udk_txq_stats_strings[] = {
    {"packets", offsetof(struct udk_eth_stats, q_opackets)},
    {"bytes", offsetof(struct udk_eth_stats, q_obytes)},
};
/* Number of per-TX-queue statistics entries above. */
#define UDK_NB_TXQ_STATS (sizeof(udk_txq_stats_strings) / sizeof(udk_txq_stats_strings[0]))

/*
 * Resize the RX queue pointer array of @dev to @nb_queues entries.
 *
 * Three cases: first-time allocation, re-configuration (queues beyond the
 * new count are released via the driver's rx_queue_release op, then the
 * array is re-allocated), and teardown (@nb_queues == 0 releases every
 * queue and frees the array).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOTSUP when the
 * driver lacks rx_queue_release while queues must be released.
 * On any failure nb_rx_queues is left consistent with rx_queues.
 */
static int udk_eth_dev_rx_queue_config(struct udk_eth_dev *dev, uint16_t nb_queues)
{
    uint16_t old_nb_queues = dev->data->nb_rx_queues;
    void **rxq = NULL;
    uint32_t i;

    if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
        dev->data->rx_queues = udk_zmalloc(sizeof(dev->data->rx_queues[0]) * nb_queues, UDK_CACHE_LINE_SIZE);
        if (dev->data->rx_queues == NULL) {
            dev->data->nb_rx_queues = 0;
            return -ENOMEM;
        }
    } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
        UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
        rxq = dev->data->rx_queues;

        /* Release the queues that do not fit in the new layout. */
        for (i = nb_queues; i < old_nb_queues; i++) {
            (*dev->dev_ops->rx_queue_release)(rxq[i]);
        }

        udk_free(rxq);
        dev->data->rx_queues = udk_zmalloc(sizeof(dev->data->rx_queues[0]) * nb_queues, UDK_CACHE_LINE_SIZE);
        if (dev->data->rx_queues == NULL) {
            /* Fix: the array is gone, so the count must not keep its old
             * nonzero value or later code would index a NULL array. */
            dev->data->nb_rx_queues = 0;
            return -ENOMEM;
        }
    } else if (dev->data->rx_queues != NULL && nb_queues == 0) { /* teardown */
        UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

        rxq = dev->data->rx_queues;
        for (i = nb_queues; i < old_nb_queues; i++) {
            (*dev->dev_ops->rx_queue_release)(rxq[i]);
        }

        udk_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
    }
    dev->data->nb_rx_queues = nb_queues;
    return 0;
}

/*
 * Resize the TX queue pointer array of @dev to @nb_queues entries.
 * Mirror of udk_eth_dev_rx_queue_config(); see that function for the
 * three cases handled and the error contract.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOTSUP when the
 * driver lacks tx_queue_release while queues must be released.
 * On any failure nb_tx_queues is left consistent with tx_queues.
 */
static int udk_eth_dev_tx_queue_config(struct udk_eth_dev *dev, uint16_t nb_queues)
{
    uint16_t old_nb_queues = dev->data->nb_tx_queues;
    void **txq = NULL;
    uint32_t i;

    if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
        dev->data->tx_queues = udk_zmalloc(sizeof(dev->data->tx_queues[0]) * nb_queues, UDK_CACHE_LINE_SIZE);
        if (dev->data->tx_queues == NULL) {
            dev->data->nb_tx_queues = 0;
            return -ENOMEM;
        }
    } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
        UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
        txq = dev->data->tx_queues;

        /* Release the queues that do not fit in the new layout. */
        for (i = nb_queues; i < old_nb_queues; i++) {
            (*dev->dev_ops->tx_queue_release)(txq[i]);
        }

        udk_free(txq);
        dev->data->tx_queues = udk_zmalloc(sizeof(dev->data->tx_queues[0]) * nb_queues, UDK_CACHE_LINE_SIZE);
        if (dev->data->tx_queues == NULL) {
            /* Fix: the array is gone, so the count must not keep its old
             * nonzero value or later code would index a NULL array. */
            dev->data->nb_tx_queues = 0;
            return -ENOMEM;
        }
    } else if (dev->data->tx_queues != NULL && nb_queues == 0) { /* teardown */
        UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

        txq = dev->data->tx_queues;
        for (i = nb_queues; i < old_nb_queues; i++) {
            (*dev->dev_ops->tx_queue_release)(txq[i]);
        }

        udk_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
    }
    dev->data->nb_tx_queues = nb_queues;
    return 0;
}

static int udk_eth_dev_is_removed(uint16_t port_id)
{
    struct udk_eth_dev *dev = NULL;

    if (udk_eth_dev_is_valid_port(port_id) == 0) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return 0;
    }

    dev = &udk_eth_devices[port_id];
    if (dev->state == UDK_ETH_DEV_REMOVED) {
        return 1;
    }

    return 0;
}

/*
 * Normalize a driver return code: 0 passes through unchanged, and any
 * error is converted to -EIO when the underlying device was removed.
 */
static int udk_eth_err(uint16_t port_id, int ret)
{
    if (ret != 0 && udk_eth_dev_is_removed(port_id) != 0) {
        return -EIO;
    }
    return ret;
}

/*
 * Fill @dev_info with the capabilities of port @port_id.
 *
 * Generic defaults (descriptor limits, MTU range) are seeded first, then
 * the driver's dev_infos_get op overrides/extends them, and finally the
 * queue counts are clamped to UDK_MAX_QUEUES_PER_PORT.
 *
 * Returns 0 on success, -EINVAL on NULL @dev_info, -ENODEV for an invalid
 * port, -ENOTSUP when the driver lacks dev_infos_get, or the (possibly
 * -EIO-translated) driver error. On error *dev_info is zeroed.
 */
int udk_eth_dev_info_get(uint16_t port_id, struct udk_eth_dev_info *dev_info)
{
    struct udk_eth_dev *dev = NULL;
    const struct udk_eth_desc_lim lim = {
        .nb_max = 0xffff,
        .nb_min = 0,
        .nb_align = 1,
    };
    int diag;

    /* Fix: dev_info was dereferenced (memset_s and field writes) without
     * validation; reject NULL explicitly. */
    if (dev_info == NULL) {
        UDK_ETHDEV_LOG(ERR, "Null pointer is specified\n");
        return -EINVAL;
    }

    (void)memset_s(dev_info, sizeof(*dev_info), 0, sizeof(*dev_info));

    if (udk_eth_dev_is_valid_port(port_id) == 0) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -ENODEV;
    }
    dev = &udk_eth_devices[port_id];

    /* Generic defaults; the driver op below may tighten them. */
    dev_info->rx_desc_lim = lim;
    dev_info->tx_desc_lim = lim;
    dev_info->min_mtu = UDK_ETHER_MIN_MTU;
    dev_info->max_mtu = 0xffff;

    UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
    diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
    if (diag != 0) {
        /* Cleanup already filled in device information */
        (void)memset_s(dev_info, sizeof(*dev_info), 0, sizeof(*dev_info));
        return udk_eth_err(port_id, diag);
    }
    /* Never advertise more queues than the framework supports. */
    dev_info->max_rx_queues = UDK_MIN(dev_info->max_rx_queues, (uint16_t)UDK_MAX_QUEUES_PER_PORT);
    dev_info->max_tx_queues = UDK_MIN(dev_info->max_tx_queues, (uint16_t)UDK_MAX_QUEUES_PER_PORT);
    dev_info->nb_rx_queues = dev->data->nb_rx_queues;
    dev_info->nb_tx_queues = dev->data->nb_tx_queues;
    dev_info->dev_flags = &dev->data->dev_flags;

    return 0;
}

/*
 * Configure port @port_id with @nb_rx_q RX and @nb_tx_q TX queues using
 * @dev_conf.
 *
 * The device must be stopped (except for secondary processes under
 * UDK_HOT_REPLACE, which return 0 immediately). Queue counts are
 * validated against both the framework maximum and the driver limits;
 * (0, 0) selects driver/framework defaults. On any failure the previous
 * configuration is restored and the queue arrays are torn down if the
 * driver dev_configure op failed.
 *
 * Returns 0 on success, -EINVAL for bad arguments, -EBUSY when started,
 * -ENOTSUP when the driver lacks dev_configure, or a (possibly
 * -EIO-translated) driver error.
 */
int udk_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, const struct udk_eth_conf *dev_conf)
{
    struct udk_eth_dev *dev = NULL;
    struct udk_eth_dev_info dev_info;
    struct udk_eth_conf orig_conf;
    int diag;
    int ret;

    if (udk_eth_dev_is_valid_port(port_id) == 0) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -EINVAL;
    }

    /* Fix: dev_conf is dereferenced below (rxmode checks, memcpy_s);
     * reject NULL explicitly. */
    if (dev_conf == NULL) {
        UDK_ETHDEV_LOG(ERR, "Null pointer is specified\n");
        return -EINVAL;
    }

    dev = &udk_eth_devices[port_id];
    UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

    if (dev->data->dev_started) {
#ifdef UDK_HOT_REPLACE
        if (udk_process_type() != UDK_PROC_PRIMARY) {
            return 0;
        }
#endif
        UDK_ETHDEV_LOG(ERR, "Port %u must be stopped to allow configuration\n", port_id);
        return -EBUSY;
    }

    /* Snapshot the current configuration so any failure can roll back. */
    (void)memcpy_s(&orig_conf, sizeof(orig_conf), &dev->data->dev_conf, sizeof(dev->data->dev_conf));
    if (dev_conf != &dev->data->dev_conf) {
        (void)memcpy_s(&dev->data->dev_conf, sizeof(dev->data->dev_conf), dev_conf, sizeof(*dev_conf));
    }

    ret = udk_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0) {
        goto rollback;
    }

    /* (0, 0) means "use driver defaults", falling back to framework ones. */
    if (nb_rx_q == 0 && nb_tx_q == 0) {
        nb_rx_q = dev_info.default_rxportconf.nb_queues;
        if (nb_rx_q == 0) {
            nb_rx_q = UDK_ETH_DEV_FALLBACK_RX_NBQUEUES;
        }
        nb_tx_q = dev_info.default_txportconf.nb_queues;
        if (nb_tx_q == 0) {
            nb_tx_q = UDK_ETH_DEV_FALLBACK_TX_NBQUEUES;
        }
    }

    if (nb_rx_q > UDK_MAX_QUEUES_PER_PORT) {
        UDK_ETHDEV_LOG(ERR, "Number of RX queues requested (%u) is greater than max supported(%d)\n",
            nb_rx_q, UDK_MAX_QUEUES_PER_PORT);
        ret = -EINVAL;
        goto rollback;
    }

    if (nb_tx_q > UDK_MAX_QUEUES_PER_PORT) {
        UDK_ETHDEV_LOG(ERR, "Number of TX queues requested (%u) is greater than max supported(%d)\n",
            nb_tx_q, UDK_MAX_QUEUES_PER_PORT);
        ret = -EINVAL;
        goto rollback;
    }

    if (nb_rx_q > dev_info.max_rx_queues) {
        UDK_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n", port_id, nb_rx_q, dev_info.max_rx_queues);
        ret = -EINVAL;
        goto rollback;
    }

    if (nb_tx_q > dev_info.max_tx_queues) {
        UDK_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n", port_id, nb_tx_q, dev_info.max_tx_queues);
        ret = -EINVAL;
        goto rollback;
    }

    /*
     * If jumbo frames are enabled, check that the maximum RX packet
     * length is supported by the configured device.
     */
    if (dev_conf->rxmode.offloads & UDK_ETH_DEV_RX_OFFLOAD_JUMBO_FRAME) {
        if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
            UDK_ETHDEV_LOG(ERR, "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                port_id, dev_conf->rxmode.max_rx_pkt_len, dev_info.max_rx_pktlen);
            ret = -EINVAL;
            goto rollback;
        } else if (dev_conf->rxmode.max_rx_pkt_len < UDK_ETHER_MIN_LEN) {
            UDK_ETHDEV_LOG(ERR, "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                port_id, dev_conf->rxmode.max_rx_pkt_len, (unsigned int)UDK_ETHER_MIN_LEN);
            ret = -EINVAL;
            goto rollback;
        }
    } else {
        if (dev_conf->rxmode.max_rx_pkt_len < UDK_ETHER_MIN_LEN ||
            dev_conf->rxmode.max_rx_pkt_len > UDK_ETHER_MAX_LEN) {
            /* Use default value */
            dev->data->dev_conf.rxmode.max_rx_pkt_len = UDK_ETHER_MAX_LEN;
        }
    }

    diag = udk_eth_dev_rx_queue_config(dev, nb_rx_q);
    if (diag != 0) {
        UDK_ETHDEV_LOG(ERR, "Port%u udk_eth_dev_rx_queue_config = %d\n", port_id, diag);
        ret = diag;
        goto rollback;
    }

    diag = udk_eth_dev_tx_queue_config(dev, nb_tx_q);
    if (diag != 0) {
        UDK_ETHDEV_LOG(ERR, "Port%u udk_eth_dev_tx_queue_config = %d\n", port_id, diag);
        /* Undo the RX side set up just above. */
        (void)udk_eth_dev_rx_queue_config(dev, 0);
        ret = diag;
        goto rollback;
    }

    diag = (*dev->dev_ops->dev_configure)(dev);
    if (diag != 0) {
        UDK_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n", port_id, diag);
        ret = udk_eth_err(port_id, diag);
        goto reset_queues;
    }

    UDK_ETHDEV_LOG(INFO, "Slave %u: dev configure succeed.\n", port_id);
    return 0;
reset_queues:
    (void)udk_eth_dev_rx_queue_config(dev, 0);
    (void)udk_eth_dev_tx_queue_config(dev, 0);
rollback:
    (void)memcpy_s(&dev->data->dev_conf, sizeof(dev->data->dev_conf), &orig_conf, sizeof(orig_conf));

    return ret;
}

/*
 * Lazily map the process-shared ethdev data into this process.
 *
 * The first caller in the primary process reserves the MZ_UDK_ETH_DEV_DATA
 * memzone and zero-initializes it; secondary processes look the memzone up.
 * Under UDK_HOT_REPLACE the lookup-then-reserve order allows a restarted
 * process to re-attach to an existing zone.
 *
 * Returns 0 on success, -ENOMEM when the memzone cannot be obtained.
 * Safe to call repeatedly; guarded by udk_eth_shared_data_lock.
 */
static int udk_eth_dev_shared_data_prepare(void)
{
    const uint32_t flags = 0;
    const struct udk_memzone *mz = NULL;

    udk_spinlock_lock(&udk_eth_shared_data_lock);

    if (udk_eth_dev_shared_data == NULL) {
#ifdef UDK_HOT_REPLACE
        mz = udk_memzone_lookup(MZ_UDK_ETH_DEV_DATA);
        if (mz == NULL) {
            /* NOTE(review): this branch passes UDK_SOCKET_ID_ANY as an extra
             * argument vs the non-HOT_REPLACE call below — presumably the two
             * builds use different udk_memzone_reserve signatures; confirm. */
            mz = udk_memzone_reserve(MZ_UDK_ETH_DEV_DATA, sizeof(*udk_eth_dev_shared_data), UDK_SOCKET_ID_ANY, flags);
            if (mz != NULL) {
                (void)memset_s(mz->addr, sizeof(*udk_eth_dev_shared_data), 0, sizeof(*udk_eth_dev_shared_data));
            }
        }
#else
        if (udk_process_type() == UDK_PROC_PRIMARY) {
            /* Allocate port data and ownership shared memory. */
            mz = udk_memzone_reserve(MZ_UDK_ETH_DEV_DATA, sizeof(*udk_eth_dev_shared_data), flags);
        } else {
            mz = udk_memzone_lookup(MZ_UDK_ETH_DEV_DATA);
        }
#endif
        if (mz == NULL) {
            UDK_ETHDEV_LOG(ERR, "Cannot allocate ethdev shared data\n");
            udk_spinlock_unlock(&udk_eth_shared_data_lock);
            return -ENOMEM;
        }

        udk_eth_dev_shared_data = mz->addr;
        /* Only the primary process initializes the shared contents. */
        if (udk_process_type() == UDK_PROC_PRIMARY) {
            udk_eth_dev_shared_data->next_owner_id = UDK_ETH_DEV_NO_OWNER + 1;
            udk_spinlock_init(&udk_eth_dev_shared_data->ownership_lock);
            (void)memset_s(udk_eth_dev_shared_data->data, sizeof(udk_eth_dev_shared_data->data),
                0, sizeof(udk_eth_dev_shared_data->data));
        }
    }

    udk_spinlock_unlock(&udk_eth_shared_data_lock);
    return 0;
}

/*
 * Find the first free port slot; an empty name in the shared data marks
 * a slot as unused. Returns UDK_MAX_ETHPORTS when the table is full.
 * Caller must hold the ownership lock.
 */
static uint16_t udk_eth_dev_find_free_port(void)
{
    uint16_t idx = 0;

    while (idx < UDK_MAX_ETHPORTS) {
        /* Using shared name field to find a free port. */
        if (udk_eth_dev_shared_data->data[idx].name[0] == '\0') {
            UDK_ASSERT(udk_eth_devices[idx].state == UDK_ETH_DEV_UNUSED);
            break;
        }
        idx++;
    }
    return idx;
}

static struct udk_eth_dev *udk_eth_dev_allocated_unsafe(const char *name)
{
    uint32_t i;

    for (i = 0; i < UDK_MAX_ETHPORTS; i++) {
        if (udk_eth_devices[i].data != NULL && strcmp(udk_eth_devices[i].data->name, name) == 0) {
            return &udk_eth_devices[i];
        }
    }
    return NULL;
}

/*
 * Thread-safe lookup of a device by name: ensures the shared data is
 * mapped, then scans under the ownership lock.
 * Returns the device, or NULL when not found or on setup failure.
 */
struct udk_eth_dev *udk_eth_dev_allocated(const char *name)
{
    struct udk_eth_dev *ethdev = NULL;

    if (udk_eth_dev_shared_data_prepare() != 0) {
        return NULL;
    }

    udk_spinlock_lock(&udk_eth_dev_shared_data->ownership_lock);
    ethdev = udk_eth_dev_allocated_unsafe(name);
    udk_spinlock_unlock(&udk_eth_dev_shared_data->ownership_lock);

    return ethdev;
}

/*
 * Bind the per-process device entry for @port_id to its shared data slot
 * and return it. @port_id must be < UDK_MAX_ETHPORTS.
 */
static struct udk_eth_dev *udk_eth_dev_get(uint16_t port_id)
{
    struct udk_eth_dev *dev = &udk_eth_devices[port_id];

    dev->data = &udk_eth_dev_shared_data->data[port_id];
    return dev;
}

/*
 * Look up the shared-data slot whose name equals @name and store its
 * index in *@port_id. Returns 0 on success, -EINVAL on missing shared
 * data or NULL @name, -ENODEV when no slot matches (then *@port_id is
 * UDK_MAX_ETHPORTS).
 */
static int udk_eth_dev_data_allocated(const char *name, uint16_t *port_id)
{
    uint16_t idx;

    if (udk_eth_dev_shared_data == NULL) {
        return -EINVAL;
    }
    if (name == NULL) {
        UDK_ETHDEV_LOG(DEBUG, "Null pointer is specified\n");
        return -EINVAL;
    }

    *port_id = UDK_MAX_ETHPORTS;
    for (idx = 0; idx < UDK_MAX_ETHPORTS; idx++) {
        if (strncmp(name, udk_eth_dev_shared_data->data[idx].name, UDK_DEV_NAME_MAX_LEN) == 0) {
            *port_id = idx;
            return 0;
        }
    }
    return -ENODEV;
}

/*
 * Attach this (secondary) process to a port the primary process already
 * created, identified by @name. Returns the local device entry bound to
 * the shared slot, or NULL when the name is unknown or shared data
 * cannot be prepared.
 */
static struct udk_eth_dev *udk_eth_dev_attach_secondary(const char *name)
{
    struct udk_eth_dev *eth_dev = NULL;
    uint16_t i = 0;

    if (udk_eth_dev_shared_data_prepare() != 0) {
        return NULL;
    }

    /* Synchronize port attachment to primary port creation and release. */
    udk_spinlock_lock(&udk_eth_dev_shared_data->ownership_lock);

    while (i < UDK_MAX_ETHPORTS && strcmp(udk_eth_dev_shared_data->data[i].name, name) != 0) {
        i++;
    }
    if (i < UDK_MAX_ETHPORTS) {
        eth_dev = udk_eth_dev_get(i);
        UDK_ASSERT(eth_dev->data->port_id == i);
    } else {
        UDK_ETHDEV_LOG(ERR, "Device %s is not driven by the primary process\n", name);
    }

    udk_spinlock_unlock(&udk_eth_dev_shared_data->ownership_lock);
    return eth_dev;
}

/*
 * Allocate a new ethdev port named @name.
 *
 * Validates the name length, finds a free slot, records the name in the
 * shared data and seeds port_id/MTU. Under UDK_HOT_REPLACE a name that
 * already has shared data is re-attached instead of allocated anew.
 *
 * Returns the device, or NULL on failure (bad name, duplicate, table
 * full, shared-data failure, or name copy failure).
 */
struct udk_eth_dev *udk_eth_dev_allocate(const char *name)
{
    uint16_t port_id;
#ifdef UDK_HOT_REPLACE
    uint16_t orig_port_id;
#endif
    struct udk_eth_dev *eth_dev = NULL;
    size_t name_len;
    int ret;

    name_len = strnlen(name, UDK_DEV_NAME_MAX_LEN);
    if (name_len == 0) {
        UDK_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
        return NULL;
    }

    if (name_len >= UDK_DEV_NAME_MAX_LEN) {
        UDK_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
        return NULL;
    }

    ret = udk_eth_dev_shared_data_prepare();
    if (ret != 0) {
        return NULL;
    }

    /* Synchronize port creation between primary and secondary threads. */
    udk_spinlock_lock(&udk_eth_dev_shared_data->ownership_lock);

    if (udk_eth_dev_allocated_unsafe(name) != NULL) {
        UDK_ETHDEV_LOG(ERR, "Ethernet device with name %s already allocated\n", name);
        goto unlock;
    }

    port_id = udk_eth_dev_find_free_port();
    if (port_id == UDK_MAX_ETHPORTS) {
        UDK_ETHDEV_LOG(ERR, "Reached maximum number of Ethernet ports\n");
        goto unlock;
    }

#ifdef UDK_HOT_REPLACE
    /* Shared data already carries this name: re-attach instead. */
    if (udk_eth_dev_data_allocated(name, &orig_port_id) == 0) {
        udk_spinlock_unlock(&udk_eth_dev_shared_data->ownership_lock);
        return udk_eth_dev_attach_secondary(name);
    }
#endif

    eth_dev = udk_eth_dev_get(port_id);
    ret = strcpy_s(eth_dev->data->name, sizeof(eth_dev->data->name), name);
    if (ret != EOK) {
        /* Fix: on copy failure the function previously returned a
         * half-initialized device (no name, slot still free). */
        eth_dev = NULL;
        goto unlock;
    }
    eth_dev->data->port_id = port_id;
    eth_dev->data->mtu = UDK_ETHER_MTU;

unlock:
    udk_spinlock_unlock(&udk_eth_dev_shared_data->ownership_lock);

    return eth_dev;
}

/* Mark @dev as fully probed and usable; a NULL @dev is ignored. */
void udk_eth_dev_probing_finish(struct udk_eth_dev *dev)
{
    if (dev != NULL) {
        dev->state = UDK_ETH_DEV_ATTACHED;
    }
}

/*
 * Return 1 when @port_id is in range and the slot is in use (state is
 * not UNUSED), 0 otherwise.
 */
int udk_eth_dev_is_valid_port(uint16_t port_id)
{
    if (port_id >= UDK_MAX_ETHPORTS) {
        return 0;
    }
    return (udk_eth_devices[port_id].state != UDK_ETH_DEV_UNUSED) ? 1 : 0;
}

/*
 * Return the NUMA node of port @port_id, or UDK_SOCKET_ID_ANY when the
 * port id is invalid.
 */
int udk_eth_dev_socket_id(uint16_t port_id)
{
    if (udk_eth_dev_is_valid_port(port_id) == 0) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return UDK_SOCKET_ID_ANY;
    }

    return udk_eth_devices[port_id].data->numa_node;
}

/*
 * Copy the primary MAC address of port @port_id into *@mac_addr.
 * Returns 0 on success, -ENODEV for an invalid port, -EINVAL for a NULL
 * output pointer.
 */
int udk_eth_macaddr_get(uint16_t port_id, struct udk_ether_addr *mac_addr)
{
    struct udk_eth_dev *dev = NULL;

    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -ENODEV;
    }

    /* Fix: validate the output pointer before writing through it. */
    if (mac_addr == NULL) {
        UDK_ETHDEV_LOG(ERR, "Null pointer is specified\n");
        return -EINVAL;
    }

    dev = &udk_eth_devices[port_id];
    udk_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);

    return 0;
}

/*
 * Return the first port id >= @port_id whose slot is in use, or
 * UDK_MAX_ETHPORTS when there is none.
 */
static uint16_t udk_eth_find_next(uint16_t port_id)
{
    uint16_t pid;

    for (pid = port_id; pid < UDK_MAX_ETHPORTS; pid++) {
        if (udk_eth_devices[pid].state != UDK_ETH_DEV_UNUSED) {
            return pid;
        }
    }
    return UDK_MAX_ETHPORTS;
}

/*
 * Find the port whose shared-data name equals @name among the ports in
 * use and store its id in *@port_id.
 * Returns 0 on success, -EINVAL on NULL @name or @port_id, -ENODEV when
 * no active port carries that name (*@port_id is left unchanged then).
 */
int udk_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
    uint16_t pid;

    /* Fix: port_id was written through without validation below. */
    if (name == NULL || port_id == NULL) {
        UDK_ETHDEV_LOG(ERR, "Null pointer is specified\n");
        return -EINVAL;
    }

    /* Iterate only over slots that are actually in use. */
    for (pid = udk_eth_find_next(0); pid < UDK_MAX_ETHPORTS; pid = udk_eth_find_next(pid + 1)) {
        if (!strcmp(name, udk_eth_dev_shared_data->data[pid].name)) {
            *port_id = pid;
            return 0;
        }
    }
    return -ENODEV;
}

/*
 * Start port @port_id: validate the driver ops, call dev_start, mark the
 * device started and trigger an initial link update.
 *
 * Returns 0 on success (or when already started), -ENODEV for an invalid
 * port, -ENOTSUP when dev_start or link_update is missing, or the
 * (possibly -EIO-translated) driver error.
 */
int udk_eth_dev_start(uint16_t port_id)
{
    struct udk_eth_dev *dev = NULL;
    struct udk_eth_dev_info dev_info;
    int ret;

    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -ENODEV;
    }

    dev = &udk_eth_devices[port_id];
    UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
    /* Fix: link_update was checked only after dev_start succeeded, which
     * returned -ENOTSUP while leaving the device started. Check both
     * mandatory ops before touching device state. */
    UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);

    if (dev->data->dev_started != 0) {
        UDK_ETHDEV_LOG(INFO, "Device with port_id=%u already started\n", port_id);
        return 0;
    }

    ret = udk_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0) {
        return ret;
    }

    ret = (*dev->dev_ops->dev_start)(dev);
    if (ret != 0) {
        return udk_eth_err(port_id, ret);
    }
    dev->data->dev_started = 1;

    /* Best-effort initial link state refresh. */
    (void)(*dev->dev_ops->link_update)(dev, 0);

    UDK_ETHDEV_LOG(INFO, "[eth_dev_ops] Slave %u: start dev. \n", port_id);
    return 0;
}

/*
 * Close port @port_id: invoke the driver dev_close op, then release the
 * RX/TX queue pointer arrays and reset the queue counts. A missing
 * dev_close op or invalid port makes this a no-op.
 */
void udk_eth_dev_close(uint16_t port_id)
{
    struct udk_eth_dev *dev = NULL;

    if (udk_eth_dev_is_valid_port(port_id) == 0) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return;
    }

    dev = &udk_eth_devices[port_id];
    UDK_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);

    dev->data->dev_started = 0;
    (*dev->dev_ops->dev_close)(dev);

    /* Release the queue pointer arrays owned by the framework. */
    udk_free(dev->data->rx_queues);
    dev->data->rx_queues = NULL;
    dev->data->nb_rx_queues = 0;

    udk_free(dev->data->tx_queues);
    dev->data->tx_queues = NULL;
    dev->data->nb_tx_queues = 0;

    UDK_ETHDEV_LOG(INFO, "[eth_dev_ops] Slave %u: close dev and release resources. \n", port_id);
}

/*
 * Pre-setup stage for upcall RX queue @rx_queue_id of @port_id: validate
 * the descriptor count against driver limits, release any queue already
 * present in the slot and invoke the driver's upcall_queue_setup_pre op.
 *
 * @nb_rx_desc == 0 selects the driver default ring size, falling back to
 * UDK_ETH_DEV_FALLBACK_RX_RINGSIZE.
 *
 * Returns 0 on success, -EINVAL for bad port/queue/descriptor values,
 * -ENOTSUP for missing driver ops, or the (possibly -EIO-translated)
 * driver error.
 */
int udk_eth_upcall_queue_setup_pre(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id)
{
    int ret;
    struct udk_eth_dev *dev = NULL;
    struct udk_eth_dev_info dev_info;
    void **rxq = NULL;

    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -EINVAL;
    }

    dev = &udk_eth_devices[port_id];

    /* Fix: rx_queues[rx_queue_id] is indexed below, but unlike the other
     * queue-setup entry points the index was never bounds-checked. */
    if (rx_queue_id >= dev->data->nb_rx_queues) {
        UDK_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
        return -EINVAL;
    }

    UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->upcall_queue_setup_pre, -ENOTSUP);
    ret = udk_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0) {
        return ret;
    }

    /* Use default specified by driver, if nb_rx_desc is zero */
    if (nb_rx_desc == 0) {
        nb_rx_desc = dev_info.default_rxportconf.ring_size;
        /* If driver default is also zero, fall back on default */
        if (nb_rx_desc == 0) {
            nb_rx_desc = UDK_ETH_DEV_FALLBACK_RX_RINGSIZE;
        }
    }

    if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
        UDK_ETHDEV_LOG(ERR, "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
            nb_rx_desc, dev_info.rx_desc_lim.nb_max, dev_info.rx_desc_lim.nb_min, dev_info.rx_desc_lim.nb_align);
        return -EINVAL;
    }

    /* Release any queue already occupying this slot. */
    rxq = dev->data->rx_queues;
    if (rxq[rx_queue_id]) {
        UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
        (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
        rxq[rx_queue_id] = NULL;
    }

    ret = (*dev->dev_ops->upcall_queue_setup_pre)(dev, rx_queue_id, nb_rx_desc, socket_id);
    return udk_eth_err(port_id, ret);
}

/*
 * Set up upcall RX queue @rx_queue_id of port @port_id backed by mempool
 * @mp and mark it STARTED.
 *
 * Validates the mempool's private data size and data room against the
 * driver's minimum RX buffer requirement before calling the driver's
 * upcall_queue_setup op.
 *
 * Returns 0 on success (including when the queue is already started),
 * -EINVAL for bad arguments, -ENOSPC for a too-small mempool private
 * area, -ENOTSUP for a missing driver op, or the (possibly
 * -EIO-translated) driver error.
 */
int udk_eth_upcall_queue_setup(uint16_t port_id, uint16_t rx_queue_id, struct udk_mempool *mp)
{
    int ret;
    int mbp_buf_size;
    struct udk_eth_dev *dev = NULL;
    struct udk_eth_dev_info dev_info;

    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -EINVAL;
    }

    dev = &udk_eth_devices[port_id];
    if (rx_queue_id >= dev->data->nb_rx_queues) {
        UDK_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
        return -EINVAL;
    }

    /* Idempotent: nothing to do when the queue is already started. */
    if (dev->data->rx_queue_state[rx_queue_id] == UDK_ETH_QUEUE_STATE_STARTED) {
        return 0;
    }

    if (mp == NULL) {
        UDK_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
        return -EINVAL;
    }

    UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->upcall_queue_setup, -ENOTSUP);
    ret = udk_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0) {
        return ret;
    }

    /* The mempool must reserve room for the pktmbuf pool private data. */
    if (mp->private_data_size < sizeof(struct udk_pktmbuf_pool_private)) {
        UDK_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n", mp->name, (int)mp->private_data_size,
            (int)sizeof(struct udk_pktmbuf_pool_private));
        return -ENOSPC;
    }

    /* Data room minus headroom must satisfy the driver's minimum RX buffer. */
    mbp_buf_size = udk_pktmbuf_data_room_size(mp);
    if ((mbp_buf_size - UDK_PKTMBUF_HEADROOM) < (int)dev_info.min_rx_bufsize) {
        UDK_ETHDEV_LOG(ERR, "%s mbuf_data_room_size %d < %d (UDK_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
            mp->name, mbp_buf_size, (int)(UDK_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
            (int)UDK_PKTMBUF_HEADROOM, (int)dev_info.min_rx_bufsize);
        return -EINVAL;
    }

    ret = (*dev->dev_ops->upcall_queue_setup)(dev, rx_queue_id, mp);
    if (ret == 0) {
        dev->data->rx_queue_state[rx_queue_id] = UDK_ETH_QUEUE_STATE_STARTED;
    }

    return udk_eth_err(port_id, ret);
}

/*
 * Release upcall RX queue @rx_queue_id of port @port_id via the driver's
 * upcall_queue_release op and mark it STOPPED on success.
 *
 * Returns 0 on success, -EINVAL for bad port/queue, -ENOTSUP when the
 * driver lacks the op, or the (possibly -EIO-translated) driver error.
 */
int udk_eth_upcall_queue_release(uint16_t port_id, uint16_t rx_queue_id)
{
    struct udk_eth_dev *dev = NULL;
    int ret;

    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -EINVAL;
    }

    dev = &udk_eth_devices[port_id];
    if (rx_queue_id >= dev->data->nb_rx_queues) {
        UDK_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
        return -EINVAL;
    }

    /* Fix: the op pointer was called unconditionally; every other entry
     * point guards driver ops with UDK_FUNC_PTR_OR_ERR_RET. */
    UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->upcall_queue_release, -ENOTSUP);

    ret = (*dev->dev_ops->upcall_queue_release)(dev, rx_queue_id);
    if (ret == 0) {
        dev->data->rx_queue_state[rx_queue_id] = UDK_ETH_QUEUE_STATE_STOPPED;
    }

    return udk_eth_err(port_id, ret);
}

/*
 * Set up RX queue @rx_queue_id of port @port_id with @nb_rx_desc
 * descriptors on NUMA node @socket_id, backed by mempool @mp.
 *
 * @rx_conf may be NULL to use the driver default; offloads already
 * enabled port-wide are masked out of the per-queue configuration before
 * the driver's rx_queue_setup op is called. A queue already present in
 * the slot is released first. @nb_rx_desc == 0 selects the driver
 * default ring size (fallback UDK_ETH_DEV_FALLBACK_RX_RINGSIZE).
 *
 * Returns 0 on success, -EINVAL for bad arguments or mempool too small,
 * -ENOSPC for a too-small mempool private area, -EBUSY when the queue is
 * running on a started device, -ENOTSUP for missing driver ops, or the
 * (possibly -EIO-translated) driver error.
 */
int udk_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id,
    const struct udk_eth_rxconf *rx_conf, struct udk_mempool *mp)
{
    int ret;
    int mbp_buf_size;
    struct udk_eth_dev *dev = NULL;
    struct udk_eth_dev_info dev_info;
    struct udk_eth_rxconf local_conf;
    void **rxq = NULL;

    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -EINVAL;
    }

    dev = &udk_eth_devices[port_id];
    if (rx_queue_id >= dev->data->nb_rx_queues) {
        UDK_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
        return -EINVAL;
    }

    if (mp == NULL) {
        UDK_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
        return -EINVAL;
    }

#ifdef UDK_HOT_REPLACE
    if (dev->data->dev_started) {
        /* Just return 0 to allow called in secondary process. */
        if (udk_process_type() != UDK_PROC_PRIMARY) {
            return 0;
        }
        UDK_ETHDEV_LOG(DEBUG, "port %u must be stopped to allow configuration\n", port_id);
    }
#endif

    UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
    ret = udk_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0) {
        return ret;
    }

    /* The mempool must reserve room for the pktmbuf pool private data. */
    if (mp->private_data_size < sizeof(struct udk_pktmbuf_pool_private)) {
        UDK_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n", mp->name, (int)mp->private_data_size,
            (int)sizeof(struct udk_pktmbuf_pool_private));
        return -ENOSPC;
    }

    /* Data room minus headroom must satisfy the driver's minimum RX buffer. */
    mbp_buf_size = udk_pktmbuf_data_room_size(mp);
    if ((mbp_buf_size - UDK_PKTMBUF_HEADROOM) < (int)dev_info.min_rx_bufsize) {
        UDK_ETHDEV_LOG(ERR, "%s mbuf_data_room_size %d < %d (UDK_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
            mp->name, mbp_buf_size, (int)(UDK_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
            (int)UDK_PKTMBUF_HEADROOM, (int)dev_info.min_rx_bufsize);
        return -EINVAL;
    }

    /* Use default specified by driver, if nb_rx_desc is zero */
    if (nb_rx_desc == 0) {
        nb_rx_desc = dev_info.default_rxportconf.ring_size;
        /* If driver default is also zero, fall back on default */
        if (nb_rx_desc == 0) {
            nb_rx_desc = UDK_ETH_DEV_FALLBACK_RX_RINGSIZE;
        }
    }

    if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
        UDK_ETHDEV_LOG(ERR, "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
            nb_rx_desc, dev_info.rx_desc_lim.nb_max, dev_info.rx_desc_lim.nb_min, dev_info.rx_desc_lim.nb_align);
        return -EINVAL;
    }

    /* A running queue on a started device cannot be re-set-up. */
    if (dev->data->dev_started && (dev->data->rx_queue_state[rx_queue_id] != UDK_ETH_QUEUE_STATE_STOPPED)) {
        return -EBUSY;
    }

    /* Release any queue already occupying this slot. */
    rxq = dev->data->rx_queues;
    if (rxq[rx_queue_id]) {
        UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
        (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
        rxq[rx_queue_id] = NULL;
    }

    if (rx_conf == NULL) {
        rx_conf = &dev_info.default_rxconf;
    }

    /* Per-queue offloads exclude those already applied port-wide. */
    local_conf = *rx_conf;
    local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
    ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, socket_id, &local_conf, mp);
    if (ret == 0) {
        /* Track the smallest RX buffer across all queues of the port. */
        if (!dev->data->min_rx_buf_size || dev->data->min_rx_buf_size > (uint32_t)mbp_buf_size) {
            dev->data->min_rx_buf_size = (uint32_t)mbp_buf_size;
        }
    }

    return udk_eth_err(port_id, ret);
}

/*
 * Set up TX queue @tx_queue_id of port @port_id with @nb_tx_desc
 * descriptors on NUMA node @socket_id.
 *
 * @tx_conf may be NULL to use the driver default; offloads already
 * enabled port-wide are masked out of the per-queue configuration before
 * the driver's tx_queue_setup op is called. A queue already present in
 * the slot is released first. @nb_tx_desc == 0 selects the driver
 * default ring size (fallback UDK_ETH_DEV_FALLBACK_TX_RINGSIZE).
 *
 * Returns 0 on success, -EINVAL for bad arguments, -EBUSY when the queue
 * is running on a started device, -ENOTSUP for missing driver ops, or
 * the (possibly -EIO-translated) driver error.
 */
int udk_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id,
    const struct udk_eth_txconf *tx_conf)
{
    struct udk_eth_dev *dev = NULL;
    struct udk_eth_dev_info dev_info;
    struct udk_eth_txconf local_conf;
    void **txq = NULL;
    int ret;

    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -EINVAL;
    }

    dev = &udk_eth_devices[port_id];
    if (tx_queue_id >= dev->data->nb_tx_queues) {
        UDK_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
        return -EINVAL;
    }

#ifdef UDK_HOT_REPLACE
    if (dev->data->dev_started) {
        /* Just return 0 to allow called in secondary process. */
        if (udk_process_type() != UDK_PROC_PRIMARY) {
            return 0;
        }
        UDK_ETHDEV_LOG(DEBUG, "port %u must be stopped to allow configuration\n", port_id);
    }
#endif

    UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
    ret = udk_eth_dev_info_get(port_id, &dev_info);
    if (ret != 0) {
        return ret;
    }
    /* Use default specified by driver, if nb_tx_desc is zero */
    if (nb_tx_desc == 0) {
        nb_tx_desc = dev_info.default_txportconf.ring_size;
        /* If driver default is zero, fall back on default */
        if (nb_tx_desc == 0) {
            nb_tx_desc = UDK_ETH_DEV_FALLBACK_TX_RINGSIZE;
        }
    }
    if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
        nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
        nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
        UDK_ETHDEV_LOG(ERR, "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
            nb_tx_desc, dev_info.tx_desc_lim.nb_max, dev_info.tx_desc_lim.nb_min, dev_info.tx_desc_lim.nb_align);
        return -EINVAL;
    }

    /* A running queue on a started device cannot be re-set-up. */
    if (dev->data->dev_started && (dev->data->tx_queue_state[tx_queue_id] != UDK_ETH_QUEUE_STATE_STOPPED)) {
        return -EBUSY;
    }

    /* Release any queue already occupying this slot. */
    txq = dev->data->tx_queues;
    if (txq[tx_queue_id]) {
        UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
        (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
        txq[tx_queue_id] = NULL;
    }

    if (tx_conf == NULL) {
        tx_conf = &dev_info.default_txconf;
    }

    /* Per-queue offloads exclude those already applied port-wide. */
    local_conf = *tx_conf;
    local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

    ret = (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc, socket_id, &local_conf);
    return udk_eth_err(port_id, ret);
}

/*
 * Fill xstats_names with the generic (non-driver) statistic names:
 * first the global device counters, then per-RX-queue and per-TX-queue
 * counters, each capped at UDK_ETHDEV_QUEUE_STAT_CNTRS queues.
 *
 * The caller must provide room for at least
 * udk_get_xstats_basic_count(dev) entries.
 *
 * Returns the number of entries written (copy failures are logged but
 * the corresponding slot is still counted, matching the value layout
 * produced by udk_eth_basic_stats_get()).
 */
static int udk_eth_basic_stats_get_names(struct udk_eth_dev *dev, struct udk_eth_xstat_name *xstats_names)
{
    int cnt_used_entries = 0;
    uint32_t idx, id_queue;
    uint16_t num_q;
    int ret = 0;

    /* Global device-level counters. */
    for (idx = 0; idx < UDK_NB_STATS; idx++) {
        if (strcpy_s(xstats_names[cnt_used_entries].name, sizeof(xstats_names[0].name),
            udk_stats_strings[idx].name) != 0) {
            UDK_ETHDEV_LOG(WARNING, "copy stats name fail\n");
        }
        cnt_used_entries++;
    }

    /* Per-RX-queue counters, named "rx_q<N><stat>". */
    num_q = UDK_MIN(dev->data->nb_rx_queues, (uint16_t)UDK_ETHDEV_QUEUE_STAT_CNTRS);
    for (id_queue = 0; id_queue < num_q; id_queue++) {
        for (idx = 0; idx < UDK_NB_RXQ_STATS; idx++) {
            ret = snprintf_s(xstats_names[cnt_used_entries].name, sizeof(xstats_names[0].name),
                sizeof(xstats_names[0].name) - 1, "rx_q%u%s", id_queue, udk_rxq_stats_strings[idx].name);
            if (ret < 0) {
                UDK_ETHDEV_LOG(WARNING, "snprintf udk rxq stat fail %d %d\n", ret, (int)sizeof(xstats_names[0].name));
            }

            cnt_used_entries++;
        }
    }

    /* Per-TX-queue counters, named "tx_q<N><stat>". */
    num_q = UDK_MIN(dev->data->nb_tx_queues, (uint16_t)UDK_ETHDEV_QUEUE_STAT_CNTRS);
    for (id_queue = 0; id_queue < num_q; id_queue++) {
        for (idx = 0; idx < UDK_NB_TXQ_STATS; idx++) {
            ret = snprintf_s(xstats_names[cnt_used_entries].name, sizeof(xstats_names[0].name),
                sizeof(xstats_names[0].name) - 1, "tx_q%u%s", id_queue, udk_txq_stats_strings[idx].name);
            if (ret < 0) {
                /* Log message fixed: previously said "rcq" (copy-paste from the RX loop). */
                UDK_ETHDEV_LOG(WARNING, "snprintf udk txq stat fail %d %d\n", ret, (int)sizeof(xstats_names[0].name));
            }

            cnt_used_entries++;
        }
    }
    return cnt_used_entries;
}

/*
 * Number of generic (non-driver) xstats entries for a device: the
 * global counters plus one counter set for each RX/TX queue, where
 * the queue count is capped at UDK_ETHDEV_QUEUE_STAT_CNTRS.
 */
static inline int udk_get_xstats_basic_count(struct udk_eth_dev *dev)
{
    uint16_t rxq_cnt = UDK_MIN(dev->data->nb_rx_queues, (uint16_t)UDK_ETHDEV_QUEUE_STAT_CNTRS);
    uint16_t txq_cnt = UDK_MIN(dev->data->nb_tx_queues, (uint16_t)UDK_ETHDEV_QUEUE_STAT_CNTRS);

    return (int)(UDK_NB_STATS + rxq_cnt * UDK_NB_RXQ_STATS + txq_cnt * UDK_NB_TXQ_STATS);
}

/*
 * Total number of xstats entries for a port: driver-specific entries
 * (queried via the xstats_get_names callback with a NULL buffer) plus
 * the generic entries from udk_get_xstats_basic_count().
 *
 * Returns the count, -EINVAL for a bad port, or a driver error code.
 */
static int udk_get_xstats_count(uint16_t port_id)
{
    struct udk_eth_dev *dev = NULL;
    int count = 0;

    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -EINVAL;
    }

    dev = &udk_eth_devices[port_id];
    if (dev->dev_ops->xstats_get_names != NULL) {
        /* A NULL buffer asks the driver only for its entry count. */
        count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
        if (count < 0) {
            return udk_eth_err(port_id, count);
        }
    }

    return count + udk_get_xstats_basic_count(dev);
}

/*
 * Retrieve the names of all xstats for a port: generic names first,
 * then driver-specific names appended after them.
 *
 * If xstats_names is NULL or size is too small, returns the required
 * number of entries instead of filling the buffer. Negative values
 * from udk_get_xstats_count() (bad port / driver error) are propagated.
 */
int udk_eth_xstats_get_names(uint16_t port_id, struct udk_eth_xstat_name *xstats_names, unsigned int size)
{
    struct udk_eth_dev *dev = NULL;
    int used;
    int expected;
    int driver_cnt;

    expected = udk_get_xstats_count(port_id);
    /* Report required size (or propagate errors) when we cannot fill the buffer. */
    if (xstats_names == NULL || expected < 0 || (int)size < expected) {
        return expected;
    }

    /* port_id was validated inside udk_get_xstats_count(). */
    dev = &udk_eth_devices[port_id];
    used = udk_eth_basic_stats_get_names(dev, xstats_names);

    if (dev->dev_ops->xstats_get_names != NULL) {
        /* Driver names go after the generic ones, in the remaining space. */
        driver_cnt = (*dev->dev_ops->xstats_get_names)(dev,
            xstats_names + used, size - (uint32_t)used);
        if (driver_cnt < 0) {
            return udk_eth_err(port_id, driver_cnt);
        }
        used += driver_cnt;
    }

    return used;
}

/*
 * Zero *stats and fill it via the driver's stats_get callback.
 * rx_nombuf is maintained by the ethdev layer and is copied in before
 * invoking the driver (which fills the remaining fields).
 *
 * Returns 0 on success, -EINVAL on invalid arguments, -ENOTSUP when
 * the driver has no stats_get callback, or a driver error code mapped
 * through udk_eth_err().
 */
static int udk_eth_stats_get(uint16_t port_id, struct udk_eth_stats *stats)
{
    struct udk_eth_dev *dev = NULL;

    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -EINVAL;
    }

    /* Guard the output buffer before memset_s dereferences it. */
    if (stats == NULL) {
        UDK_ETHDEV_LOG(ERR, "stats is NULL\n");
        return -EINVAL;
    }

    dev = &udk_eth_devices[port_id];
    (void)memset_s(stats, sizeof(*stats), 0, sizeof(*stats));

    UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
    stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
    return udk_eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
}

static int udk_eth_basic_stats_get(uint16_t port_id, struct udk_eth_xstat *xstats)
{
    struct udk_eth_dev *dev = NULL;
    struct udk_eth_stats eth_stats;
    int count = 0;
    uint32_t i, q;
    uint64_t *stats = NULL;
    uint16_t nb_rxqs, nb_txqs;
    int ret;

    ret = udk_eth_stats_get(port_id, &eth_stats);
    if (ret < 0) {
        return ret;
    }

    dev = &udk_eth_devices[port_id];
    nb_rxqs = UDK_MIN(dev->data->nb_rx_queues, (uint16_t)UDK_ETHDEV_QUEUE_STAT_CNTRS);
    nb_txqs = UDK_MIN(dev->data->nb_tx_queues, (uint16_t)UDK_ETHDEV_QUEUE_STAT_CNTRS);
    /* global stats */
    for (i = 0; i < UDK_NB_STATS; i++) {
        stats = UDK_PTR_ADD(&eth_stats, udk_stats_strings[i].offset);
        xstats[count].value = *stats;
        count++;
    }

    /* stats per rxq */
    for (q = 0; q < nb_rxqs; q++) {
        for (i = 0; i < UDK_NB_RXQ_STATS; i++) {
            stats = UDK_PTR_ADD(&eth_stats, udk_rxq_stats_strings[i].offset + q * sizeof(uint64_t));
            xstats[count].value = *stats;
            count++;
        }
    }

    /* stats per txq */
    for (q = 0; q < nb_txqs; q++) {
        for (i = 0; i < UDK_NB_TXQ_STATS; i++) {
            stats = UDK_PTR_ADD(&eth_stats, udk_txq_stats_strings[i].offset + q * sizeof(uint64_t));
            xstats[count].value = *stats;
            count++;
        }
    }
    return count;
}

/*
 * Retrieve all extended statistics for a port into xstats (capacity n).
 *
 * Layout: the first `count` entries are the generic statistics (global +
 * per-queue), followed by `xcount` driver-specific entries. When xstats is
 * NULL or n is too small, returns the total number of entries required
 * without filling the buffer.
 *
 * Returns the number of entries (count + xcount), -EINVAL for a bad port,
 * or a negative driver error mapped through udk_eth_err().
 */
int udk_eth_xstats_get(uint16_t port_id, struct udk_eth_xstat *xstats, uint32_t n)
{
    struct udk_eth_dev *dev = NULL;
    int count;
    int i;
    int xcount = 0;
    uint16_t nb_rxqs, nb_txqs;

    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return -EINVAL;
    }

    dev = &udk_eth_devices[port_id];
    /* Queue counts are capped the same way as in udk_get_xstats_basic_count(). */
    nb_rxqs = UDK_MIN(dev->data->nb_rx_queues, (uint16_t)UDK_ETHDEV_QUEUE_STAT_CNTRS);
    nb_txqs = UDK_MIN(dev->data->nb_tx_queues, (uint16_t)UDK_ETHDEV_QUEUE_STAT_CNTRS);
    /* Return generic statistics */
    count = UDK_NB_STATS + (nb_rxqs * UDK_NB_RXQ_STATS) + (nb_txqs * UDK_NB_TXQ_STATS);

    /* implemented by the driver */
    if (dev->dev_ops->xstats_get != NULL) {
        /* Retrieve the xstats from the driver at the end of the xstats struct. */
        /* Driver entries start at offset `count`; pass only the space left after
         * the generic entries (0 if n <= count, letting the driver just count). */
        xcount = (*dev->dev_ops->xstats_get)(dev, xstats ? xstats + count : NULL,
            ((int)n > count) ? n - (uint32_t)count : 0);
        if (xcount < 0) {
            return udk_eth_err(port_id, xcount);
        }
    }

    /* Buffer absent or too small: report the required total instead. */
    if ((int)n < count + xcount || xstats == NULL) {
        return count + xcount;
    }

    /* now fill the xstats structure */
    /* udk_eth_basic_stats_get() returns the same generic count computed above. */
    count = udk_eth_basic_stats_get(port_id, xstats);
    if (count < 0) {
        return count;
    }

    /* Generic entries get sequential ids 0..count-1. */
    for (i = 0; i < count; i++) {
        xstats[i].idx = i;
    }

    /* add an offset to driver-specific stats */
    /* Shift the driver-assigned ids so they follow the generic ids. */
    for (i = count ; i < count + xcount; i++) {
        xstats[i].idx += (uint32_t)count;
    }

    return count + xcount;
}

/*
 * Return the number of free descriptors on a TX queue via the driver's
 * tx_queue_free_count callback.
 *
 * NOTE(review): port/queue validation and the callback NULL-check are
 * compiled in only under UDK_ETHDEV_DEBUG; in release builds the caller
 * must guarantee a valid port, queue and driver callback, or this
 * dereferences invalid data. Returns 0 on any validation failure.
 */
uint32_t udk_eth_tx_queue_free_count_get(uint16_t port_id, uint16_t queue_id)
{
    struct udk_eth_dev *dev = &udk_eth_devices[port_id];

#ifdef UDK_ETHDEV_DEBUG
    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return 0;
    }
    UDK_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_free_count, 0);

    if (queue_id >= dev->data->nb_tx_queues) {
        UDK_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
        return 0;
    }
#endif
    return (*dev->dev_ops->tx_queue_free_count)(dev, queue_id);
}

/*
 * Ask the driver to reclaim completed TX descriptors on a queue via its
 * tx_done_cleanup callback.
 *
 * NOTE(review): as with udk_eth_tx_queue_free_count_get(), the argument
 * validation and the callback NULL-check exist only under UDK_ETHDEV_DEBUG;
 * release-build callers must pass a valid port and queue.
 */
void udk_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id)
{
    struct udk_eth_dev *dev = &udk_eth_devices[port_id];

#ifdef UDK_ETHDEV_DEBUG
    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return;
    }
    /*
     * Use the void-returning check macro: the previous
     * UDK_FUNC_PTR_OR_ERR_RET(..., 0) would execute "return 0;" inside a
     * void function, which is invalid C.
     */
    UDK_FUNC_PTR_OR_RET(*dev->dev_ops->tx_done_cleanup);

    if (queue_id >= dev->data->nb_tx_queues) {
        UDK_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
        return;
    }
#endif

    (*dev->dev_ops->tx_done_cleanup)(dev, queue_id);
}

/*
 * Detach (remove) a probed virtual device.
 *
 * Behavior depends on the process role and build:
 *  - UDK_HOT_REPLACE builds: secondary processes detach the device
 *    themselves via udk_vdev_uninit().
 *  - Other builds: secondary processes forward a hotplug request to the
 *    primary process over IPC instead.
 *  - The primary process always detaches the device itself.
 *
 * Returns 0 on success, -ENOENT if the device was never probed,
 * -ENOMSG if the IPC request failed, or the udk_vdev_uninit() error.
 */
int udk_dev_remove(struct udk_vdev_device *dev)
{
    int ret;

    if (udk_vdev_is_probed(dev) == 0) {
        UDK_ETHDEV_LOG(ERR, "Device is not probed\n");
        return -ENOENT;
    }

#ifdef UDK_HOT_REPLACE
    if (udk_process_type() != UDK_PROC_PRIMARY) {
        /* in hot-replace, detach the device itself. */
        ret = udk_vdev_uninit(dev->name);
        if (ret != 0) {
            UDK_ETHDEV_LOG(ERR, "Failed to detach device on secondary process\n");
        }
        return ret;
    }
#else
    if (udk_process_type() != UDK_PROC_PRIMARY) {
        /* If in secondary process, just send IPC request to primary process. */
        ret = udk_dev_hotplug_req_to_primary();
        if (ret != 0) {
            UDK_ETHDEV_LOG(ERR, "Failed to send hotplug request to primary\n");
            return -ENOMSG;
        }
        return 0;
    }
#endif

    /* primary detach the device itself. */
    ret = udk_vdev_uninit(dev->name);
    if (ret != 0) {
        UDK_ETHDEV_LOG(ERR, "Failed to detach device on primary process\n");
        return ret;
    }
    return 0;
}

/*
 * Mark an ethdev port as unused and, when this process owns the device
 * data, free its queues, MAC addresses and private data and zero the
 * shared data record. All state changes happen under the ownership lock.
 *
 * Ownership of the data: in UDK_HOT_REPLACE builds every process frees;
 * otherwise only the primary process does.
 *
 * Returns 0 on success, -EINVAL for a NULL device, or the error from
 * udk_eth_dev_shared_data_prepare().
 */
int udk_eth_dev_release_port(struct udk_eth_dev *eth_dev)
{
    int ret;
    int do_free;

    if (eth_dev == NULL) {
        return -EINVAL;
    }

    ret = udk_eth_dev_shared_data_prepare();
    if (ret != 0) {
        return ret;
    }

    /*
     * Deduplicated: the free/memset sequence was previously copied verbatim
     * into both branches of the #ifdef; only the ownership condition differs.
     */
#ifdef UDK_HOT_REPLACE
    do_free = 1;
#else
    do_free = (udk_process_type() == UDK_PROC_PRIMARY);
#endif

    udk_spinlock_lock(&udk_eth_dev_shared_data->ownership_lock);

    eth_dev->state = UDK_ETH_DEV_UNUSED;

    if (do_free) {
        udk_free(eth_dev->data->rx_queues);
        udk_free(eth_dev->data->tx_queues);
        udk_free(eth_dev->data->mac_addrs);
        udk_free(eth_dev->data->dev_private);
        (void)memset_s(eth_dev->data, sizeof(struct udk_eth_dev_data), 0, sizeof(struct udk_eth_dev_data));
    }

    udk_spinlock_unlock(&udk_eth_dev_shared_data->ownership_lock);

    return 0;
}

/*
 * Stop a started ethdev port. Silently returns for an invalid port or a
 * missing driver dev_stop callback; logs at INFO level and returns if the
 * device is already stopped.
 */
void udk_eth_dev_stop(uint16_t port_id)
{
    struct udk_eth_dev *dev = NULL;

    if (!udk_eth_dev_is_valid_port(port_id)) {
        UDK_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);
        return;
    }

    dev = &udk_eth_devices[port_id];
    UDK_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

    if (dev->data->dev_started == 0) {
        UDK_ETHDEV_LOG(INFO, "Device with port_id=%u already stopped\n", port_id);
        return;
    }

    /* Clear the started flag before handing control to the driver. */
    dev->data->dev_started = 0;
    (*dev->dev_ops->dev_stop)(dev);
}
