/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2023. All rights reserved.
 * Description   : udk mbuf interface
 */

#ifndef UDK_MBUF_H
#define UDK_MBUF_H

#include "udk_mempool.h"
#include "udk_common.h"
#include "udk_atomic.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Default mempool ops name: multi-producer / multi-consumer ring. */
#define UDK_MBUF_DEFAULT_MEMPOOL_OPS "ring_mp_mc"
/* Bytes reserved before the packet data in each mbuf data buffer. */
#define UDK_PKTMBUF_HEADROOM 128

/*
 * RX offload flags reported in udk_mbuf::ol_flags.
 * NOTE(review): bit positions appear to mirror DPDK's legacy PKT_RX_*
 * layout — confirm against the driver before changing any value.
 */
#define PKT_RX_VLAN          (1ULL << 0)
#define PKT_RX_RSS_HASH      (1ULL << 1)
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)
#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)

/* TX offload request flags, set in udk_mbuf::ol_flags by the application. */
#define PKT_TX_VLAN       (1ULL << 57)
#define PKT_TX_IP_CKSUM   (1ULL << 54)
#define PKT_TX_TCP_CKSUM  (1ULL << 52)
#define PKT_TX_UDP_CKSUM  (3ULL << 52) /* Two-bit value overlapping PKT_TX_TCP_CKSUM (bits 52-53). */
#define PKT_TX_TCP_SEG    (1ULL << 50)
#define PKT_TX_IPV6       (1ULL << 56)
#define PKT_TX_OUTER_IPV6 (1ULL << 60)

/* Packet-type values for udk_mbuf::packet_type (L2/L3/L4 and tunnel classes). */
#define UDK_PTYPE_L3_IPV4                   0x00000010
#define UDK_PTYPE_L3_IPV4_EXT               0x00000030
#define UDK_PTYPE_L4_FRAG                   0x00000300
#define UDK_PTYPE_L3_IPV6                   0x00000040
#define UDK_PTYPE_L4_SCTP                   0x00000400
#define UDK_PTYPE_L4_UDP                    0x00000200
#define UDK_PTYPE_TUNNEL_VXLAN              0x00003000
#define UDK_PTYPE_L4_TCP                    0x00000100
#define UDK_PTYPE_TUNNEL_NVGRE              0x00004000
#define UDK_PTYPE_L4_ICMP                   0x00000500
#define UDK_PTYPE_L2_ETHER_LLDP             0x00000004
#define UDK_PTYPE_L2_ETHER_ARP              0x00000003
#define UDK_PTYPE_L2_ETHER                  0x00000001

/* Mbuf having an external buffer attached. shinfo in mbuf must be filled. */
#define EXT_ATTACHED_MBUF       (1ULL << 61)
/* Indirect attached mbuf */
#define IND_ATTACHED_MBUF       (1ULL << 62)
/* Alignment constraint of mbuf private area. */
#define UDK_MBUF_PRIV_ALIGN     8
/* Returns TRUE if given mbuf is direct, or FALSE otherwise.
 * Note: 'mb' is expanded once; a direct mbuf owns its own data buffer. */
#define UDK_MBUF_DIRECT(mb)     (!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))

/* Callback used to release an externally attached buffer.
 * addr: buffer virtual address; opaque: user context (presumably
 * udk_mbuf_ext_shared_info::fcb_opaque — confirm with callers). */
typedef void (*udk_mbuf_extbuf_free_callback_t)(void *addr, void *opaque);

/* Per-pool configuration stored in the mempool private area
 * (read back by udk_pktmbuf_data_room_size() / udk_pktmbuf_priv_size()). */
struct udk_pktmbuf_pool_private {
    uint16_t mbuf_data_room_size; /* Size of data space in each mbuf. */
    uint16_t mbuf_priv_size;      /* Size of private area in each mbuf. */
    uint32_t flags;               /* reserved for future use. */
};

/* Shared metadata for an external buffer attached to one or more mbufs
 * (pointed to by udk_mbuf::shinfo when EXT_ATTACHED_MBUF is set). */
struct udk_mbuf_ext_shared_info {
    udk_mbuf_extbuf_free_callback_t free_cb; /* Free callback; presumably invoked when refcnt reaches zero — confirm. */
    void *fcb_opaque;                        /* Opaque argument forwarded to free_cb. */
    uint16_t refcnt;                         /* Number of mbufs referencing the external buffer. */
};

/* Hierarchical-scheduler metadata; overlays the 8-byte hash union in udk_mbuf. */
struct udk_mbuf_sched {
    uint32_t queue_id;     /* Scheduler queue id. */
    uint8_t traffic_class; /* Traffic class. */
    uint8_t color;         /* Packet color — presumably metering color; confirm. */
    uint16_t reserved;     /* Reserved / padding. */
};

/*
 * The generic udk_mbuf, containing a packet mbuf.
 *
 * Layout is split across two cache lines: the first holds fields touched
 * on the RX fast path, the second holds TX / slow-path fields.
 * NOTE(review): field order and sizes look ABI-sensitive — do not reorder
 * or resize members.
 */
struct udk_mbuf {
    UDK_MARKER cacheline0; /* Marker: start of first cache line. */

    void *buf_addr;                                      /* Virtual address of segment buffer. */
    udk_iova_t buf_iova udk_aligned(sizeof(udk_iova_t)); /* physical address */

    /* next 8 bytes are initialised on RX descriptor rearm */
    UDK_MARKER64 rearm_data;
    uint16_t data_off; /* Offset of packet data from buf_addr (headroom). */
    udk_atomic16_t refcnt_atomic; /* Reference counter; access via udk_mbuf_refcnt_read/set. */
    uint16_t nb_segs; /* Number of segments. */
    uint16_t port; /* Port id; UDK_MBUF_INVALID_PORT when unset. */

    uint64_t ol_flags; /* Offload features. */

    /* remaining bytes are set on RX when pulling packet from descriptor */
    UDK_MARKER rx_descriptor_fields1;

    UDK_STD_C11
    union {
        uint32_t packet_type; /* L2/L3/L4 and tunnel information. */
        __extension__ struct {
            uint32_t l2_type : 4;  /* (Outer) L2 type. */
            uint32_t l3_type : 4;  /* (Outer) L3 type. */
            uint32_t l4_type : 4;  /* (Outer) L4 type. */
            uint32_t tun_type : 4; /* Tunnel type. */
            UDK_STD_C11
            union {
                uint8_t inner_esp_next_proto; /* Presumably ESP next-protocol byte — confirm. */
                __extension__ struct {
                    uint8_t inner_l2_type : 4; /* Inner L2 type. */
                    uint8_t inner_l3_type : 4; /* Inner L3 type. */
                };
            };
            uint32_t inner_l4_type : 4; /* Inner L4 type. */
        };
    };

    uint32_t pkt_len;  /* Total pkt len: sum of all segments. */
    uint16_t data_len; /* Amount of data in segment buffer. */
    uint16_t vlan_tci; /* VLAN TCI (CPU order), valid if PKT_RX_VLAN is set. */

    UDK_STD_C11
    union {
        union {
            uint32_t rss; /* RSS hash result if RSS enabled */
            struct {
                union {
                    struct {
                        uint16_t hash;
                        uint16_t id;
                    };
                    uint32_t lo; /* Second 4 flexible bytes */
                };
                uint32_t hi; /* First 4 flexible bytes or FD ID, dependent on PKT_RX_FDIR_* flag in ol_flags. */
            } fdir;          /* Filter identifier if FDIR enabled */
            struct udk_mbuf_sched sched; /* Hierarchical scheduler : 8 bytes */
            struct {
                uint32_t reserved1;
                uint16_t reserved2;
                uint16_t txq;
            } txadapter; /* Eventdev ethdev Tx adapter */
            uint32_t usr; /* User-defined tag; shares storage with the views above. */
        } hash; /* hash information */
    };

    uint16_t vlan_tci_outer; /*  Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ is set. */
    uint16_t buf_len; /* Length of segment buffer. */
    struct udk_mempool *pool; /* Pool from which mbuf was allocated. */

    /* second cache line - fields only used in slow path or on TX */
    UDK_MARKER cacheline1 udk_cache_min_aligned;

    struct udk_mbuf *next; /* Next segment of scattered packet. */

    /* fields to support TX offloads */
    UDK_STD_C11
    union {
        uint64_t tx_offload; /* Combined 64-bit view of the header-length bit-fields below. */
        __extension__ struct {
            uint64_t l2_len : 7;       /* L2 header length. */
            uint64_t l3_len : 9;       /* L3 header length. */
            uint64_t l4_len : 8;       /* L4 header length. */
            uint64_t tso_segsz : 16;   /* TSO segment size. */
            uint64_t outer_l3_len : 9; /* Outer L3 header length (tunneling). */
            uint64_t outer_l2_len : 7; /* Outer L2 header length (tunneling). */
        };
    };

    /* Shared data for external buffer attached to mbuf. */
    struct udk_mbuf_ext_shared_info *shinfo;

    uint16_t priv_size; /* Presumably a per-mbuf copy of the pool's private area size — confirm. */
    uint16_t timesync;  /* Presumably a timesync (IEEE 1588) slot/flag — confirm. */
    uint32_t dynfield0; /* Presumably a dynamic-field slot like dynfield1 — confirm. */

    UDK_STD_C11
    union {
        void *userdata;   /* *< Can be used for external metadata */
        uint64_t udata64; /* *< Allow 8-byte userdata on 32-bit */
    };

    uint32_t internal;      /* internal use mbuf, app set it to 0 */
    uint32_t dynfield1[5];  /* Reserved for dynamic fields. */
} udk_cache_aligned;

/* Points to an offset into the data in the mbuf.
 * 'm' is expanded twice — avoid side effects in the argument;
 * 't' is the pointer type the result is cast to. */
#define udk_pktmbuf_mtod_offset(m, t, o) ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/* Points the start of the data in the mbuf */
#define udk_pktmbuf_mtod(m, t) udk_pktmbuf_mtod_offset(m, t, 0)

/* Returns the length of the segment. */
#define udk_pktmbuf_data_len(m) ((m)->data_len)

/* IO address where packet data starts by default: the segment buffer's
 * IO address plus the fixed headroom. */
static inline udk_iova_t udk_mbuf_data_iova_default(const struct udk_mbuf *mb)
{
    const udk_iova_t base = mb->buf_iova;

    return base + UDK_PKTMBUF_HEADROOM;
}

/* IO address of the current packet data: segment buffer IO address plus
 * the mbuf's current data offset. */
static inline udk_iova_t udk_mbuf_data_iova(const struct udk_mbuf *mb)
{
    const udk_iova_t base = mb->buf_iova;

    return base + mb->data_off;
}

static inline uint16_t udk_mbuf_refcnt_read(const struct udk_mbuf *m)
{
    return (uint16_t)(udk_atomic16_read(&m->refcnt_atomic));
}

/* Set the mbuf reference counter (atomic store). */
static inline void udk_mbuf_refcnt_set(struct udk_mbuf *m, uint16_t new_value)
{
    udk_atomic16_t *cnt = &m->refcnt_atomic;

    udk_atomic16_set(cnt, (int16_t)new_value);
}

/* Full mbuf consistency checks are compiled in only for debug builds;
 * in release builds the macro expands to a no-op. */
#ifdef UDK_MBUF_DEBUG
#define udk_mbuf_sanity_check(m, is_header) mbuf_sanity_check(m, is_header)
#else
#define udk_mbuf_sanity_check(m, is_header) do { } while (0)
#endif

/* Assert that an mbuf is in its pristine (freshly allocated) state:
 * refcount of exactly 1, no chained segment, single segment. */
static udk_force_inline void udk_mbuf_raw_sanity_check(udk_unused const struct udk_mbuf *m)
{
    UDK_ASSERT(udk_mbuf_refcnt_read(m) == 1);
    UDK_ASSERT((m)->next == NULL);
    UDK_ASSERT((m)->nb_segs == 1);
    udk_mbuf_sanity_check(m, 0);
}

/* Invariant check applied to mbufs obtained via raw allocation. */
#define UDK_MBUF_RAW_ALLOC_CHECK(m) udk_mbuf_raw_sanity_check(m)

/* Data room size configured for mbufs of this pool, read from the
 * pool's private area (udk_pktmbuf_pool_private). */
static inline uint16_t udk_pktmbuf_data_room_size(struct udk_mempool *mp)
{
    void *priv = udk_mempool_get_priv(mp);

    return ((struct udk_pktmbuf_pool_private *)priv)->mbuf_data_room_size;
}

/* Get the application private size of mbufs stored in a pktmbuf_pool */
static inline uint16_t udk_pktmbuf_priv_size(struct udk_mempool *mp)
{
    struct udk_pktmbuf_pool_private *mbp_priv;

    mbp_priv = (struct udk_pktmbuf_pool_private *)udk_mempool_get_priv(mp);
    return mbp_priv->mbuf_priv_size;
}

static inline void udk_pktmbuf_reset_headroom(struct udk_mbuf *m)
{
    m->data_off = (uint16_t)UDK_MIN((uint16_t)UDK_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
    return;
}

/* Sentinel for udk_mbuf::port: mbuf not associated with any port. */
#define UDK_MBUF_INVALID_PORT   ((uint16_t)-1)

/* Re-initialise the per-packet fields of an mbuf so it can be reused:
 * single unchained segment, no offloads, default headroom, zero lengths. */
static inline void udk_pktmbuf_reset(struct udk_mbuf *m)
{
    /* Segment chain state. */
    m->nb_segs = 1;
    m->next = NULL;
    m->port = UDK_MBUF_INVALID_PORT;

    /* Offload / classification state. */
    m->ol_flags = 0;
    m->packet_type = 0;
    m->vlan_tci = 0;
    m->vlan_tci_outer = 0;
    m->tx_offload = 0;

    /* Data layout: restore headroom, empty payload. */
    udk_pktmbuf_reset_headroom(m);
    m->pkt_len = 0;
    m->data_len = 0;

    udk_mbuf_sanity_check(m, 1);
}

/* Allocate one raw (uninitialised) mbuf from the pool.
 * Returns NULL when the pool is exhausted; otherwise the mbuf must
 * already satisfy the pristine-state invariants. */
static inline struct udk_mbuf *udk_mbuf_raw_alloc(struct udk_mempool *mp)
{
    struct udk_mbuf *mbuf = NULL;
    int rc = udk_mempool_get_bulk(mp, (void **)&mbuf, 1);

    if (rc < 0) {
        return NULL;
    }

    UDK_MBUF_RAW_ALLOC_CHECK(mbuf);
    return mbuf;
}

/* Decrease reference counter and unlink a mbuf segment */
static udk_force_inline struct udk_mbuf *udk_pktmbuf_prefree_seg(struct udk_mbuf *m)
{
    udk_mbuf_sanity_check(m, 0);

    UDK_ASSERT(udk_mbuf_refcnt_read(m) <= 1);

    if (likely(udk_mbuf_refcnt_read(m) == 1)) {
        if (m->next != NULL) {
            m->next = NULL;
            m->nb_segs = 1;
        }
        return m;
    }
    return NULL;
}

/* Return an mbuf straight to its originating mempool.
 * Only valid for direct mbufs in pristine state (asserted below). */
static udk_force_inline void udk_mbuf_raw_free(struct udk_mbuf *m)
{
    struct udk_mempool *mp = m->pool;

    UDK_ASSERT(UDK_MBUF_DIRECT(m));
    UDK_MBUF_RAW_ALLOC_CHECK(m);
    udk_mempool_put(mp, m);
}

/* Free a single mbuf segment: release it back to its pool when this
 * caller held the last reference (see udk_pktmbuf_prefree_seg). */
static udk_force_inline void udk_pktmbuf_free_seg(struct udk_mbuf *m)
{
    struct udk_mbuf *seg = udk_pktmbuf_prefree_seg(m);

    if (likely(seg != NULL)) {
        udk_mbuf_raw_free(seg);
    }
}

/* Per-object constructor for pktmbuf pools (mempool obj_init callback shape). */
void udk_pktmbuf_init(struct udk_mempool *mp, void *opaque_arg, void *m, unsigned i);
/* Allocate and reset one packet mbuf; NULL on pool exhaustion — confirm in .c. */
struct udk_mbuf *udk_pktmbuf_alloc(struct udk_mempool *mp);
/* Allocate 'count' packet mbufs into 'mbufs'; returns 0 on success — confirm in .c. */
int udk_pktmbuf_alloc_bulk(struct udk_mempool *pool, struct udk_mbuf **mbufs, uint32_t count);
/* Free a packet mbuf (all segments) back to its pool. */
void udk_pktmbuf_free(struct udk_mbuf *m);

/*
 * When udk_pktmbuf_pool_create() / udk_pktmbuf_pool_create_by_ops() is invoked
 * in dlopen mode, the cache_size argument should be 0. This avoids the cost of
 * calling udk_lcore_id inside the udk_mempool_put() and udk_mempool_get_bulk()
 * interfaces.
 */
struct udk_mempool *udk_pktmbuf_pool_create(const char *name, uint32_t n, uint32_t cache_size, uint16_t priv_size,
    uint16_t data_room_size, int socket_id, uint32_t flags);
/* Same as udk_pktmbuf_pool_create() but with an explicit mempool ops name. */
struct udk_mempool *udk_pktmbuf_pool_create_by_ops(const char *name, uint32_t n, uint32_t cache_size,
    uint16_t priv_size, uint16_t data_room_size, int socket_id, const char *ops_name, uint32_t flags);
/* Debug-build consistency check backing udk_mbuf_sanity_check(). */
void mbuf_sanity_check(const struct udk_mbuf *m, int is_header);
/* Dump up to dump_len bytes of mbuf data and metadata to 'f'. */
void udk_pktmbuf_dump(FILE *f, const struct udk_mbuf *m, uint32_t dump_len);

#ifdef __cplusplus
}
#endif

#endif
