#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>
#include <netio/netio.h>
#include <tmc/alloc.h>
#include <tmc/cpus.h>
#include <tmc/sync.h>
#include <tmc/task.h>

#include "if_netio_api.h"

// Make inlines mandatory, not advisory: GCC's always_inline attribute
// forces inlining even at -O0 (a plain 'inline' is only a hint).
#define INLINE __attribute__((always_inline))

/*
 * Collect summary counts.
 */
/*
 * Collect and print summary counts for a NetIO instance: shim
 * drop/truncate counters, IPP receive/drop statistics, and the
 * instance's own Rx/Tx software counters.
 *
 * instance -- initialized NetIO instance (see if_netio_init()).
 */
void if_netio_show_stat (netio_instance_t *instance)
{
    uint32_t shim;
    netio_stat_t ns;

    printf("\nShow Netio status:\n");

    // BUG FIX: the original had a stray ';' terminating this 'if', so
    // the shim printf ran unconditionally -- reading 'shim'
    // uninitialized (undefined behavior) whenever netio_get() failed.
    // Only report the shim counters when the read actually succeeded.
    if (netio_get(&instance->queue, NETIO_PARAM, NETIO_PARAM_OVERFLOW,
                  &shim, sizeof shim)
            == sizeof shim)
        printf("shim\tdrop %u trunc %u\n", shim & 0xFFFF, shim >> 16);

    if (netio_get(&instance->queue, NETIO_PARAM, NETIO_PARAM_STAT, &ns,
                  sizeof (ns)) == sizeof (ns))
        printf("ipp\trecv %u drop %u\n", ns.packets_received, ns.packets_dropped);

    printf("Netio\t Rx %u Tx/Retry/Busy/Err %u/%u/%u/%u\n",
           instance->rx_counter,
           instance->tx_counter,
           instance->tx_retry_counter,
           instance->tx_busy_counter,
           instance->tx_err_counter);
}

//========================================
// NetIO configuration.

// Configure a queue.
// For a shared queue, we are careful to register workers serially.
//
// Configure a queue.
// For a shared queue, we are careful to register workers serially.
//
static netio_error_t queue_config (netio_instance_t *instance, char *interface)
{
    netio_queue_t *queue = &instance->queue;

    netio_input_config_t config =
    {
        .flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE,
        .num_receive_packets = NETIO_MAX_RECEIVE_PKTS,
        .interface = interface,
        .num_send_buffers_small_total = NETIO_MAX_SEND_BUFFERS,
        .num_send_buffers_large_total = NETIO_MAX_SEND_BUFFERS,
        .num_send_buffers_jumbo_total = NETIO_MAX_SEND_BUFFERS,
        .num_send_buffers_small_prealloc = NETIO_MAX_SEND_BUFFERS,
        .num_send_buffers_large_prealloc = NETIO_MAX_SEND_BUFFERS,
        .num_send_buffers_jumbo_prealloc = NETIO_MAX_SEND_BUFFERS,
        .total_buffer_size = 0x8000000,
        .queue_id = instance->worker_id
    };

    // Keep retrying registration while the link is down; report any
    // other failure to the caller.
    for (;;)
    {
        netio_error_t err = netio_input_register(&config, queue);

        if (err == NETIO_NO_ERROR)
            return err;

        if (err != NETIO_LINK_DOWN)
        {
            printf("netio input_register %d failed, status %d(%s)\n",
                   instance->worker_id, err, netio_strerror(err));
            return err;
        }

        printf("Link %s is down, retrying.\n", interface);
        sleep(2);
    }
}

/*
 * Input:
 * interface -- interface name.
 * worker_id -- work id, from 0 to workers.
 *
 * output:
 * netio instance handle.
 */
/*
 * Input:
 * interface -- interface name.
 * worker_id -- work id, from 0 to workers.
 *
 * output:
 * netio instance handle, or NULL on allocation/registration failure.
 */
netio_instance_t *if_netio_init(char *interface, unsigned int worker_id)
{
    netio_instance_t *instance;
    netio_error_t err;

    // calloc() gives us the zero-fill the original malloc+memset did.
    instance = calloc(1, sizeof(netio_instance_t));
    if (!instance)
    {
        printf("Insufficient memory available.");
        return NULL;
    }
    instance->worker_id = worker_id;

    // Configure one queue to each worker.
    //
    err = queue_config(instance, interface);
    if (err != NETIO_NO_ERROR)
    {
        // BUG FIX: report before freeing -- the original called
        // free(instance) and then read instance->worker_id in the
        // printf below (use-after-free).
        printf("%s failed to init queue for worker %d, status %d(%s)\n",
               __FUNCTION__, instance->worker_id, err, netio_strerror(err));
        free(instance);
        return NULL;
    }

    // OK. All set to go.
    printf("\nNetio init ok on %s for worker %d",
           interface, instance->worker_id);

    return instance;
}

// Define a flow hash across a set of buckets.
// Map the buckets to our worker queues.
// There should be at least as many buckets as workers.
//
/* workers -- total process numbers. */
// Define a flow hash across a set of buckets.
// Map the buckets to our worker queues.
// There should be at least as many buckets as workers.
//
/* workers -- total process numbers. */
void flow_config (netio_instance_t *instance, unsigned int workers)
{
    netio_error_t err;
    netio_group_t flowtbl;
    // BUG FIX: size the map by NETIO_FLOWS (the number of entries we
    // actually fill and hand to netio_input_bucket_configure) rather
    // than a hard-coded 1024, which overflowed the stack buffer
    // whenever NETIO_FLOWS > 1024.
    netio_bucket_t map[NETIO_FLOWS];
    netio_queue_t *queue = &instance->queue;

    // Guard the modulus below: workers == 0 would be a division by zero.
    if (workers == 0)
    {
        tmc_task_die("flow_config: workers must be nonzero\n");
        return;
    }

    // Spread the buckets round-robin across the worker queues.
    for (int b = 0; b < NETIO_FLOWS; ++b)
    {
        map[b] = b % workers;
    }

    err = netio_input_bucket_configure(queue, 0, map, NETIO_FLOWS);
    if (err != NETIO_NO_ERROR)
    {
        tmc_task_die("netio_input_bucket_configure(%d) returned: %d(%s)\n",
                     NETIO_FLOWS, err, netio_strerror(err));

        return;
    }

    flowtbl.word = 0;
    flowtbl.bits.__balance_on_l4 = 1;    // Hash on ports?
    flowtbl.bits.__balance_on_l3 = 1;    // Hash on IP addresses?
    flowtbl.bits.__balance_on_l2 = 0;    // Hash on Ethernet Mac address
    flowtbl.bits.__bucket_base = 0;   // Hash table
    // NOTE(review): the mask assumes NETIO_FLOWS is a power of two --
    // confirm against the definition in if_netio_api.h.
    flowtbl.bits.__bucket_mask = NETIO_FLOWS - 1;

    err = netio_input_group_configure(queue, 0, &flowtbl, 1);
    if (err != NETIO_NO_ERROR)
    {
        tmc_task_die("netio_input_group_configure failed, status: %d(%s)\n",
                     err, netio_strerror(err));
        return;
    }
}


