#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <unistd.h>
#include <fcntl.h>
#include <termios.h>
#include <syserr.h>
#include <device.h>
#include <utils.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/epoll.h>
#include <sys/uio.h>
#include <netinet/tcp.h>
#include "device.h"
#include "netcore.h"
#include "evtloop.h"
#include "dev/stream.h"

#define RETRY_ON_WRITE_ERROR(errno) (errno == EINTR)

/* Scatter read: fall back to plain read(2) for a single buffer to skip
 * the readv(2) iovec setup cost. */
static ssize_t uv__readv(int fd, struct iovec *vec, size_t n)
{
    return (n == 1) ? read(fd, vec->iov_base, vec->iov_len)
                    : readv(fd, vec, n);
}
/* Gather write: fall back to plain write(2) for a single buffer to skip
 * the writev(2) iovec setup cost. */
static ssize_t uv__writev(int fd, struct iovec *vec, size_t n)
{
    return (n == 1) ? write(fd, vec->iov_base, vec->iov_len)
                    : writev(fd, vec, n);
}

/* Frame timer expiry callback (armed in stream_poll_in): notify the user
 * callback that received data is ready to be consumed. */
static void frame_tmr_cb(void *data)
{
    struct stream_device *sdev = data;
    device_t *dev = &sdev->dev;

    if (!dev->usr_cb || !dev->usr_cb->ev_cb)
        return;

    dev->usr_cb->ev_cb(dev, DE_RCV, NULL);
}

/* device_ops.init: run the driver's init hook, allocate the rx/tx fifo
 * caches (when their sizes are non-zero) and register the frame timer.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * set up so far is unwound via the goto labels below.
 */
static int stream_init(device_t *dev)
{
    int ret = 0;
    struct stream_device *stream_dev = to_stream_dev(dev);

    /* Driver-specific init runs first, so a failure here needs no unwind. */
    if (stream_dev->ops->init)
    {
        ret = stream_dev->ops->init(stream_dev);
        if (ret < 0) return ret;
    }

    /* rxcache_size == 0 means "no rx cache": reads bypass the fifo. */
    if (stream_dev->rxcache_size > 0)
    {
        ret = kfifo_alloc(&stream_dev->rxcache, stream_dev->rxcache_size);
        if (ret < 0) goto fail_alloc_rx;
    }

    /* txcache_size == 0 means "no tx cache": writes go straight through. */
    if (stream_dev->txcache_size > 0)
    {
        ret = kfifo_alloc(&stream_dev->txcache, stream_dev->txcache_size);
        if (ret < 0) goto fail_alloc_tx;
    }

    /* Timer used to coalesce partial reads into frames; armed in
     * stream_poll_in() and dispatched to frame_tmr_cb(). */
    soft_timer_add(dev_loop(dev), &stream_dev->frame_tmr, dev_name(dev),
                   frame_tmr_cb, stream_dev);
    return 0;

fail_alloc_tx:
    /* tx fifo allocation failed: release the rx fifo allocated above. */
    if (stream_dev->rxcache_size > 0)
        kfifo_free(&stream_dev->rxcache);
fail_alloc_rx:
    /* undo the driver's init hook. */
    if (stream_dev->ops->release)
        stream_dev->ops->release(stream_dev);
    return ret;
}
/* device_ops.release: undo stream_init() — driver release hook, fifo
 * storage, and the frame timer. */
static void stream_release(device_t *dev)
{
    struct stream_device *stream_dev = to_stream_dev(dev);

    if (stream_dev->ops->release)
        stream_dev->ops->release(stream_dev);
    /* Fifos only exist when their configured size is non-zero. */
    if (stream_dev->rxcache_size > 0)
        kfifo_free(&stream_dev->rxcache);
    if (stream_dev->txcache_size > 0)
        kfifo_free(&stream_dev->txcache);
    soft_timer_del(&stream_dev->frame_tmr);
}
/* device_ops.reset: drop any buffered rx/tx data without releasing the
 * fifo storage itself. */
static void stream_reset(device_t *dev)
{
    struct stream_device *sdev = to_stream_dev(dev);

    if (sdev->rxcache_size != 0)
        kfifo_reset(&sdev->rxcache);
    if (sdev->txcache_size != 0)
        kfifo_reset(&sdev->txcache);
}
/* device_ops.open: delegate to the driver's open hook; with no hook the
 * open trivially succeeds. */
static int stream_open(device_t *dev)
{
    struct stream_device *sdev = to_stream_dev(dev);

    if (!sdev->ops->open)
        return 0;

    return sdev->ops->open(sdev);
}

/* device_ops.ctrl: ioctl-style control interface.
 *
 * @cmd selects the operation; @args is an in/out pointer whose type
 * depends on the command (u32 for sizes/lengths, u16 for the frame
 * timeout, unsigned long for the fifo-info handle).
 *
 * Returns 0 on success, -EBUSY when resizing an active device, -ENOSYS
 * for unknown commands with no driver ctrl hook, or the driver hook's
 * return value.
 */
static int stream_ctrl(device_t *dev, u32 cmd, void *args)
{
    struct stream_device *stream_dev = to_stream_dev(dev);

    switch (cmd)
    {
    case DEV_CTRL_CMD_GET_RXFIFO_SIZE:
        *(u32 *)args = stream_dev->rxcache_size;
        break;
    case DEV_CTRL_CMD_SET_RXFIFO_SIZE:
        /* Fifo is (re)allocated in stream_init(); refuse to change the
         * size while the device is active. */
        if (ev__is_active(dev))
            return -EBUSY;
        stream_dev->rxcache_size = *(u32 *)args;
        break;
    case DEV_CTRL_CMD_GET_TXFIFO_SIZE:
        *(u32 *)args = stream_dev->txcache_size;
        break;
    case DEV_CTRL_CMD_SET_TXFIFO_SIZE:
        if (ev__is_active(dev))
            return -EBUSY;
        stream_dev->txcache_size = *(u32 *)args;
        break;
    case DEV_CTRL_CMD_GET_FIFO_LEN:
        /* Bytes currently buffered in the rx cache. */
        *(u32 *)args = kfifo_len(&stream_dev->rxcache);
        break;
    case DEV_CTRL_CMD_GET_FIFO_INFO:
        /* Hands out the raw rx fifo address as an opaque handle. */
        *(unsigned long *)args = (unsigned long)&stream_dev->rxcache;
        break;

    case DEV_CTRL_CMD_GET_FRAME_TO:
        *(u16 *)args = stream_dev->frame_to;
        break;
    case DEV_CTRL_CMD_SET_FRAME_TO:
        /* Frame timeout in ms; 0 disables frame coalescing (see
         * stream_poll_in()). */
        stream_dev->frame_to = *(u16 *)args;
        break;

    default:
        /* Unknown command: give the driver a chance to handle it. */
        if (stream_dev->ops->ctrl)
            return stream_dev->ops->ctrl(stream_dev, cmd, args);
        return -ENOSYS;
    }
    return 0;
}

/* device_ops.peek: copy up to @size cached rx bytes into @buffer without
 * consuming them.  Returns 0 when no rx cache is configured. */
static ssize_t stream_peek(device_t *dev, void *buffer, size_t size)
{
    struct stream_device *sdev = to_stream_dev(dev);

    /* Peeking only makes sense with a cache to peek into. */
    if (sdev->rxcache_size == 0)
        return 0;

    /* Synchronous mode: refill the cache from the device first. */
    if ((dev->oflag & DEV_OFLAG_FSYNC) != 0)
        stream_poll_read(sdev);

    return kfifo_out_peek(&sdev->rxcache, buffer, size);
}

/* Low-level read into @buf[0..n-1].
 *
 * Uses the driver's ioread hook when provided, otherwise a readv(2) on
 * the device fd, retrying calls interrupted by a signal (EINTR).
 *
 * Returns the byte count read, 0 on EOF, or a negative value on error
 * (-1 with errno set, or the ioread hook's error code).
 */
static ssize_t stream_do_read(struct stream_device *stream_dev,
                              ev_buf_t *buf, size_t n)
{
    device_t *dev = to_dev(stream_dev);
    /* ssize_t, not int: read()/readv() return ssize_t and the previous
     * int local silently truncated it (and the function's own return
     * type is ssize_t). */
    ssize_t r;

    if (stream_dev->ops->ioread)
        return stream_dev->ops->ioread(stream_dev, buf, n);

    do {
        r = uv__readv(dev_fd(dev), (struct iovec *)buf, n);
    } while (r < 0 && errno == EINTR);

    return r;
}
/* device_ops.read: copy up to @size bytes into @buffer.
 *
 * Three modes:
 *  - no rx cache configured: read straight from the device;
 *  - @buffer == NULL: discard (skip) up to @size cached bytes;
 *  - otherwise: drain the rx cache, refilling it first when the device
 *    was opened with DEV_OFLAG_FSYNC.
 *
 * Returns the number of bytes read/skipped, or -ENOTCONN when the
 * device is not connected.
 */
static ssize_t stream_read(device_t *dev, void *buffer, size_t size)
{
    DEV_PD("<%s>: %s...\n", dev_name(dev), __func__);

    if (dev_status(dev) != DEVICE_STATUS_CONNECTED)
    {
        DEV_PE("<%s>: not connected, read fault!\n", dev_name(dev));
        return -ENOTCONN;
    }

    struct stream_device *stream_dev = to_stream_dev(dev);

    /* No rx cache: read directly into the caller's buffer. */
    if (stream_dev->rxcache_size == 0)
    {
        ev_buf_t buf = {
            .base = (void *)buffer,
            .len  = size,
        };
        return stream_do_read(stream_dev, &buf, 1);
    }

    /* NULL buffer: discard up to @size cached bytes. */
    if (!buffer)
    {
        size_t dlen = kfifo_len(&stream_dev->rxcache);
        if (size > dlen) size = dlen;
        kfifo_skip(&stream_dev->rxcache, size);
        return size;
    }

    /* Synchronous mode: pull fresh data off the device first. */
    if (dev->oflag & DEV_OFLAG_FSYNC)
        stream_poll_read(stream_dev);

    return kfifo_out(&stream_dev->rxcache, buffer, size);
}

/* Low-level write of @buf[0..n-1]: driver iowrite hook when provided,
 * otherwise writev(2) on the device fd (retrying interrupted calls).
 * Successful byte counts are added to the device tx statistics. */
static ssize_t stream_do_write(struct stream_device *stream_dev,
                               ev_buf_t *buf, size_t n)
{
    device_t *dev = &stream_dev->dev;
    ssize_t r;

    if (stream_dev->ops->iowrite)
    {
        r = stream_dev->ops->iowrite(stream_dev, buf, n);
    }
    else
    {
        for (;;)
        {
            r = uv__writev(dev_fd(dev), (struct iovec *)buf, n);
            if (r != -1 || !RETRY_ON_WRITE_ERROR(errno))
                break;
        }
    }

    /* Account transmitted bytes. */
    if (r > 0)
        dev->dstats.tx += r;
    return r;
}
/* device_ops.write: send @size bytes from @buffer.
 *
 * Without a tx cache the data is written to the device immediately;
 * with one, the data is queued whole and drained from the event loop
 * (EPOLLOUT -> stream_poll_out -> stream_send_cache).
 *
 * Returns the number of bytes accepted, -ENOTCONN when the device is
 * not connected, or -EBUSY when the tx cache cannot hold all of @size
 * (the write is all-or-nothing in cached mode).
 */
static ssize_t stream_write(device_t *dev, const void *buffer, size_t size)
{
    int ret;
    struct stream_device *stream_dev = to_stream_dev(dev);

    DEV_PD("<%s>: %s...\n", dev_name(dev), __func__);

    if (dev_status(dev) != DEVICE_STATUS_CONNECTED)
    {
        DEV_PE("<%s>: not connected, write fault!\n", dev_name(dev));
        return -ENOTCONN;
    }

    /* no tx cache, write directly.
     */
    if (stream_dev->txcache_size == 0)
    {
        ev_buf_t buf = {
            .base = (void *)buffer,
            .len  = size,
        };
        return stream_do_write(stream_dev, &buf, 1);
    }

    /* All-or-nothing: refuse rather than queue a partial payload. */
    if (kfifo_avail(&stream_dev->txcache) < size)
        return -EBUSY;

    ret = kfifo_in(&stream_dev->txcache, buffer, size);
    /* Cannot fail: availability was checked just above. */
    assert(ret == size);

    /* Arm EPOLLOUT so the loop drains the cache when the fd is writable. */
    ev__io_start(dev_loop(dev), &dev->io, EPOLLOUT);
    return ret;
}

/* Pull readable bytes from the device into the rx cache.
 *
 * Returns the byte count read, 0 on EOF, or a negative error (see
 * stream_do_read()).
 */
int stream_poll_read(struct stream_device *stream_dev)
{
    ev_buf_t buf[2];
    int r;

    if (stream_dev->rxcache_size == 0)
    {
        /* No rx cache: read into a scratch word so a level-triggered
         * poll does not spin.  NOTE(review): up to sizeof(int) bytes
         * are discarded here — confirm this path is only hit for
         * devices whose payload is consumed elsewhere. */
        buf[0].base = &r;
        buf[0].len  = sizeof(r);
        return stream_do_read(stream_dev, buf, 1);
    }

    /* Map the fifo's free space as up to two iovecs (the ring may
     * wrap), read directly into it, then commit what actually arrived. */
    r = kfifo_dma_in_prepare(&stream_dev->rxcache, buf, array_size(buf),
                             stream_dev->rxcache_size);
    r = stream_do_read(stream_dev, buf, r);
    if (r > 0)
        kfifo_dma_in_finish(&stream_dev->rxcache, r);
    return r;
}
/* Drain the tx cache to the device (called on EPOLLOUT).
 *
 * Returns the byte count written, 0 when the cache was already empty
 * (in which case EPOLLOUT is disarmed), or a negative error.
 */
static ssize_t stream_send_cache(struct stream_device *stream_dev)
{
    device_t *dev = &stream_dev->dev;
    ev_buf_t buf[2];
    int r;

    if (kfifo_is_empty(&stream_dev->txcache))
    {
        /* no data to be sent, stop pollout event.
         */
        ev__io_stop(dev_loop(dev), &dev->io, EPOLLOUT);
        return 0;
    }

    /* Map the cached data as up to two iovecs (the ring may wrap),
     * write it out, then consume what was actually sent. */
    r = kfifo_dma_out_prepare(&stream_dev->txcache, buf, array_size(buf),
                              stream_dev->txcache_size);
    r = stream_do_write(stream_dev, buf, r);
    if (r > 0)
        kfifo_dma_out_finish(&stream_dev->txcache, r);
    return r;
}
/* device_ops.on_poll_out: EPOLLOUT handler. */
static void stream_poll_out(device_t *dev)
{
    struct stream_device *sdev = to_stream_dev(dev);

    if (dev_status(dev) != DEVICE_STATUS_CONNECTED)
    {
        /* Not connected yet: POLLOUT signals driver-level progress
         * (e.g. an async connect), so hand it to the driver hook. */
        if (sdev->ops->on_poll_out)
            sdev->ops->on_poll_out(sdev);
        return;
    }

    /* Connected: the fd is writable, drain the tx cache.  EPOLLOUT is
     * only armed by stream_write(), which requires a tx cache. */
    assert(sdev->txcache_size > 0);
    stream_send_cache(sdev);
}
/* device_ops.on_poll_in: EPOLLIN handler.
 *
 * Reads available data into the rx cache, then either delivers a DE_RCV
 * event immediately or arms the frame timer to coalesce partial reads
 * into frames (see frame_tmr_cb()).
 */
static void stream_poll_in(device_t *dev)
{
    struct stream_device *stream_dev = to_stream_dev(dev);

    /* Driver may take over input handling entirely. */
    if (stream_dev->ops && stream_dev->ops->on_poll_in)
        return stream_dev->ops->on_poll_in(stream_dev);

    int l = stream_poll_read(stream_dev);
    if (l == 0)
    {
        /* EOF: let the driver handle it, default to closing the device. */
        if (stream_dev->ops->on_eof)
            stream_dev->ops->on_eof(stream_dev);
        else
            device_close(dev);
    }
    else
    {
        if (!dev_is_master(dev))
            dev->master->proxy = dev;

        /* BUG FIX: l can be negative here (-ENOMEM when the cache is
         * full — handled below — or a read error), and the old
         * unconditional `dstats.rx += l` corrupted the rx statistics.
         * Only positive byte counts are accounted. */
        if (l > 0)
            dev->dstats.rx += l;

        u32 frame_to = stream_dev->frame_to;
        if (l == -ENOMEM || frame_to == 0)
        {
            /* Cache full, or frame coalescing disabled: notify now. */
            if (dev->usr_cb && dev->usr_cb->ev_cb)
                dev->usr_cb->ev_cb(dev, DE_RCV, NULL);
        }
        else
        {
            /* (Re)arm the frame timer; the user is notified when it
             * fires, letting short reads accumulate into a frame. */
            soft_timer_start(&stream_dev->frame_tmr, MS_TO_TICKS(frame_to), 0);
        }
    }
}
/* device_ops vtable binding the stream layer into the generic device
 * core; installed on every device by stream_device_register(). */
static const struct device_ops stream_ops =
{
    .init  = stream_init,
    .open  = stream_open,
    .ctrl  = stream_ctrl,
    .reset = stream_reset,
    .release = stream_release,
    .peek  = stream_peek,
    .read  = stream_read,
    .write = stream_write,

    .on_poll_in  = stream_poll_in,
    .on_poll_out = stream_poll_out,
};

/* Register @stream_dev with the device core under @name.
 *
 * Installs the stream_ops vtable, stores @pri as the device's private
 * data, and applies the defaults: a cached rx path of
 * STREAM_DEFAULT_CACHE_SIZE and an uncached (direct) tx path.
 *
 * Returns the result of device_register().
 */
int stream_device_register(struct stream_device *stream_dev, const char *name,
                           void *pri)
{
    device_t *base = to_dev(stream_dev);

    stream_dev->rxcache_size = STREAM_DEFAULT_CACHE_SIZE;
    stream_dev->txcache_size = 0;

    base->ops = &stream_ops;
    base->pri = pri;

    return device_register(base, name, 0);
}
void stream_device_unregister(struct stream_device *stream_dev)
{
    device_unregister(to_dev(stream_dev));
}
