// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Inochi Amaoto

#define pr_fmt(fmt) "vhost-vapp-dtq: " fmt

#include "vhost-vapp.h"
#include "vhost-vapp-dtq.h"
#include "vio-entity.h"

#define VAPP_CTRL_PKT_PAYLOAD_SIZE sizeof(struct vapp_ctrl_data_packet)

static struct workqueue_struct* vhost_vapp_workqueue;

/* ---- ctrl packet ---- */
/*
 * Copy one control packet payload into the guest-supplied RX buffers.
 *
 * The ctrl RX path is device->guest, so the descriptor chain must consist
 * of input (guest-writable) buffers only; any output buffer is an error.
 *
 * Returns the number of bytes written into the chain on success, or a
 * negative errno (-EFAULT / -ENOMEM) on failure.
 */
static ssize_t vhost_vapp_do_send_ctrl_packet(struct vapp_send_ctrl_data_packet* packet, struct vhost_virtqueue* vq, unsigned int out, unsigned in)
{
  struct iov_iter iov_iter;
  size_t iov_len;
  ssize_t nbytes;

  if (out) {
    vq_err(vq, "Expect 0 output buffers, get %u\n", out);
    return -EFAULT;
  }

  iov_len = iov_length(vq->iov, in);
  if (iov_len < VAPP_CTRL_PKT_PAYLOAD_SIZE) {
    /* Both arguments are size_t: %zu (the original %zi mismatched). */
    vq_err(vq, "Buffer too small, needed (%zu/%zu)", VAPP_CTRL_PKT_PAYLOAD_SIZE, iov_len);
    return -ENOMEM;
  }

  /* out == 0 here, so &vq->iov[out] is the start of the input vector. */
  iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
  nbytes = copy_to_iter(&packet->pkt, VAPP_CTRL_PKT_PAYLOAD_SIZE, &iov_iter);
  if (nbytes != VAPP_CTRL_PKT_PAYLOAD_SIZE) {
    vq_err(vq, "Failed to copy packet buffer");
    return -EFAULT;
  }

  return nbytes;
}

/*
 * Drain the ctrl send queue into the guest RX virtqueue.
 *
 * Called with vq->mutex held (taken by ___vhost_vapp_precondition_check);
 * releases it on return.
 */
static void vhost_vapp_do_ctrl_rx_handle(struct vhost_virtqueue* vq, struct vhost_vapp_dtq* dtq)
{
  int pkts = 0, total = 0;
  bool added = false;

  kthread_use_mm(dtq->vadev->dev.mm);

  do {
    struct vapp_send_ctrl_data_packet* packet;
    unsigned out, in;
    ssize_t nbytes;
    int head;

    packet = vapp_send_queue_get(dtq->queue, struct vapp_send_ctrl_data_packet, node);
    if (!packet) {
      // Nothing pending: re-arm guest notification and stop.
      vhost_enable_notify(&dtq->vadev->dev, vq);
      break;
    }

    head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in, NULL, NULL);
    vapp_info("ctrl-rx: vhost_get_vq_desc: vq: %p, head: %d, out: %u in: %u\n", vq, head, out, in);

    // Error: put the packet back so it is not lost, then terminate.
    if (head < 0) {
      vapp_send_queue_add(dtq->queue, &packet->node);
      break;
    }

    // No buffer available: requeue the packet and re-arm notification.
    if (head == vq->num) {
      vapp_send_queue_add(dtq->queue, &packet->node);
      if (unlikely(vhost_enable_notify(&dtq->vadev->dev, vq))) {
        vhost_disable_notify(&dtq->vadev->dev, vq);
        continue;
      }
      break;
    }

    nbytes = vhost_vapp_do_send_ctrl_packet(packet, vq, out, in);
    /*
     * The packet is consumed whether the copy succeeded or failed;
     * free it either way, mirroring the data RX path
     * (vhost_vapp_do_data_rx_handle). The original code leaked it.
     */
    vapp_send_ctrl_data_packet_free(packet);
    if (nbytes < 0) {
      break;
    }

    vhost_add_used(vq, head, nbytes);
    added = true;
    total += nbytes;
  } while (likely(!vhost_exceeds_weight(vq, ++pkts, total)));

  vapp_info("ctrl-rx: vhost vq %p signal %d\n", vq, added);
  if (added)
    vhost_signal(&dtq->vadev->dev, vq);

  kthread_unuse_mm(dtq->vadev->dev.mm);
  mutex_unlock(&vq->mutex);
}

/*
 * Read one control packet from the guest TX descriptor chain.
 *
 * The ctrl TX path is guest->device, so the chain must consist of output
 * (guest-readable) buffers only.
 *
 * Returns a newly allocated packet on success (caller owns it and must
 * free it with vapp_send_ctrl_data_packet_free()), or NULL on failure.
 */
struct vapp_send_ctrl_data_packet* vhost_vapp_do_recv_ctrl_packet(struct vhost_virtqueue* vq, unsigned int out, unsigned in)
{
  struct vapp_send_ctrl_data_packet* packet;
  struct iov_iter iov_iter;
  size_t nbytes;
  size_t len;

  if (in) {
    vq_err(vq, "Expected 0 input buffers, got %u\n", in);
    return NULL;
  }

  len = iov_length(vq->iov, out);

  packet = vapp_send_ctrl_data_packet_alloc();
  if (!packet)
    return NULL;

  iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);
  nbytes = copy_from_iter(&packet->pkt, sizeof(packet->pkt), &iov_iter);
  if (nbytes != sizeof(packet->pkt)) {
    /* Report the size we actually expected, not the full iov length. */
    vq_err(vq, "Expected %zu byte payload, got %zu bytes\n", sizeof(packet->pkt), nbytes);
    vapp_send_ctrl_data_packet_free(packet);
    return NULL;
  }

  return packet;
}

/*
 * Drain guest-submitted ctrl packets from the TX virtqueue.
 *
 * Called with vq->mutex held (taken by ___vhost_vapp_precondition_check);
 * releases it on return.
 */
void vhost_vapp_do_ctrl_tx_handle(struct vhost_virtqueue* vq, struct vhost_vapp_dtq* dtq)
{
  struct vhost_vapp_dev* vapp;

  int head, pkts = 0, total = 0;
  unsigned int out, in;
  bool added = false;

  vapp = dtq->vadev;
  kthread_use_mm(dtq->vadev->dev.mm);

  do {
    u32 len;
    struct vapp_send_ctrl_data_packet* pkt;

    head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in, NULL, NULL);
    vapp_info("ctrl-tx: vhost_get_vq_desc: vq: %p, head: %d, out: %u in: %u\n", vq, head, out, in);
    if (head < 0)
      break;

    if (head == vq->num) {
      if (unlikely(vhost_enable_notify(&vapp->dev, vq))) {
        vhost_disable_notify(&vapp->dev, vq);
        continue;
      }

      break;
    }

    pkt = vhost_vapp_do_recv_ctrl_packet(vq, out, in);
    if (!pkt) {
      vq_err(vq, "Failed to get pkt\n");
      /*
       * NOTE(review): this skips vhost_add_used() for a descriptor
       * that was already consumed by vhost_get_vq_desc(), so the
       * guest buffer is never returned — confirm this is intended.
       */
      continue;
    }
    len = sizeof(pkt->pkt);

    // TODO: process pkt
    vapp_send_ctrl_data_packet_free(pkt);

    total += len;
    vhost_add_used(vq, head, len);
    added = true;
  } while(likely(!vhost_exceeds_weight(vq, ++pkts, total)));

  vapp_info("ctrl-tx: vhost vq %p signal %d\n", vq, added);
  if (added)
    vhost_signal(&vapp->dev, vq);

  kthread_unuse_mm(dtq->vadev->dev.mm);
  mutex_unlock(&vq->mutex);
}

/* ---- data packet ---- */
/*
 * Copy one data packet (header + payload) into the guest RX buffers and
 * update the entity's map bookkeeping.
 *
 * The data RX path is device->guest, so the descriptor chain must consist
 * of input (guest-writable) buffers only.
 *
 * Returns header + payload bytes written on success, or a negative errno.
 */
static ssize_t vhost_vapp_do_send_data_packet(struct vapp_send_data_packet* packet, struct vhost_virtqueue* vq, unsigned int out, unsigned in)
{
  struct iov_iter iov_iter;
  size_t iov_len, need_bytes;
  ssize_t nbytes;
  u64 map;

  if (out) {
    vq_err(vq, "Expect 0 output buffers, get %u\n", out);
    return -EFAULT;
  }

  iov_len = iov_length(vq->iov, in);
  need_bytes = le32_to_cpu(packet->pkt.header.length);
  if (iov_len < sizeof(packet->pkt.header) + need_bytes) {
    /* Both arguments are size_t: %zu (the original %zi mismatched). */
    vq_err(vq, "Buffer too small, needed (%zu/%zu)", sizeof(packet->pkt.header) + need_bytes, iov_len);
    return -ENOMEM;
  }

  /* out == 0 here, so &vq->iov[out] is the start of the input vector. */
  iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
  nbytes = copy_to_iter(&packet->pkt.header, sizeof(packet->pkt.header), &iov_iter);
  if (nbytes != sizeof(packet->pkt.header)) {
    vq_err(vq, "Failed to copy packet header");
    return -EFAULT;
  }

  nbytes = copy_to_iter(packet->pkt.buffer, need_bytes, &iov_iter);
  if (nbytes != need_bytes) {
    vq_err(vq, "Failed to copy packet payload");
    return -EFAULT;
  }

  // TODO: remove entity if no_reply set
  map = vio_vapp_entity_set_map(packet->entity, packet->map);
  vapp_info("data-do-rx: vq %p, entity->id: %llu, map: 0x%016llx\n", vq, packet->entity->id, map);
  if (map == vio_vapp_packet_get_expect_map_id(packet->entity->alloc)) {
    // All expected fragments mapped: reset the map state.
    vio_vapp_entity_init_map(packet->entity);
    if (VIO_TYPE_IS_NOREPLY(packet->entity->type)) {
      vapp_entity_tree_remove(packet->tree, packet->entity);
      vio_vapp_entity_free(packet->entity);
    }
  }

  /* Account for the header copied above the payload. */
  nbytes += sizeof(packet->pkt.header);

  return nbytes;
}

static void vhost_vapp_do_data_rx_handle(struct vhost_virtqueue* vq, struct vhost_vapp_dtq* dtq)
{
  int pkts = 0, total = 0;
  bool added = false;

  kthread_use_mm(dtq->vadev->dev.mm);

  do {
    struct vapp_send_data_packet* packet;
    unsigned out, in;
    ssize_t nbytes;
    int head;

    packet = vapp_send_queue_get(dtq->queue, struct vapp_send_data_packet, node);
    if (!packet) {
      vhost_enable_notify(&dtq->vadev->dev, vq);
      break;
    }

    head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), &out, & in, NULL, NULL);
    // pr_info("data-rx: vhost_get_vq_desc: vq: %p, head: %d, out: %u in: %u, map: 0x%016llx\n", vq, head, out, in, packet->pkt.header.map);

    // Error, terminate
    if (head < 0) {
      vapp_send_queue_add(dtq->queue, &packet->node);
      break;
    }

    // No buffer
    if (head == vq->num) {
      vapp_send_queue_add(dtq->queue, &packet->node);
      if (unlikely(vhost_enable_notify(&dtq->vadev->dev, vq))) {
        vhost_disable_notify(&dtq->vadev->dev, vq);
        continue;
      }
      break;
    }

    nbytes = vhost_vapp_do_send_data_packet(packet, vq, out, in);
    vapp_send_data_packet_free(packet);
    if (nbytes < 0) {
      pr_info("data-rx: nbytes failed %zd\n", nbytes);
      break;
    }

    vhost_add_used(vq, head, nbytes);
    added = true;
    total += nbytes;
  } while (likely(!vhost_exceeds_weight(vq, ++pkts, total)));

  vapp_info("data-rx: vhost vq %p signal %d\n", vq, added);
  if (added)
    vhost_signal(&dtq->vadev->dev, vq);

	kthread_unuse_mm(dtq->vadev->dev.mm);
  mutex_unlock(&vq->mutex);
}

static ssize_t vhost_vapp_do_recv_data_packet(struct vhost_vapp_dev* vapp, struct vhost_virtqueue* vq, unsigned int out, unsigned in)
{
  struct vio_vapp_entity_header vheader;
  struct vio_vapp_entity_cpu_header header;
  struct iov_iter iov_iter;
  struct vio_vapp_entity_tree* tree;
  struct vio_vapp_entity* entity;
  size_t nbytes;
  u64 map;
  ssize_t len;

  if (in) {
    vq_err(vq, "Expected 0 input buffers, got %u\n", in);
    return -EFAULT;
  }

  len = iov_length(vq->iov, out);
  iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

  nbytes = copy_from_iter(&vheader, sizeof(vheader), &iov_iter);
  if (nbytes != sizeof(vheader)) {
    vq_err(vq, "Expected %zu byte payload, got %zu bytes\n", sizeof(header), nbytes);
    return -EPERM;
  }

  vapp_data_packet_header_to_cpu(&header, &vheader);

  tree = vhost_vapp_select_tree(vapp, header.type);

  entity = vapp_entity_tree_get(tree, header.id);
  if (!entity) {
    entity = vapp_entity_tree_get_or_insert_helper(tree, &header);
    if (unlikely(!entity))
      return -ENOMEM;
  }

  nbytes = copy_from_iter(entity->data + header.offset, header.length, &iov_iter);
  if (nbytes != header.length) {
    vq_err(vq, "Expected %zu byte payload, got %zu bytes\n", (size_t)header.length, nbytes);
    return -EPERM;
  }

  // TODO: if finished, send notify to upstream
  map = vio_vapp_entity_set_map(entity, header.map);
  vapp_info("data-do-tx: vq: %p, entity->id: %llu, map: 0x%016llx\n", vq, entity->id, map);
  if (map == vio_vapp_packet_get_expect_map_id(entity->alloc)) {
    pr_info("data-do-tx: notify (id: %llu, type: %u)\n", entity->id, entity->type);
    struct vio_vapp_notify_entity* notify = vio_vapp_notify_entity_alloc(vapp->conf.uuid, entity->id, entity->type);
    struct vio_vapp_context* ctx = vhost_vapp_get_vio_context();
    if (likely(notify) && likely(ctx))
      vio_vapp_notify_queue_mount(&ctx->queue, notify);
    else
      pr_warn("data-do-tx: notify failed for entity->id %llu %p %p\n", entity->id, notify, ctx);
  }

  return header.length + sizeof(header);
}

/*
 * Drain guest-submitted data packets from the TX virtqueue.
 *
 * Called with vq->mutex held (taken by ___vhost_vapp_precondition_check);
 * releases it on return.
 */
void vhost_vapp_do_data_tx_handle(struct vhost_virtqueue* vq, struct vhost_vapp_dtq* dtq)
{
  struct vhost_vapp_dev* vapp;

  int head, pkts = 0, total = 0;
  unsigned int out, in;
  bool added = false;

  vapp = dtq->vadev;
  kthread_use_mm(dtq->vadev->dev.mm);

  do {
    ssize_t len;

    head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov), &out, &in, NULL, NULL);
    vapp_info("data-tx: vhost_get_vq_desc: vq: %p, head: %d, out: %u in: %u\n", vq, head, out, in);
    if (head < 0)
      break;

    if (head == vq->num) {
      if (unlikely(vhost_enable_notify(&vapp->dev, vq))) {
        vhost_disable_notify(&vapp->dev, vq);
        continue;
      }

      break;
    }

    len = vhost_vapp_do_recv_data_packet(vapp, vq, out, in);
    if (len < 0) {
      /* This is the TX path; the original log said "data-rx". */
      pr_info("data-tx: nbytes failed %zd\n", len);
      vq_err(vq, "Failed to get pkt\n");
      /*
       * NOTE(review): this skips vhost_add_used() for a descriptor
       * already consumed by vhost_get_vq_desc(), so the guest buffer
       * is never returned — confirm this is intended.
       */
      continue;
    }

    total += len;
    vhost_add_used(vq, head, len);
    added = true;
  } while(likely(!vhost_exceeds_weight(vq, ++pkts, total)));

  vapp_info("data-tx: vhost vq %p signal %d\n", vq, added);
  if (added)
    vhost_signal(&vapp->dev, vq);

  kthread_unuse_mm(dtq->vadev->dev.mm);
  mutex_unlock(&vq->mutex);
}

/*
 * Validate that the vq has a backend and prefetched metadata before a
 * handler runs. On success (0) the vq mutex is held and notifications are
 * disabled — the handler is responsible for unlocking. On failure
 * (-EFAULT) the mutex has already been released.
 */
static int ___vhost_vapp_precondition_check(struct vhost_virtqueue* vq, struct vhost_vapp_dtq* dtq)
{
  mutex_lock(&vq->mutex);

  if (vhost_vq_get_backend(vq) && vq_meta_prefetch(vq)) {
    vhost_disable_notify(&dtq->vadev->dev, vq);
    return 0;
  }

  mutex_unlock(&vq->mutex);
  return -EFAULT;
}

/*
 * Defer a dtq work item onto the module workqueue. _vq is accepted for
 * signature parity with _vhost_check_queue_work but intentionally unused;
 * _work is a struct work_struct pointer. Only queues when _dtq is non-NULL
 * (the backend may already be torn down).
 */
#define vhost_check_queue_work(_vq, _dtq, _work) \
  do { \
    (void)(_vq); \
    if ((_dtq)) { \
      queue_work(vhost_vapp_workqueue, (_work)); \
    } \
  } while(0)

/*
 * Run a vq handler function _work(vq, dtq) inline, but only after the
 * precondition check succeeds (which leaves vq->mutex held for the
 * handler to release).
 */
#define _vhost_check_queue_work(_vq, _dtq, _work) \
  do { \
    if (!___vhost_vapp_precondition_check((_vq), (_dtq))) { \
      _work((_vq), (_dtq)); \
    } \
  } while(0)

/* Bounce the ctrl send work onto the module workqueue. */
void vhost_vapp_ctrl_work(struct vhost_work* work)
{
  struct vhost_vapp_dtq* dtq = container_of(work, struct vhost_vapp_dtq, work);

  vhost_check_queue_work(&dtq->vqs[__VAPP_VQ_RX], dtq, &dtq->send_work);
}
/* vhost poll callback: defer ctrl RX processing to the workqueue. */
void vhost_vapp_dtq_ctrl_rx_work(struct vhost_work* work)
{
  struct vhost_virtqueue* vq;
  struct vhost_vapp_dtq* dtq;

  vq = container_of(work, struct vhost_virtqueue, poll.work);
  dtq = vhost_vq_get_backend(vq);
  vhost_check_queue_work(vq, dtq, &dtq->rx_work);
}
/* vhost poll callback: defer ctrl TX processing to the workqueue. */
void vhost_vapp_dtq_ctrl_tx_work(struct vhost_work* work)
{
  struct vhost_virtqueue* vq;
  struct vhost_vapp_dtq* dtq;

  vq = container_of(work, struct vhost_virtqueue, poll.work);
  dtq = vhost_vq_get_backend(vq);
  vhost_check_queue_work(vq, dtq, &dtq->tx_work);
}

/* Bounce the data send work onto the module workqueue. */
void vhost_vapp_data_work(struct vhost_work* work)
{
  struct vhost_vapp_dtq* dtq = container_of(work, struct vhost_vapp_dtq, work);

  vhost_check_queue_work(&dtq->vqs[__VAPP_VQ_RX], dtq, &dtq->send_work);
}
/* vhost poll callback: defer data RX processing to the workqueue. */
void vhost_vapp_dtq_data_rx_work(struct vhost_work* work)
{
  struct vhost_virtqueue* vq;
  struct vhost_vapp_dtq* dtq;

  vq = container_of(work, struct vhost_virtqueue, poll.work);
  dtq = vhost_vq_get_backend(vq);
  vhost_check_queue_work(vq, dtq, &dtq->rx_work);
}
/* vhost poll callback: defer data TX processing to the workqueue. */
void vhost_vapp_dtq_data_tx_work(struct vhost_work* work)
{
  struct vhost_virtqueue* vq;
  struct vhost_vapp_dtq* dtq;

  vq = container_of(work, struct vhost_virtqueue, poll.work);
  dtq = vhost_vq_get_backend(vq);
  vhost_check_queue_work(vq, dtq, &dtq->tx_work);
}

void vhost_vapp_dtq_ctrl_tx_queue_work(struct work_struct* work)
{
  struct vhost_vapp_dtq* dtq = container_of(work, struct vhost_vapp_dtq, tx_work);
  struct vhost_virtqueue* vq = &dtq->vqs[__VAPP_VQ_TX];

  _vhost_check_queue_work(vq, dtq, vhost_vapp_do_ctrl_rx_handle);
}
/* Workqueue body: run the ctrl RX handler on the ctrl RX vq. */
void vhost_vapp_dtq_ctrl_rx_queue_work(struct work_struct* work)
{
  struct vhost_vapp_dtq* dtq;

  dtq = container_of(work, struct vhost_vapp_dtq, rx_work);
  _vhost_check_queue_work(&dtq->vqs[__VAPP_VQ_RX], dtq, vhost_vapp_do_ctrl_rx_handle);
}
/* Workqueue body: flush queued ctrl packets through the RX vq. */
void vhost_vapp_send_ctrl_queue_work(struct work_struct* work)
{
  struct vhost_vapp_dtq* dtq;

  dtq = container_of(work, struct vhost_vapp_dtq, send_work);
  _vhost_check_queue_work(&dtq->vqs[__VAPP_VQ_RX], dtq, vhost_vapp_do_ctrl_rx_handle);
}

/* Workqueue body: run the data TX handler on the data TX vq. */
void vhost_vapp_dtq_data_tx_queue_work(struct work_struct* work)
{
  struct vhost_vapp_dtq* dtq;

  dtq = container_of(work, struct vhost_vapp_dtq, tx_work);
  _vhost_check_queue_work(&dtq->vqs[__VAPP_VQ_TX], dtq, vhost_vapp_do_data_tx_handle);
}
/* Workqueue body: run the data RX handler on the data RX vq. */
void vhost_vapp_dtq_data_rx_queue_work(struct work_struct* work)
{
  struct vhost_vapp_dtq* dtq;

  dtq = container_of(work, struct vhost_vapp_dtq, rx_work);
  _vhost_check_queue_work(&dtq->vqs[__VAPP_VQ_RX], dtq, vhost_vapp_do_data_rx_handle);
}
/* Workqueue body: flush queued data packets through the RX vq. */
void vhost_vapp_send_data_queue_work(struct work_struct* work)
{
  struct vhost_vapp_dtq* dtq;

  dtq = container_of(work, struct vhost_vapp_dtq, send_work);
  _vhost_check_queue_work(&dtq->vqs[__VAPP_VQ_RX], dtq, vhost_vapp_do_data_rx_handle);
}


/*
 * Create the module-wide unbound workqueue used to defer vq processing.
 * Returns 0 on success, -ENOMEM if allocation fails.
 */
int vio_dtq_init(void)
{
  struct workqueue_struct* wq;

  wq = alloc_workqueue("vhost_vapp", WQ_UNBOUND, 0);
  if (!wq)
    return -ENOMEM;

  vhost_vapp_workqueue = wq;
  return 0;
}

/* Flush and destroy the module workqueue created by vio_dtq_init(). */
void vio_dtq_exit(void)
{
  destroy_workqueue(vhost_vapp_workqueue);
}
