// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Inochi Amaoto

#define pr_fmt(fmt) "virtio-vapp-dtq: " fmt

#include <linux/module.h>

#include "virtio-vapp.h"
#include "virtio-vapp-dtq.h"
#include "vio-entity.h"

/* Driver-private workqueue shared by every virtio-vapp work item. */
static struct workqueue_struct* virtio_vapp_workqueue;

/*
 * Queue @work on the module's private workqueue.
 *
 * Returns true if @work was newly queued, false if it was already
 * pending (standard queue_work() semantics).
 */
bool virtio_vapp_queue_work(struct work_struct* work)
{
  return queue_work(virtio_vapp_workqueue, work);
}

/*
 * Virtqueue callback for the control-DTQ TX queue: defer the actual
 * completion harvesting to the control TX work item.
 */
void virtio_vapp_dtq_ctrl_tx_kick(struct virtqueue* vq)
{
  struct virtio_vapp_dev* vapp = vq->vdev->priv;

  if (vapp)
    virtio_vapp_queue_work(&vapp->cdtq.tx_work);
}

/*
 * Virtqueue callback for the control-DTQ RX queue: defer the actual
 * packet processing to the control RX work item.
 */
void virtio_vapp_dtq_ctrl_rx_kick(struct virtqueue* vq)
{
  struct virtio_vapp_dev* vapp = vq->vdev->priv;

  if (vapp)
    virtio_vapp_queue_work(&vapp->cdtq.rx_work);
}

/*
 * Virtqueue callback for a data-DTQ TX queue.
 *
 * The owning DTQ index is derived as (vq->index - __VAPP_VQ_MAX) / 2;
 * data queues appear to come after the fixed control queues, paired
 * per DTQ — TODO confirm against the queue-setup code.
 */
void virtio_vapp_dtq_data_tx_kick(struct virtqueue* vq)
{
  struct virtio_vapp_dev* vapp = vq->vdev->priv;
  int index;

  if (!vapp)
    return;

  index = (vq->index - __VAPP_VQ_MAX) / 2;
  vapp_info("kick: process dtq %d tx work, vq %d\n", index, vq->index);

  /* Check both bounds: a negative index would wrap to a huge value in
   * the comparison if config.multiqueues is unsigned, defeating the
   * upper-bound check and indexing ddtqs[] out of range. */
  if (likely(index >= 0 && index < vapp->config.multiqueues))
    queue_work(virtio_vapp_workqueue, &vapp->ddtqs[index].tx_work);
}

/*
 * Virtqueue callback for a data-DTQ RX queue.
 *
 * Same index derivation as virtio_vapp_dtq_data_tx_kick():
 * (vq->index - __VAPP_VQ_MAX) / 2.
 */
void virtio_vapp_dtq_data_rx_kick(struct virtqueue* vq)
{
  struct virtio_vapp_dev* vapp = vq->vdev->priv;
  int index;

  if (!vapp)
    return;

  index = (vq->index - __VAPP_VQ_MAX) / 2;
  vapp_info("kick: process dtq %d rx work, vq %d\n", index, vq->index);

  /* Check both bounds: a negative index would wrap in the comparison
   * if config.multiqueues is unsigned, allowing out-of-range access. */
  if (likely(index >= 0 && index < vapp->config.multiqueues))
    queue_work(virtio_vapp_workqueue, &vapp->ddtqs[index].rx_work);
}

/*
 * Work item: harvest completed control TX buffers.
 *
 * Frees each completed packet, then re-arms the callback; the
 * disable/enable loop closes the race where the device adds a buffer
 * between the final virtqueue_get_buf() and virtqueue_enable_cb().
 * If anything completed, descriptors were freed, so requeue the send
 * work to push any backlogged control packets.
 */
void virtio_vapp_dtq_ctrl_tx_work(struct work_struct* work)
{
  struct virtio_vapp_dtq* dtq = container_of(work, struct virtio_vapp_dtq, tx_work);
  struct virtqueue* vq = dtq->vqs[__VAPP_VQ_TX];
  bool added = false;

  virtio_vapp_dtq_lock_tx(dtq);

  if (!virtio_vapp_dtq_is_tx_run(dtq))
    goto not_run;

  do {
    struct vapp_send_ctrl_data_packet* packet;
    unsigned int len;

    while ((packet = virtqueue_get_buf(vq, &len)) != NULL) {
      // TODO: notify it

      vapp_send_ctrl_data_packet_free(packet);
      added = true;
    }

    virtqueue_disable_cb(vq);
  } while (!virtqueue_enable_cb(vq));

 not_run:
  virtio_vapp_dtq_unlock_tx(dtq);

  if (added)
    virtio_vapp_queue_work(&dtq->send_work);
}

/*
 * Work item: process received control packets.
 *
 * Fixes over the previous version:
 *  - virtqueue_get_buf() can return NULL when the queue is drained;
 *    the old loop never checked, so it spun forever on a NULL packet
 *    (and read an uninitialized @len).
 *  - rx_bufs is decremented for every harvested buffer, including
 *    runt packets that are dropped, so the refill threshold stays
 *    accurate.
 *  - Refill uses the CTRL fill helper; the old code called
 *    virtio_vapp_dtq_data_rx_fill(), which posts *data* packets into
 *    the control RX virtqueue.
 */
void virtio_vapp_dtq_ctrl_rx_work(struct work_struct* work)
{
  struct virtio_vapp_dtq* dtq = container_of(work, struct virtio_vapp_dtq, rx_work);
  struct virtqueue* vq = dtq->vqs[__VAPP_VQ_RX];

  virtio_vapp_dtq_lock_rx(dtq);

  if (!virtio_vapp_dtq_is_rx_run(dtq))
    goto not_run;

  do {
    virtqueue_disable_cb(vq);
    for (;;) {
      struct vapp_send_ctrl_data_packet* packet;
      unsigned int len;

      packet = virtqueue_get_buf(vq, &len);
      if (!packet)
        break;

      /* Buffer left the virtqueue regardless of validity. */
      dtq->rx_bufs--;

      /* Runt packet: drop it. */
      if (unlikely(len < sizeof(packet->pkt))) {
        vapp_send_ctrl_data_packet_free(packet);
        continue;
      }

      // TODO: process it
      vapp_send_ctrl_data_packet_free(packet);
    }
  } while (!virtqueue_enable_cb(vq));

 not_run:
  if (dtq->rx_bufs < dtq->max_rx_bufs / VIRTIO_VAPP_DTQ_REFILL_DIV_FACTOR)
    virtio_vapp_dtq_ctrl_rx_fill(dtq);
  else
    virtqueue_kick(vq);
  virtio_vapp_dtq_unlock_rx(dtq);
}

void virtio_vapp_send_ctrl_work(struct work_struct* work)
{
  struct virtio_vapp_dtq* dtq = container_of(work, struct virtio_vapp_dtq, send_work);
  struct virtqueue* vq = dtq->vqs[__VAPP_VQ_TX];
  struct scatterlist hdr,* sgs[1];
  bool added = false;

  virtio_vapp_dtq_lock_tx(dtq);

  if (!virtio_vapp_dtq_is_tx_run(dtq))
    goto not_run;

  for (;;) {
    struct vapp_send_ctrl_data_packet* packet;
    int ret;

    packet = vapp_send_queue_get(dtq->queue, struct vapp_send_ctrl_data_packet, node);
    if (!packet)
      break;

    sg_init_one(&hdr, &packet->pkt, sizeof(packet->pkt));
    sgs[0] = &hdr;

    ret = virtqueue_add_sgs(vq, sgs, 1, 0, packet, GFP_KERNEL);
    if (ret < 0) {
      vapp_send_queue_add(dtq->queue, &packet->node);
      break;
    }

    added = true;
  }

  if (added)
    virtqueue_kick(vq);

 not_run:
  virtio_vapp_dtq_unlock_tx(dtq);
}

/*
 * Work item: harvest completed data TX buffers.
 *
 * For each completed packet, the sent fragment's bit is merged into
 * the entity's transfer map; once the map matches the expected value
 * for entity->alloc, the whole entity has been transmitted:
 *  - NO_REPLY entities are removed from their tree and freed;
 *  - otherwise the entity is flagged REQ_CHECK and waiters are woken
 *    (the RX path waits on that flag before touching the entity).
 * If any descriptor was freed, the send work is requeued to push
 * backlogged data packets.
 */
void virtio_vapp_dtq_data_tx_work(struct work_struct* work)
{
  struct virtio_vapp_dtq* dtq = container_of(work, struct virtio_vapp_dtq, tx_work);
  struct virtqueue* vq = dtq->vqs[__VAPP_VQ_TX];
  bool added = false;

  virtio_vapp_dtq_lock_tx(dtq);

  if (!virtio_vapp_dtq_is_tx_run(dtq))
    goto not_run;

  do {
    struct vapp_send_data_packet* packet;
    unsigned int len;

    virtqueue_disable_cb(vq);
    while ((packet = virtqueue_get_buf(vq, &len)) != NULL) {
      // FIXME: review it
      // TODO: for program exits with NO_REPLY
      u64 map = vio_vapp_entity_set_map(packet->entity, packet->map);
      vapp_info("data-do-tx: vq: %p, entity->id: %llu, map: 0x%016llx\n", vq, packet->entity->id, map);
      if (map == vio_vapp_packet_get_expect_map_id(packet->entity->alloc)) {
        // init map for entity rx and free data
        pr_info("data-tx-callback: entity %llu finished\n", packet->entity->id);
        vio_vapp_entity_init_map(packet->entity);
        if (VIO_TYPE_IS_NOREPLY(packet->entity->type)) {
          vapp_entity_tree_remove(packet->tree, packet->entity);
          vio_vapp_entity_free(packet->entity);
        } else {
          vio_entity_set_status_req_check(packet->entity);
          wake_up(&packet->entity->wait);
        }
      }
      vapp_send_data_packet_free(packet);
      added = true;
    }
  } while (!virtqueue_enable_cb(vq));

 not_run:
  virtio_vapp_dtq_unlock_tx(dtq);

  if (added)
    virtio_vapp_queue_work(&dtq->send_work);
}

static int virtio_vapp_dtq_do_data_rx_work(struct vio_vapp_entity_cpu_header* header, struct vapp_send_data_packet* packet)
{
  struct vio_vapp_context* ctx;
  struct vio_vapp_entity_tree* tree;
  struct vio_vapp_entity* entity;
  u64 map;

  ctx = virtio_vapp_get_vio_dev(header->fid);
  if (!ctx) {
    pr_warn("data-rx: packet to an unexist or freed context (id: %lu)", (unsigned long)header->fid);
    return -ENODEV;
  }

  tree = virtio_vio_vapp_select_tree(ctx, header->type);
  entity = vapp_entity_tree_get(tree, header->id);

  if (!entity) {
    if (VIO_TYPE_IS_HOST(header->type))
      return -ENOBUFS;

    entity = vapp_entity_tree_get_or_insert_helper(tree, header);
    if (unlikely(!entity))
      return -ENOMEM;
  }

  if (entity->fid != header->fid)
    pr_warn("entity has different fid with header\n");

  if (wait_event_interruptible(entity->wait, vio_entity_status_has(entity, VIO_STATUS_REQ_CHECK)))
    return -EINTR;

  if (!entity->data || entity->alloc < header->alloc) {
    pr_info("data-do-rx: realloc entity data\n");
    if (entity->data)
      vio_vapp_entity_free_data(entity);

    entity->data = vio_vapp_entity_alloc_data(header->alloc);
    if (unlikely(!entity->data))
      return -ENOMEM;
    entity->alloc = header->alloc;
  }

  if (entity->alloc != header->alloc)
    entity->alloc = header->alloc;

  memcpy(entity->data + header->offset, packet->pkt.buffer, header->length);

  map = vio_vapp_entity_set_map(entity, header->map);
  vapp_info("data-do-rx: entity->id: %llu, map: 0x%016llx\n", entity->id, map);
  if (map == vio_vapp_packet_get_expect_map_id(entity->alloc)) {
    // TODO: implement notify
    pr_info("data-do-rx: notify (id: %llu, fid: %llu type: %u)\n", entity->id, header->fid, entity->type);
    vio_entity_unset_status_req_check(entity);
    struct vio_vapp_notify_entity* notify = vio_vapp_notify_entity_alloc(entity->id, entity->type);
    if (likely(notify))
      vio_vapp_notify_queue_mount(&ctx->queue, notify);
  }

  return 0;
}

/*
 * Work item: harvest and process received data packets.
 *
 * Fix: rx_bufs is now decremented for every buffer harvested from the
 * virtqueue. Previously malformed packets (runt, or length mismatch)
 * were dropped without adjusting rx_bufs, so the counter drifted
 * upward and the refill threshold below eventually stopped firing.
 */
void virtio_vapp_dtq_data_rx_work(struct work_struct* work)
{
  struct virtio_vapp_dtq* dtq = container_of(work, struct virtio_vapp_dtq, rx_work);
  struct virtqueue* vq = dtq->vqs[__VAPP_VQ_RX];

  virtio_vapp_dtq_lock_rx(dtq);

  if (!virtio_vapp_dtq_is_rx_run(dtq))
    goto not_run;

  do {
    virtqueue_disable_cb(vq);

    for (;;) {
      struct vio_vapp_entity_cpu_header header;
      struct vapp_send_data_packet* packet;
      unsigned int len;
      int ret;

      packet = virtqueue_get_buf(vq, &len);
      if (!packet)
        break;

      /* The buffer left the virtqueue regardless of how the packet is
       * handled below, so account for it up front. */
      dtq->rx_bufs--;

      vapp_info("data-rx: vq %p: packet %p with len %u\n", vq, packet, len);

      /* Too short to contain even the header: drop. */
      if (unlikely(len < sizeof(packet->pkt.header))) {
        vapp_send_data_packet_free_with_data(packet);
        continue;
      }

      vapp_data_packet_header_to_cpu(&header, &packet->pkt.header);

      /* The device-reported length must match the header's claim. */
      if (unlikely(len != sizeof(packet->pkt.header) + header.length)) {
        vapp_send_data_packet_free_with_data(packet);
        continue;
      }

      ret = virtio_vapp_dtq_do_data_rx_work(&header, packet);
      vapp_send_data_packet_free_with_data(packet);
      if (ret) {
        // TODO: unknow handle
      }
    }
  } while (!virtqueue_enable_cb(vq));

 not_run:
  if (dtq->rx_bufs < dtq->max_rx_bufs / VIRTIO_VAPP_DTQ_REFILL_DIV_FACTOR)
    virtio_vapp_dtq_data_rx_fill(dtq);
  virtio_vapp_dtq_unlock_rx(dtq);
}

/*
 * Work item: drain the pending data-packet queue into the TX
 * virtqueue. Each packet is posted as a header sg plus an sg list
 * covering the payload pages (entity->data is walked page by page via
 * vmalloc_to_page(), so it is presumably vmalloc'd — see
 * vio_vapp_entity_alloc_data; TODO confirm).
 *
 * Changes:
 *  - sg array uses kmalloc() like virtio_vapp_dtq_data_rx_fill()
 *    (small fixed size; kvfree() handles either allocator);
 *  - DIV_ROUND_UP instead of the open-coded round-up;
 *  - a zero-length payload no longer calls sg_init_table(ring, 0)
 *    (which writes sg_mark_end out of bounds); the header is posted
 *    alone in that case.
 */
void virtio_vapp_send_data_work(struct work_struct* work)
{
  struct virtio_vapp_dtq* dtq = container_of(work, struct virtio_vapp_dtq, send_work);
  struct virtqueue* vq = dtq->vqs[__VAPP_VQ_TX];
  struct scatterlist hdr,* ring,* sgs[2];
  bool added = false;

  virtio_vapp_dtq_lock_tx(dtq);

  if (!virtio_vapp_dtq_is_tx_run(dtq))
    goto not_run;

  ring = kmalloc(sizeof(*ring) * (VIO_VAPP_PKT_PAGES), GFP_KERNEL);
  if (!ring)
    goto not_run;

  for (;;) {
    struct vapp_send_data_packet* packet;
    struct vio_vapp_entity* entity;
    u32 length, offset, i, rpages;
    int ret;

    packet = vapp_send_queue_get(dtq->queue, struct vapp_send_data_packet, node);
    if (!packet)
      break;

    length = le32_to_cpu(packet->pkt.header.length);
    offset = le32_to_cpu(packet->pkt.header.offset);
    rpages = DIV_ROUND_UP(length, PAGE_SIZE);
    entity = packet->entity;

    vapp_info("send-work: entity: %p, offset/length: %u/%u\n", entity, offset, length);

    sg_init_one(&hdr, &packet->pkt.header, sizeof(packet->pkt.header));
    sgs[0] = &hdr;

    if (rpages) {
      sg_init_table(ring, rpages);
      for (i = 0; i < rpages; i++) {
        void* pdata = entity->data + offset + i * PAGE_SIZE;
        unsigned long poffset = offset_in_page(pdata);
        /* Last page may be partial. */
        unsigned long plength = length >= (i+1) * PAGE_SIZE ? PAGE_SIZE : length - i * PAGE_SIZE;
        sg_set_page(&ring[i], vmalloc_to_page(pdata), plength, poffset);
      }
      sgs[1] = &ring[0];
      ret = virtqueue_add_sgs(vq, sgs, 2, 0, packet, GFP_KERNEL);
    } else {
      /* No payload: post the header only. */
      ret = virtqueue_add_sgs(vq, sgs, 1, 0, packet, GFP_KERNEL);
    }

    if (ret < 0) {
      pr_info("vq %p insert failed ret %d", vq, ret);
      vapp_send_queue_add(dtq->queue, &packet->node);
      break;
    }

    added = true;
  }

  if (added)
    virtqueue_kick(vq);
  kvfree(ring);

 not_run:
  virtio_vapp_dtq_unlock_tx(dtq);
}

void virtio_vapp_dtq_ctrl_rx_fill(struct virtio_vapp_dtq* dtq)
{
  struct vapp_send_ctrl_data_packet* packet;
  struct scatterlist hdr,* sgs[1];
  struct virtqueue *vq;
  int ret;

  vq = dtq->vqs[__VAPP_VQ_RX];

  do {
    packet = vapp_send_ctrl_data_packet_alloc();
    if (!packet)
      break;

    sg_init_one(&hdr, &packet->pkt, sizeof(packet->pkt));
    sgs[0] = &hdr;

    ret = virtqueue_add_sgs(vq, sgs, 0, 1, packet, GFP_KERNEL);
    if (ret) {
      vapp_send_ctrl_data_packet_free(packet);
      break;
    }

    dtq->rx_bufs++;
  } while (vq->num_free);

  if (dtq->rx_bufs > dtq->max_rx_bufs)
    dtq->max_rx_bufs = dtq->rx_bufs;

  virtqueue_kick(vq);
}


/*
 * Refill the data RX virtqueue with empty data packets (header sg +
 * VIO_VAPP_PKT_PAGES payload-page sgs each) until allocation fails,
 * the virtqueue rejects a buffer, or free descriptors run out.
 * Updates the max_rx_bufs high-water mark and kicks the device.
 *
 * Changes:
 *  - a packet whose payload allocation failed is released with
 *    vapp_send_data_packet_free() (the matching destructor, as used
 *    by the TX completion path) instead of a raw kvfree();
 *  - the page-alignment check is hoisted out of the per-page loop —
 *    offset_in_page(packet->pkt.buffer) does not depend on the loop
 *    index;
 *  - dead commented-out allocation variant removed.
 */
void virtio_vapp_dtq_data_rx_fill(struct virtio_vapp_dtq* dtq)
{
  struct vapp_send_data_packet* packet;
  struct scatterlist hdr,* ring,* sgs[2];
  struct virtqueue *vq;
  size_t i;
  int ret;

  vq = dtq->vqs[__VAPP_VQ_RX];

  pr_info("dtq-fill: dtq %p fill\n", dtq);
  ring = kmalloc(sizeof(*ring) * (VIO_VAPP_PKT_PAGES), GFP_KERNEL);
  if (!ring)
    return;

  do {
    unsigned long poffset;

    packet = vapp_send_data_packet_alloc();
    if (!packet)
      break;

    packet->pkt.buffer = vmalloc(VIO_VAPP_PKT_SIZE);
    if (!packet->pkt.buffer) {
      vapp_send_data_packet_free(packet);
      break;
    }

    /* vmalloc() memory is page-aligned; anything else cannot be
     * mapped page-by-page below, so bail out. */
    poffset = offset_in_page(packet->pkt.buffer);
    if (poffset != 0) {
      pr_warn("unexpect offset %lu\n", poffset);
      vapp_send_data_packet_free_with_data(packet);
      goto end;
    }

    sg_init_one(&hdr, &packet->pkt.header, sizeof(packet->pkt.header));
    sg_init_table(ring, VIO_VAPP_PKT_PAGES);

    for (i = 0; i < VIO_VAPP_PKT_PAGES; i++) {
      void* pdata = packet->pkt.buffer + i * PAGE_SIZE;

      sg_set_page(&ring[i], vmalloc_to_page(pdata), PAGE_SIZE, 0);
    }

    sgs[0] = &hdr;
    sgs[1] = ring;

    /* Device-writable buffers: 0 out sgs, 2 in sgs. */
    ret = virtqueue_add_sgs(vq, sgs, 0, 2, packet, GFP_KERNEL);
    if (ret) {
      vapp_send_data_packet_free_with_data(packet);
      break;
    }

    dtq->rx_bufs++;
  } while (vq->num_free);

 end:
  if (dtq->rx_bufs > dtq->max_rx_bufs)
    dtq->max_rx_bufs = dtq->rx_bufs;

  kvfree(ring);
  virtqueue_kick(vq);
}

/*
 * Module init: create the unbound workqueue shared by all DTQ work
 * items. Returns 0 on success or -ENOMEM on allocation failure.
 * (Also normalizes the mixed tab/space indentation.)
 */
int __init vio_dtq_init(void)
{
  virtio_vapp_workqueue = alloc_workqueue("virtio_vapp", WQ_UNBOUND, 0);
  if (!virtio_vapp_workqueue)
    return -ENOMEM;

  return 0;
}

void vio_dtq_exit(void)
{
  destroy_workqueue(virtio_vapp_workqueue);
}
