/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2025 Inochi Amaoto

#ifndef __VHOST_VAPP_DTQ_H__
#define __VHOST_VAPP_DTQ_H__

#include "vhost-vapp.h"

/*
 * Kick handlers and work functions run by the vhost worker
 * (installed via handle_kick / vhost_work_init in the init helpers below).
 */
void vhost_vapp_dtq_ctrl_tx_work(struct vhost_work* work);
void vhost_vapp_dtq_ctrl_rx_work(struct vhost_work* work);
void vhost_vapp_ctrl_work(struct vhost_work* work);
void vhost_vapp_dtq_data_tx_work(struct vhost_work* work);
void vhost_vapp_dtq_data_rx_work(struct vhost_work* work);
void vhost_vapp_data_work(struct vhost_work* work);

/*
 * Deferred handlers run from a workqueue
 * (installed via INIT_WORK in the init helpers below).
 */
void vhost_vapp_dtq_ctrl_tx_queue_work(struct work_struct* work);
void vhost_vapp_dtq_ctrl_rx_queue_work(struct work_struct* work);
void vhost_vapp_send_ctrl_queue_work(struct work_struct* work);
void vhost_vapp_dtq_data_tx_queue_work(struct work_struct* work);
void vhost_vapp_dtq_data_rx_queue_work(struct work_struct* work);
void vhost_vapp_send_data_queue_work(struct work_struct* work);

/* Module-level setup/teardown for the dtq subsystem. */
int vio_dtq_init(void);
void vio_dtq_exit(void);

static inline void vhost_vapp_dtq_flush(struct vhost_vapp_dtq* dtq)
{
  for (int i = 0; i < ARRAY_SIZE(dtq->vqs); i++)
    if (dtq->vqs[i].handle_kick)
      vhost_poll_flush(&dtq->vqs[i].poll);
}

/*
 * Store the negotiated @features into every virtqueue, holding each
 * vq mutex so a running handler never observes a partial update.
 */
static inline void vhost_vapp_dtq_set_features(struct vhost_vapp_dtq* dtq, u64 features)
{
  for (int i = 0; i < ARRAY_SIZE(dtq->vqs); i++) {
    struct vhost_virtqueue *vq = &dtq->vqs[i];

    mutex_lock(&vq->mutex);
    vq->acked_features = features;
    mutex_unlock(&vq->mutex);
  }
}

static inline void vhost_vapp_dtq_work_queue(struct vhost_vapp_dtq* dtq)
{
  if (likely(dtq->vadev))
    vhost_work_queue(&dtq->vadev->dev, &dtq->work);
}

/* Set the send queue this dtq transmits on (NULL detaches it). */
static inline void vhost_vapp_dtq_set_queue(struct vhost_vapp_dtq* dtq,struct vapp_send_queue* queue)
{
  dtq->queue = queue;
}

/*
 * Attach (or, with NULL, detach) the backend device; a non-NULL backend
 * is what allows vhost_vapp_dtq_work_queue() to dispatch work.
 */
static inline void vhost_vapp_dtq_set_backend(struct vhost_vapp_dtq* dtq,struct vhost_vapp_dev* dev)
{
  dtq->vadev = dev;
}

/*
 * Return true while the number of queued replies is below the
 * virtqueue size, i.e. there is room to queue another reply.
 *
 * NOTE(review): queued_replies is read as a signed int and compared
 * against vq->num; if vq->num is unsigned, a negative count would
 * promote to a huge unsigned value and read as "full" -- confirm the
 * counter can never go negative.
 */
static inline bool vhost_dtq_has_more_replies(struct vhost_vapp_dtq* dtq, struct vhost_virtqueue* vq)
{
  int queued;

  /* Presumably pairs with a write barrier on the producer side -- verify. */
  smp_rmb();
  queued = atomic_read(&dtq->queued_replies);

  return queued < vq->num;
}

/* Zero the per-dtq counters; shared by the ctrl and data init flavours. */
static inline void __vhost_vapp_dtq_init_common(struct vhost_vapp_dtq* dtq)
{
  atomic_set(&dtq->queued_replies, 0);
  atomic_set(&dtq->id_pool, 0);
}

/* Initialize @dtq with the control-path handlers and work items. */
static inline void vhost_vapp_dtq_init_ctrl(struct vhost_vapp_dtq* dtq)
{
  __vhost_vapp_dtq_init_common(dtq);

  /* vhost worker entry points. */
  vhost_work_init(&dtq->work, vhost_vapp_ctrl_work);
  dtq->vqs[__VAPP_VQ_TX].handle_kick = vhost_vapp_dtq_ctrl_tx_work;
  dtq->vqs[__VAPP_VQ_RX].handle_kick = vhost_vapp_dtq_ctrl_rx_work;

  /* Workqueue items for deferred processing. */
  INIT_WORK(&dtq->tx_work, vhost_vapp_dtq_ctrl_tx_queue_work);
  INIT_WORK(&dtq->rx_work, vhost_vapp_dtq_ctrl_rx_queue_work);
  INIT_WORK(&dtq->send_work, vhost_vapp_send_ctrl_queue_work);
}

/* Initialize @dtq with the data-path handlers and work items. */
static inline void vhost_vapp_dtq_init_data(struct vhost_vapp_dtq* dtq)
{
  __vhost_vapp_dtq_init_common(dtq);

  /* vhost worker entry points. */
  vhost_work_init(&dtq->work, vhost_vapp_data_work);
  dtq->vqs[__VAPP_VQ_TX].handle_kick = vhost_vapp_dtq_data_tx_work;
  dtq->vqs[__VAPP_VQ_RX].handle_kick = vhost_vapp_dtq_data_rx_work;

  /* Workqueue items for deferred processing. */
  INIT_WORK(&dtq->tx_work, vhost_vapp_dtq_data_tx_queue_work);
  INIT_WORK(&dtq->rx_work, vhost_vapp_dtq_data_rx_queue_work);
  INIT_WORK(&dtq->send_work, vhost_vapp_send_data_queue_work);
}

static inline u32 inline vhost_vapp_dtq_get_id(struct vhost_vapp_dtq* dtq)
{
  return atomic_fetch_add(1, &dtq->id_pool);
}

#endif // ! __VHOST_VAPP_DTQ_H__
