/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2025 Inochi Amaoto

#ifndef __VAPP_COMMON_H__
#define __VAPP_COMMON_H__

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/virtio_vapp.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kthread.h>

#include "vhost.h"

// Debug logging for the vapp driver; compiled out by default.
// Swap which of the two definitions is commented to route vapp_info()
// through pr_info().
// #define vapp_info(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
#define vapp_info(fmt, ...)

/*
 * Wire-format entity header shared with the virtio device.
 * All fields are little-endian on the wire; convert to/from the
 * native-endian struct vio_vapp_entity_cpu_header with
 * vapp_data_packet_header_to_virtio()/vapp_data_packet_header_to_cpu().
 */
struct vio_vapp_entity_header {
  __le64 id;
  __le64 fid;
  __le64 map;
  __le32 type;
  __le32 alloc;
  __le32 offset;
  __le32 length;
};

/*
 * Native-endian mirror of struct vio_vapp_entity_header, used for
 * CPU-side manipulation before/after endian conversion. Field
 * meanings match the wire struct one-to-one.
 */
struct vio_vapp_entity_cpu_header {
  u64 id;
  u64 fid;
  u64 map;
  u32 type;
  u32 alloc;
  u32 offset;
  u32 length;
};

/*
 * A data packet: wire header plus an optional payload buffer.
 * @buffer may be NULL (vapp_send_data_packet_free_with_data() checks
 * for this); when set, it is released with kvfree().
 */
struct vapp_data_packet {
  struct vio_vapp_entity_header header;
  char* buffer;
};

/*
 * Data packet queued for transmission on a vapp_send_queue.
 * NOTE(review): @entity/@tree/@map appear to tie the packet back to
 * the originating vio_vapp_entity — confirm against the .c users;
 * only the declarations are visible in this header.
 */
struct vapp_send_data_packet {
  struct vapp_data_packet pkt;
  struct list_head node;               // linkage on vapp_send_queue::queue
  struct vio_vapp_entity* entity;
  struct vio_vapp_entity_tree* tree;
  u64 map;
};

/*
 * Control-plane message. __packed because the byte layout is part of
 * the wire protocol, so no implicit padding is allowed.
 */
struct vapp_ctrl_data_packet {
  __le32 op;        // operation code, little-endian
  __le32 feature;   // feature word, little-endian
  u8 data[48];      // inline payload; interpretation depends on @op
} __packed;

/* Control packet wrapped with list linkage for a vapp_send_queue. */
struct vapp_send_ctrl_data_packet {
  struct vapp_ctrl_data_packet pkt;
  struct list_head node;   // linkage on vapp_send_queue::queue
};

/*
 * List of pending send packets. @lock is always taken with
 * spin_lock_bh() by the helpers below, so the queue is safe against
 * softirq-context users.
 */
struct vapp_send_queue {
  struct list_head queue;
  spinlock_t lock;
};

/*
 * Encode a native-endian entity header into the little-endian layout
 * shared with the virtio device. Field-for-field copy; no validation.
 */
static inline void vapp_data_packet_header_to_virtio(struct vio_vapp_entity_header* out, struct vio_vapp_entity_cpu_header* in)
{
  out->length = cpu_to_le32(in->length);
  out->offset = cpu_to_le32(in->offset);
  out->alloc = cpu_to_le32(in->alloc);
  out->type = cpu_to_le32(in->type);
  out->map = cpu_to_le64(in->map);
  out->fid = cpu_to_le64(in->fid);
  out->id = cpu_to_le64(in->id);
}

/*
 * Decode a wire-format (little-endian) entity header into its
 * native-endian CPU representation. Inverse of
 * vapp_data_packet_header_to_virtio(). Field-for-field copy.
 */
static inline void vapp_data_packet_header_to_cpu(struct vio_vapp_entity_cpu_header* dst, struct vio_vapp_entity_header* src)
{
  dst->id = le64_to_cpu(src->id);
  dst->fid = le64_to_cpu(src->fid);
  dst->map = le64_to_cpu(src->map);
  dst->type = le32_to_cpu(src->type);
  dst->alloc = le32_to_cpu(src->alloc);
  dst->offset = le32_to_cpu(src->offset);
  dst->length = le32_to_cpu(src->length);
}

/* Prepare an empty, unlocked send queue. */
static inline void vapp_send_queue_init(struct vapp_send_queue* q)
{
  INIT_LIST_HEAD(&q->queue);
  spin_lock_init(&q->lock);
}

/* Insert @item at the head of @q (LIFO order), BH-safe. */
static inline void vapp_send_queue_add(struct vapp_send_queue* q, struct list_head* item)
{
  spin_lock_bh(&q->lock);
  list_add(item, &q->queue);
  spin_unlock_bh(&q->lock);
}

/* Insert @item at the tail of @q (FIFO order), BH-safe. */
static inline void vapp_send_queue_add_tail(struct vapp_send_queue* q, struct list_head* item)
{
  spin_lock_bh(&q->lock);
  list_add_tail(item, &q->queue);
  spin_unlock_bh(&q->lock);
}

static inline struct vapp_send_ctrl_data_packet* vapp_send_ctrl_data_packet_alloc(void)
{
  struct vapp_send_ctrl_data_packet* pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
  if (likely(pkt))
    INIT_LIST_HEAD(&pkt->node);

  return pkt;
}

/*
 * Release a packet obtained from vapp_send_ctrl_data_packet_alloc().
 * The packet is kmalloc()ed, so pair it with kfree() rather than the
 * heavier kvfree() (which exists for possibly-vmalloc()ed memory).
 */
static inline void vapp_send_ctrl_data_packet_free(struct vapp_send_ctrl_data_packet* pkt)
{
  kfree(pkt);
}

/*
 * Allocate a data-plane send packet. Returns NULL on OOM.
 *
 * The allocation is zeroed so that pkt->pkt.buffer starts out NULL:
 * vapp_send_data_packet_free_with_data() tests that pointer, and a
 * plain kmalloc() would leave it as garbage if the caller queued the
 * packet without ever attaching a buffer.
 */
static inline struct vapp_send_data_packet* vapp_send_data_packet_alloc(void)
{
  struct vapp_send_data_packet* pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);

  if (likely(pkt))
    INIT_LIST_HEAD(&pkt->node);

  return pkt;
}

/*
 * Release a packet obtained from vapp_send_data_packet_alloc()
 * WITHOUT touching its payload buffer (caller keeps ownership of it).
 * The packet is kmalloc()ed, so pair it with kfree().
 */
static inline void vapp_send_data_packet_free(struct vapp_send_data_packet* pkt)
{
  kfree(pkt);
}

/*
 * Release a data packet together with its payload buffer.
 *
 * kvfree(NULL) is a no-op, so the previous NULL guard was redundant.
 * The buffer's allocator is not visible in this header, so kvfree()
 * (which handles both kmalloc and vmalloc memory) is kept for it; the
 * packet itself comes from kmalloc (vapp_send_data_packet_alloc()),
 * so kfree() it.
 */
static inline void vapp_send_data_packet_free_with_data(struct vapp_send_data_packet* pkt)
{
  kvfree(pkt->pkt.buffer);
  kfree(pkt);
}

/*
 * Pop and return the first packet on @_queue, or NULL if the queue is
 * empty. The entry is unlinked (list_del_init) under the queue lock.
 * @_type and @_member name the container type and its list_head
 * field, as for list_first_entry().
 */
#define vapp_send_queue_get(_queue, _type, _member) ({ \
  _type* __packet = NULL; \
  spin_lock_bh(&(_queue)->lock); \
  if (!list_empty(&(_queue)->queue)) { \
    __packet = list_first_entry(&(_queue)->queue, _type, _member); \
    list_del_init(&__packet->_member); \
  } \
  spin_unlock_bh(&(_queue)->lock); \
  __packet; \
})

/*
 * Drain @_queue, releasing every queued packet with @_free.
 *
 * The entries are spliced onto a private list under the queue lock
 * and freed only after the lock is dropped: @_free is typically one
 * of the vapp_send_*_packet_free() helpers, which use kvfree(), and
 * kvfree() must not be called from atomic context — freeing under
 * spin_lock_bh() (as the previous version did) was therefore unsafe.
 */
#define vapp_send_queue_destory(_queue, _type, _member, _free) \
  do { \
    _type* __n,* __pos; \
    LIST_HEAD(__drain); \
    spin_lock_bh(&(_queue)->lock); \
    list_splice_init(&(_queue)->queue, &__drain); \
    spin_unlock_bh(&(_queue)->lock); \
    list_for_each_entry_safe(__pos, __n, &__drain, _member) { \
      list_del(&__pos->_member); \
      _free(__pos); \
    } \
  } while(0)

/* Correctly spelled alias; "destory" is kept for existing callers. */
#define vapp_send_queue_destroy vapp_send_queue_destory

// Misc-device parameters for the vapp user device node.
#define VAPP_USER_DEVICE_MINOR MISC_DYNAMIC_MINOR
#define VAPP_USER_DEVICE_MODE (umode_t)(S_IRUGO | S_IWUGO)   // world-readable/writable

// vm_flags applied to vapp mappings: treat as I/O memory, never
// expand, never dump to core, never copy across fork().
#define VIO_MMAP_FLAG (VM_IO | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY)

/* Bit positions for vio_vapp_entity::status (see VIO_STATUS_*). */
enum {
  VIO_MMAP_BIT        = 0,
  VIO_REQ_BIT         = 1,
  VIO_RSP_BIT         = 2,
  VIO_REQ_CHECK_BIT   = 3,
};

/*
 * Per-entity state, keyed by @id inside a vio_vapp_entity_tree.
 * NOTE(review): field semantics below are inferred from the helpers
 * in this header; confirm against the .c users.
 */
struct vio_vapp_entity {
  struct rb_node node;   // linkage in vio_vapp_entity_tree::root
  u64 id;
  u64 fid;
  u64 alloc;
  atomic64_t now;        // packet bitmap accumulated via vio_vapp_entity_set_map()
  u32 type;
// Status flags stored in @status (bit positions from the enum above).
#define VIO_STATUS_INITAL       0x00U
#define VIO_STATUS_MMAP         (1U << (VIO_MMAP_BIT))
#define VIO_STATUS_REQ          (1U << (VIO_REQ_BIT))
#define VIO_STATUS_RSP          (1U << (VIO_RSP_BIT))
#define VIO_STATUS_REQ_CHECK    (1U << (VIO_REQ_CHECK_BIT))
  atomic_t status;
  wait_queue_head_t wait;   // presumably waiters for @status changes — TODO confirm
  unsigned long uaddr;      // presumably a userspace mapping address — TODO confirm
  void* data;
};

// helper status: commonly-tested flag combinations
#define VIO_STATUS_REQ_MMAP (VIO_STATUS_MMAP | VIO_STATUS_REQ)
#define VIO_STATUS_RSP_MMAP (VIO_STATUS_MMAP | VIO_STATUS_RSP)

/* Red-black tree of vio_vapp_entity nodes, protected by @lock. */
struct vio_vapp_entity_tree {
  struct rb_root root;
  spinlock_t lock;
};

/* Strict less-than ordering on entity ids (rb-tree comparator). */
#define vio_vapp_entity_id_cmp(_x, _y) ((_x) < (_y))

/* Number of fixed-size packets needed to carry @size bytes (rounds up). */
static inline u64 vio_vapp_packet_nr_pkt(u64 size)
{
  u64 rounded = size + VIO_VAPP_PKT_SIZE - 1;

  return rounded >> VIO_VAPP_PKT_SHIFT;
}

/*
 * Expected packet bitmap for an entity of @size bytes: bit i is set
 * for every packet i the entity should deliver.
 *
 * Returns an all-ones mask once 64 or more packets are needed. The
 * previous code only special-cased nr == 64; for nr > 64 it shifted a
 * u64 by at least its width, which is undefined behaviour, so the
 * guard is now nr >= 64.
 */
static inline u64 vio_vapp_packet_get_expect_map_id(u64 size)
{
  u64 nr = vio_vapp_packet_nr_pkt(size);

  if (nr >= 64)
    return (u64)-1;

  return (((u64)1) << nr) - 1;
}

/*
 * Single-bit mask identifying the packet that covers byte @offset.
 * NOTE(review): assumes offset < 64 * VIO_VAPP_PKT_SIZE — a larger
 * offset would shift past the u64 width; confirm callers guarantee it.
 */
static inline u64 vio_vapp_packet_get_map_id(u64 offset)
{
  u64 idx = offset >> VIO_VAPP_PKT_SHIFT;

  return ((u64)1) << idx;
}

/*
 * Atomically OR @map into the entity's packet bitmap and return the
 * resulting (post-OR) bitmap value.
 */
static inline u64 vio_vapp_entity_set_map(struct vio_vapp_entity* entity, u64 map)
{
  return atomic64_fetch_or(map, &entity->now) | map;
}

/* Reset the entity's packet bitmap to empty. */
static inline void vio_vapp_entity_init_map(struct vio_vapp_entity* entity)
{
  atomic64_set(&entity->now, 0);
}

/*
 * One mmap slot. NOTE(review): appears to sit either in a tree of
 * active mappings (@map_node) or on the idle list (@idle_node) of a
 * vio_mmap_entity_tree — confirm against the .c users; only the
 * declaration is visible in this header.
 */
struct vio_mmap_entity {
  struct rb_node map_node;
  struct list_head idle_node;       // linkage on vio_mmap_entity_tree::idle
  struct vio_vapp_entity* entity;   // entity currently backing this slot
  unsigned long id;
};

/*
 * Bookkeeping for up to VIO_VAPP_MAX_MMAP simultaneous mappings.
 * NOTE(review): _map looks like a fixed table of per-slot pointers
 * and @wait a queue for callers blocked until a slot frees up —
 * confirm against the .c users.
 */
struct vio_mmap_entity_tree {
  void* _map[VIO_VAPP_MAX_MMAP];

  struct list_head idle;     // free slots
  spinlock_t idle_lock;      // protects @idle

  wait_queue_head_t wait;

  int id;
};

#endif // ! __VAPP_COMMON_H__
