/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2025 Inochi Amaoto

#define pr_fmt(fmt) "virtio-vapp-us: " fmt

#include "virtio-vapp.h"
#include "virtio-vapp-dtq.h"
#include "virtio-vapp-vio.h"
#include "vio-entity.h"
#include "vio-mm.h"

/* Serializes additions, removals and lookups on virtio_vio_hash. */
static DEFINE_MUTEX(virtio_vio_mutex);
/* Open vapp contexts, keyed by context id (2^8 buckets). */
static DEFINE_READ_MOSTLY_HASHTABLE(virtio_vio_hash, 8);
/* Id source for new contexts; starts at 1 so 0 means "unassigned". */
static atomic_t vio_id_pool = ATOMIC_INIT(1);

/*
 * Look up the vapp context whose id equals @fp.
 *
 * Caller must hold virtio_vio_mutex (or otherwise pin the table).
 * Returns the matching context, or NULL if none is registered.
 */
static struct vio_vapp_context* __virtio_vapp_get_vio_dev(u64 fp)
{
  struct vio_vapp_context* iter;

  hash_for_each_possible_rcu(virtio_vio_hash, iter, node, (unsigned long)fp) {
    u64 cur = iter->id;

    /* Entries without an assigned id are invisible to lookup. */
    if (!cur)
      continue;
    if (cur == fp)
      return iter;
  }

  return NULL;
}

/*
 * Locked lookup of a vapp context by id.
 *
 * Returns the matching context or NULL. NOTE(review): no reference is
 * taken; the returned pointer is only safe while the context cannot be
 * released concurrently — confirm against callers.
 */
struct vio_vapp_context* virtio_vapp_get_vio_dev(u64 fp)
{
  struct vio_vapp_context* found;

  mutex_lock(&virtio_vio_mutex);
  found = __virtio_vapp_get_vio_dev(fp);
  mutex_unlock(&virtio_vio_mutex);

  return found;
}

/*
 * Hand out the next context id. atomic_t is 32-bit, so ids eventually
 * wrap; callers treat 0 as "unassigned" and must skip it.
 */
static inline u64 vio_get_id(void)
{
  u64 id = atomic_fetch_add(1, &vio_id_pool);

  return id;
}

static long vio_vapp_ioctl_open_req(struct file* fp, unsigned long arg)
{
  struct vio_vapp_request req;
  struct vio_vapp_entity_tree* tree;
  struct vio_vapp_entity* entity;
  struct vio_mmap_entity* ment;
  struct vio_vapp_context* vapp = fp->private_data;
  long ret;

  if (raw_copy_from_user(&req, (void __user*)arg, sizeof(req)))
    return -EFAULT;

  pr_info("open req\n");
  pr_info("req.id: %llu\n", req.id);
  pr_info("req.count: %llu\n", req.count);

  if (req.count > VIO_VAPP_ENTITY_SIZE)
    return -E2BIG;

  tree = virtio_vio_vapp_select_tree(vapp, VIO_TYPE_GUEST);
  entity = vio_vapp_entity_alloc(virtio_vapp_get_dev_id(), req.count);
  if (IS_ERR(entity))
    return -ENOMEM;

  if (vapp_entity_tree_insert(tree, entity)) {
    ret = -EAGAIN;
    goto insert_failed;
  }

  ment = vio_mmap_entity_tree_map_entity(&vapp->mtree, entity);
  if (IS_ERR(ment)) {
    ret = PTR_ERR(ment);
    goto ment_failed;
  }
  ment->entity = entity;

  ret = vio_mmap_ctl(fp, ment);
  if (IS_ERR_VALUE(ret))
    goto mmap_failed;

  req.addr = (void*)ret;

  entity->type = VIO_TYPE_CLEAR_GUEST_BIT(req.type) | VIO_TYPE_GUEST;
  req.type = entity->type;
  req.id = entity->id;

  if (raw_copy_to_user((void __user*)arg, &req, sizeof(req))) {
    ret = -EFAULT;
    goto other_failed;
  }

  pr_info("req.addr: %p\n", req.addr);

  vio_entity_set_status_mapped(entity);
  vio_entity_set_status_req(entity);
  vio_vapp_entity_set_fid(entity, vapp->id);

  return 0;

 other_failed:
  vio_munmap_ctl(fp, entity, entity->uaddr);
 mmap_failed:
  vio_mmap_entity_tree_unmap_entity(&vapp->mtree, ment);
 ment_failed:
  vapp_entity_tree_remove(tree, entity);
 insert_failed:
  vio_vapp_entity_free(entity);
  return ret;
}
/*
 * VIO_VAPP_IO_CLOSE_REQ: unmap a previously opened guest entity, trim
 * it to the final @req.count and forward it to the peer as a packet.
 *
 * Returns 0 on success or a negative errno.
 */
static long vio_vapp_ioctl_close_req(struct file* fp, unsigned long arg)
{
  struct vio_vapp_request req;
  struct vio_vapp_entity_tree* tree;
  struct vio_vapp_entity* entity;
  struct vio_vapp_context* vapp = fp->private_data;
  long ret;

  /*
   * copy_from_user() performs the access_ok() validation that the
   * previous raw_copy_from_user() call skipped — required for any
   * pointer coming straight from userspace.
   */
  if (copy_from_user(&req, (void __user*)arg, sizeof(req)))
    return -EFAULT;

  pr_info("close req\n");
  pr_info("req.count: %llu\n", req.count);
  pr_info("req.id: %llu\n", req.id);

  if (req.count > VIO_VAPP_ENTITY_SIZE)
    return -E2BIG;

  tree = virtio_vio_vapp_select_tree(vapp, VIO_TYPE_GUEST);
  entity = vapp_entity_tree_get(tree, req.id);
  if (unlikely(!entity))
    return -ENXIO;

  /* Only entities opened and mapped via OPEN_REQ may be closed here. */
  if (unlikely(!vio_entity_status_is(entity, VIO_STATUS_REQ_MMAP)))
    return -EACCES;

  /* The final payload size cannot exceed the original allocation. */
  if (unlikely(req.count > entity->alloc))
    return -EINVAL;

  ret = vio_munmap_ctl(fp, entity, (unsigned long)req.addr);
  if (ret)
    return ret;
  pr_info("req-closed: munmap ok\n");

  entity->alloc = req.count;
  entity->type = VIO_TYPE_CLEAR_GUEST_BIT(req.type) | VIO_TYPE_GET_GUEST_BIT(entity->type) | VIO_TYPE_GUEST;

  vio_vapp_entity_init_map(entity);

  if (vio_vapp_entity_send_packet(entity, tree))
    return -EAGAIN;

  pr_info("req.id: %llu\n", req.id);

  /* Transition REQ+MMAP -> RSP now that the packet is on its way. */
  vio_entity_unset_status_req(entity);
  vio_entity_unset_status_mapped(entity);
  vio_entity_set_status_rsp(entity);

  return 0;
}

/*
 * VIO_VAPP_IO_OPEN_RSP: wait for the notification matching @rsp.id, map
 * the associated entity into the caller and return the address/size
 * through @arg.
 *
 * Returns 0 on success or a negative errno; on failure the notify entry
 * is re-mounted so the response can be retried.
 */
static long vio_vapp_ioctl_open_rsp(struct file* fp, unsigned long arg)
{
  struct vio_vapp_respond rsp;
  struct vio_vapp_entity_tree* tree;
  struct vio_vapp_entity* entity;
  struct vio_vapp_notify_entity* notify;
  struct vio_mmap_entity* ment;
  struct vio_vapp_context* vapp = fp->private_data;
  long ret;
  u32 status;

  /* Checked copy: raw_copy_from_user() would bypass access_ok(). */
  if (copy_from_user(&rsp, (void __user*)arg, sizeof(rsp)))
    return -EFAULT;

  pr_info("open rsp\n");
  pr_info("rsp.id: %llu\n", rsp.id);
  pr_info("rsp.type: %u\n", rsp.type);

  tree = virtio_vio_vapp_select_tree(vapp, VIO_TYPE_GUEST);
  entity = vapp_entity_tree_get(tree, rsp.id);
  if (unlikely(!entity))
    return -ENXIO;

  /*
   * Bug fix: the original tested (status | VIO_STATUS_REQ_CHECK),
   * which is bitwise-OR with a nonzero constant and therefore always
   * true, so the -EACCES path could never trigger. Test the flag with
   * bitwise-AND instead.
   */
  status = vio_vapp_entity_get_status(entity);
  if (!((status == VIO_STATUS_RSP) || (status & VIO_STATUS_REQ_CHECK)))
    return -EACCES;

  notify = vio_vapp_notify_queue_wait(&vapp->queue, rsp.id);
  if (IS_ERR(notify))
    return PTR_ERR(notify);

  ment = vio_mmap_entity_tree_map_entity(&vapp->mtree, entity);
  if (IS_ERR(ment)) {
    ret = PTR_ERR(ment);
    goto ment_failed;
  }
  ment->entity = entity;

  /* Maps the entity; on success @ret is the userspace address. */
  ret = vio_mmap_ctl(fp, ment);
  if (IS_ERR_VALUE(ret))
    goto mmap_failed;

  rsp.addr = (void*)ret;
  rsp.count = entity->alloc;

  /* Checked copy back to userspace. */
  if (copy_to_user((void __user*)arg, &rsp, sizeof(rsp))) {
    ret = -EFAULT;
    goto user_failed;
  }

  pr_info("rsp.count: %llu\n", rsp.count);

  vio_entity_set_status_mapped(entity);
  vio_vapp_notify_entity_free(notify);

  return 0;

 user_failed:
  vio_munmap_ctl(fp, entity, entity->uaddr);
 mmap_failed:
  vio_mmap_entity_tree_unmap_entity(&vapp->mtree, ment);
 ment_failed:
  /* Put the notification back so a later OPEN_RSP can consume it. */
  vio_vapp_notify_queue_recovery_mount(&vapp->queue, notify);
  return ret;
}
/*
 * VIO_VAPP_IO_CLOSE_RSP: unmap a response entity that userspace has
 * finished with, then remove and free it.
 *
 * Returns 0 on success or a negative errno.
 */
static long vio_vapp_ioctl_close_rsp(struct file* fp, unsigned long arg)
{
  struct vio_vapp_respond rsp;
  struct vio_vapp_entity_tree* tree;
  struct vio_vapp_entity* entity;
  struct vio_vapp_context* vapp = fp->private_data;
  long ret;

  /* Checked copy: raw_copy_from_user() would bypass access_ok(). */
  if (copy_from_user(&rsp, (void __user*)arg, sizeof(rsp)))
    return -EFAULT;

  /* Log-message typo fixed: "cloase" -> "close". */
  pr_info("rsp-close: rsp.id: %llu\n", rsp.id);

  tree = virtio_vio_vapp_select_tree(vapp, VIO_TYPE_GUEST);
  entity = vapp_entity_tree_get(tree, rsp.id);
  if (unlikely(!entity))
    return -ENXIO;

  /* Only mapped response entities may be closed through this path. */
  if (unlikely(!vio_entity_status_is(entity, VIO_STATUS_RSP | VIO_STATUS_MMAP)))
    return -EACCES;

  ret = vio_munmap_ctl(fp, entity, (unsigned long)rsp.addr);
  if (ret)
    return ret;

  vapp_entity_tree_remove(tree, entity);
  vio_vapp_entity_free(entity);

  return 0;
}

/*
 * Ioctl dispatcher for the vapp user device.
 *
 * Returns the handler's result, or -ENOIOCTLCMD for unrecognized
 * commands. The unreachable trailing "return 0" after the exhaustive
 * switch has been removed.
 */
static long vio_vapp_dev_ioctl(struct file* fp, unsigned int cmd, unsigned long arg)
{
  switch (cmd) {
    case VIO_VAPP_IO_OPEN_REQ:
      return vio_vapp_ioctl_open_req(fp, arg);
    case VIO_VAPP_IO_CLOSE_REQ:
      return vio_vapp_ioctl_close_req(fp, arg);
    case VIO_VAPP_IO_OPEN_RSP:
      return vio_vapp_ioctl_open_rsp(fp, arg);
    case VIO_VAPP_IO_CLOSE_RSP:
      return vio_vapp_ioctl_close_rsp(fp, arg);
    default:
      return -ENOIOCTLCMD;
  }
}

/*
 * mmap handler: resolve vm_pgoff (which carries the map-entity id
 * handed out at ioctl time) to its map entity and attach it to the VMA.
 *
 * Returns 0 on success or -ENODEV if no entity matches the offset.
 */
static int vio_vapp_dev_mmap(struct file* fp, struct vm_area_struct* vma)
{
  struct vio_vapp_context* ctx = fp->private_data;
  struct vio_mmap_entity* ment;

  ment = vio_mmap_entity_tree_get_map_entity(&ctx->mtree, vma->vm_pgoff);
  pr_info("vio-mmap: ment %p, id: %lu\n", ment, vma->vm_pgoff);
  if (unlikely(!ment))
    return -ENODEV;

  vma->vm_private_data = ment;
  vma->vm_flags |= VIO_MMAP_FLAG;
  vma->vm_ops = &vio_vm_ops;

  return 0;
}

/*
 * Open handler: allocate a fresh context id, create the per-fd context
 * and publish it in the lookup hash.
 *
 * Returns 0 on success, -EEXIST if the (wrapped) id is already in use,
 * or the error from vio_vapp_context_alloc().
 */
static int vio_vapp_dev_open(struct inode *inode, struct file *fp)
{
  struct vio_vapp_context* ctx;
  u64 id;

  /* Id 0 means "unassigned"; skip it when the id counter wraps. */
 again:
  id = vio_get_id();
  if (unlikely(!id))
    goto again;

  /* Bug fix: the format string was missing its trailing newline. */
  pr_info("open: fid %llu\n", id);

  mutex_lock(&virtio_vio_mutex);
  ctx = __virtio_vapp_get_vio_dev(id);
  if (unlikely(ctx)) {
    mutex_unlock(&virtio_vio_mutex);
    return -EEXIST;
  }

  ctx = vio_vapp_context_alloc(id);
  if (IS_ERR(ctx)) {
    mutex_unlock(&virtio_vio_mutex);
    return PTR_ERR(ctx);
  }

  hash_add_rcu(virtio_vio_hash, &ctx->node, ctx->id);
  mutex_unlock(&virtio_vio_mutex);

  fp->private_data = ctx;

  return 0;
}

/*
 * Release handler: unlink the context from the lookup hash, then tear
 * it down and free it.
 *
 * NOTE(review): the context is freed immediately after hash_del_rcu().
 * Every lookup visible in this file runs under virtio_vio_mutex (held
 * here too), so those are safe; if any lockless RCU reader walks
 * virtio_vio_hash elsewhere, a grace period (synchronize_rcu() or
 * kfree_rcu()) would be required before freeing — confirm.
 */
static int vio_vapp_dev_release(struct inode *inode, struct file *fp)
{
  struct vio_vapp_context* ctx = fp->private_data;

  mutex_lock(&virtio_vio_mutex);
  /* Contexts without an id were never published in the hash. */
  if (ctx->id)
    hash_del_rcu(&ctx->node);
  mutex_unlock(&virtio_vio_mutex);

  vio_vapp_context_deinit(ctx);
  vio_vapp_context_free(ctx);

  return 0;
}


/* File operations for the vapp user misc device. */
static const struct file_operations vio_vapp_fops = {
  .owner          = THIS_MODULE,
  .open           = vio_vapp_dev_open,
  .release        = vio_vapp_dev_release,
  // .poll           = vio_vapp_dev_poll,
  .unlocked_ioctl = vio_vapp_dev_ioctl,
  .llseek         = noop_llseek, /* the device is not seekable */
  .mmap           = vio_vapp_dev_mmap,
  .compat_ioctl   = compat_ptr_ioctl, /* 32-bit ioctls share the 64-bit path */
};

/* Misc character device descriptor; registered by vio_vapp_init(). */
static struct miscdevice vio_vapp_misc = {
  .minor  = VAPP_USER_DEVICE_MINOR,
  .name   = VAPP_USER_DEVICE_NAME,
  .fops   = &vio_vapp_fops,
  .mode   = VAPP_USER_DEVICE_MODE,
};

/*
 * Register the vapp user-facing misc device.
 * Returns 0 on success or the negative errno from misc_register().
 */
int vio_vapp_init(void)
{
  int ret;

  ret = misc_register(&vio_vapp_misc);
  return ret;
}

/* Unregister the misc device registered by vio_vapp_init(). */
void vio_vapp_exit(void)
{
  misc_deregister(&vio_vapp_misc);
}
