// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Inochi Amaoto

#define pr_fmt(fmt) "vhost-vapp-us: " fmt

#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "vhost-vapp.h"
#include "vhost-vapp-vio.h"
#include "vio-entity.h"
#include "vio-mm.h"

static struct vio_vapp_context vio_context;
static struct file* vio_fp = NULL;
static DEFINE_MUTEX(vio_mutex);

struct vio_vapp_context* vhost_vapp_get_vio_context(void)
{
  return &vio_context;
}


static long vio_vapp_ioctl_close_req(struct file* fp, unsigned long arg)
{
  struct vio_vapp_request req;
  // struct vio_vapp_context* ctx = fp->private_data;
  struct vhost_vapp_dev* dev;
  struct vio_vapp_entity_tree* tree;
  struct vio_vapp_entity* entity;
  long ret;

  if (raw_copy_from_user(&req, (void __user*)arg, sizeof(req)))
    return -EFAULT;

  pr_info("close req\n");
  pr_info("req.id: %llu\n", req.id);
  pr_info("req.uuid: %llu\n", req.uuid);
  pr_info("req.fp: %llu\n", req.fp);

  dev = vhost_vapp_get_dev(req.uuid);
  if (unlikely(!dev))
    return -ENODEV;

  tree = vhost_vapp_select_tree(dev, req.type);
  entity = vapp_entity_tree_get(tree, req.id);
  if (unlikely(!entity))
    return -ENXIO;

  if (unlikely(!vio_entity_status_is(entity, VIO_STATUS_REQ_MMAP)))
    return -EACCES;

  ret = vio_munmap_ctl(fp, entity, (unsigned long)req.addr);
  if (ret)
    return ret;

  vio_vapp_entity_free_data(entity);

  if (VIO_TYPE_IS_NOREPLY(entity->type)) {
    vapp_entity_tree_remove(tree, entity);
    vio_vapp_entity_free(entity);
    return 0;
  }

  vio_entity_unset_status_req(entity);
  vio_entity_unset_status_mapped(entity);
  vio_entity_set_status_rsp(entity);

  return 0;
}

static long vio_vapp_ioctl_open_req(struct file* fp, unsigned long arg)
{
  struct vio_vapp_request req;
  struct vio_vapp_notify_entity* notify;
  struct vhost_vapp_dev* dev;
  struct vio_vapp_entity_tree* tree;
  struct vio_vapp_entity* entity;
  struct vio_mmap_entity* ment;
  struct vio_vapp_context* ctx = fp->private_data;
  long ret;

  if (raw_copy_from_user(&req, (void __user*)arg, sizeof(req)))
    return -EFAULT;

  pr_info("open req\n");

 again:
  notify = vio_vapp_notify_queue_wait(&ctx->queue);
  if (IS_ERR(notify))
    return PTR_ERR(notify);

  req.uuid = notify->uuid;
  req.id = notify->id;
  req.type = notify->type;

  pr_info("req.id: %llu\n", req.id);
  pr_info("req.uuid: %llu\n", req.uuid);

  dev = vhost_vapp_get_dev(req.uuid);
  if (unlikely(!dev)) {
    pr_warn("notify from unexist vm: %llu\n", req.uuid);
    vio_vapp_notify_entity_free(notify);
    goto again;
  }

  tree = vhost_vapp_select_tree(dev, req.type);
  entity = vapp_entity_tree_get(tree, req.id);
  if (unlikely(!entity)) {
    pr_err("notify from unexist entity: %llu, uuid: %llu, possible leak\n", req.id, req.uuid);
    vio_vapp_notify_entity_free(notify);
    goto again;
  }

  /**
   * TODO: consider type for VIO_TYPE_HOST
   *
   * TODO: VIO_TYPE_GUEST is necessary to set status VIO_STATUS_REQ ?
   *       since it should has a different with new request.
   */
  if (unlikely(!vio_entity_status_is_inital(entity))) {
    pr_err("notify an non-inited entity: %llu, uuid: %llu, possible leak\n", req.id, req.uuid);
    vio_vapp_notify_entity_free(notify);
    goto again;
  }

  ment = vio_mmap_entity_tree_map_entity(&ctx->mtree, entity);
  if (IS_ERR(ment)) {
    ret = PTR_ERR(ment);
    goto ment_failed;
  }
  ment->entity = entity;

  req.addr = (void*)vio_mmap_ctl(fp, ment);
  if (IS_ERR_VALUE(req.addr)) {
    ret = (long)req.addr;
    goto mmap_failed;
  }

  req.count = entity->alloc;
  req.fp = entity->fid;

  if (raw_copy_to_user((void __user*)arg, &req, sizeof(req))) {
    ret = -EFAULT;
    goto user_failed;
  }

  pr_info("req.count: %llu\n", req.count);
  pr_info("req.addr: %p\n", req.addr);

  vio_entity_set_status_req(entity);
  vio_entity_set_status_mapped(entity);
  vio_vapp_notify_entity_free(notify);

  return 0;

 user_failed:
  vio_munmap_ctl(fp, entity, entity->uaddr);
 mmap_failed:
  vio_mmap_entity_tree_unmap_entity(&ctx->mtree, ment);
 ment_failed:
  vio_vapp_notify_queue_recovery_mount(&ctx->queue, notify);
  return ret;
}

static long vio_vapp_ioctl_open_rsp(struct file* fp, unsigned long arg)
{
  struct vio_vapp_respond rsp;
  struct vhost_vapp_dev* dev;
  struct vio_vapp_entity_tree* tree;
  struct vio_vapp_entity* entity;
  struct vio_mmap_entity* ment;
  struct vio_vapp_context* ctx = fp->private_data;
  long ret;

  if (raw_copy_from_user(&rsp, (void __user*)arg, sizeof(rsp)))
    return -EFAULT;

  if (rsp.count > VIO_VAPP_ENTITY_SIZE)
    return -E2BIG;

  pr_info("open rsp\n");
  pr_info("rsp.id: %llu\n", rsp.id);
  pr_info("rsp.uuid: %llu\n", rsp.uuid);
  pr_info("rsp.count: %llu\n", rsp.count);

  dev = vhost_vapp_get_dev(rsp.uuid);
  if (unlikely(!dev))
    return -ENODEV;

  tree = vhost_vapp_select_tree(dev, rsp.type);
  entity = vapp_entity_tree_get(tree, rsp.id);
  if (unlikely(!entity)) {
    pr_err("open respond for unexist entity: %llu, uuid: %llu, possible leak\n", rsp.id, rsp.uuid);
    return -ENXIO;
  }

  if (unlikely(!vio_entity_status_is(entity, VIO_STATUS_RSP)))
    return -EACCES;

  pr_info("rsp-open: get entity\n");

  entity->alloc = rsp.count;
  entity->data = vio_vapp_entity_alloc_data(rsp.count);
  if (!entity->data)
    return -ENOMEM;

  ment = vio_mmap_entity_tree_map_entity(&ctx->mtree, entity);
  if (IS_ERR(ment)) {
    ret = PTR_ERR(ment);
    goto ment_failed;
  }
  ment->entity = entity;
  pr_info("rsp-open: get mmap entity with id %lu\n", ment->id);

  ret = vio_mmap_ctl(fp, ment);
  if (IS_ERR_VALUE(ret))
    goto mmap_failed;

  rsp.addr = (void*)ret;
  entity->type = VIO_TYPE_CLEAR_GUEST_BIT(rsp.type) | VIO_TYPE_GET_GUEST_BIT(entity->type);
  rsp.type = entity->type;

  if (raw_copy_to_user((void __user*)arg, &rsp, sizeof(rsp))) {
    ret = -EFAULT;
    goto other_failed;
  }

  pr_info("rsp.addr: %p\n", rsp.addr);

  vio_entity_set_status_mapped(entity);

  return 0;

 other_failed:
  vio_munmap_ctl(fp, entity, entity->uaddr);
 mmap_failed:
  vio_mmap_entity_tree_unmap_entity(&ctx->mtree, ment);
 ment_failed:
  vio_vapp_entity_free_data(entity);
  return ret;
}

static long vio_vapp_ioctl_close_rsp(struct file* fp, unsigned long arg)
{
  struct vio_vapp_respond rsp;
  struct vhost_vapp_dev* dev;
  struct vio_vapp_entity_tree* tree;
  struct vio_vapp_entity* entity;
  long ret;

  if (raw_copy_from_user(&rsp, (void __user*)arg, sizeof(rsp)))
    return -EFAULT;

  pr_info("close rsp\n");
  pr_info("rsp.id: %llu\n", rsp.id);
  pr_info("rsp.uuid: %llu\n", rsp.uuid);

  if (rsp.count > VIO_VAPP_ENTITY_SIZE)
    return -E2BIG;

  dev = vhost_vapp_get_dev(rsp.uuid);
  if (unlikely(!dev))
    return -ENODEV;

  tree = vhost_vapp_select_tree(dev, rsp.type);
  entity = vapp_entity_tree_get(tree, rsp.id);
  if (unlikely(!entity)) {
    pr_err("open respond for unexist entity: %llu, uuid: %llu, possible leak\n", rsp.id, rsp.uuid);
    return -ENXIO;
  }

  if (unlikely(!vio_entity_status_is(entity,  VIO_STATUS_RSP_MMAP)))
    return -EACCES;

  if (unlikely(rsp.count > entity->alloc))
    return -EINVAL;

  ret = vio_munmap_ctl(fp, entity, (unsigned long)rsp.addr);
  if (ret)
    return ret;

  entity->alloc = rsp.count;
  entity->type = VIO_TYPE_CLEAR_GUEST_BIT(rsp.type) | VIO_TYPE_GET_GUEST_BIT(entity->type) | VIO_TYPE_NOREPLY | VIO_TYPE_GUEST;

  vio_vapp_entity_init_map(entity);
  // TODO: send reply;
  if (vio_vapp_entity_send_packet(entity, tree, dev))
    return -EAGAIN;

  vio_entity_set_status_rsp(entity);
  vio_entity_set_status_req(entity);
  vio_entity_unset_status_mapped(entity);

  return 0;
}

static long vio_vapp_dev_ioctl(struct file* fp, unsigned int cmd, unsigned long arg)
{
  switch (cmd) {
    case VIO_VAPP_IO_OPEN_REQ:
      return vio_vapp_ioctl_open_req(fp, arg);
    case VIO_VAPP_IO_CLOSE_REQ:
      return vio_vapp_ioctl_close_req(fp, arg);
    case VIO_VAPP_IO_OPEN_RSP:
      return vio_vapp_ioctl_open_rsp(fp, arg);
    case VIO_VAPP_IO_CLOSE_RSP:
      return vio_vapp_ioctl_close_rsp(fp, arg);
    default:
      return -ENOIOCTLCMD;
  }
  return 0;
}

static __poll_t vio_vapp_dev_poll(struct file* fp, struct poll_table_struct* table)
{
  return 0;
}

static int vio_vapp_dev_mmap(struct file* fp, struct vm_area_struct* vma)
{
  struct vio_vapp_context* ctx = fp->private_data;
  struct vio_mmap_entity* ment = vio_mmap_entity_tree_get_map_entity(&ctx->mtree, vma->vm_pgoff);
  pr_info("vio-mmap: ment %p, id: %lu\n", ment, vma->vm_pgoff);
  if (unlikely(!ment))
    return -ENODEV;

  vma->vm_ops = &vio_vm_ops;
  vma->vm_flags |= VIO_MMAP_FLAG;
  vma->vm_private_data = ment;

  return 0;
}

static int vio_vapp_dev_open(struct inode *inode, struct file *fp)
{
  mutex_lock(&vio_mutex);
  if (unlikely(vio_fp)) {
    mutex_unlock(&vio_mutex);
    return -EEXIST;
  }

  vio_fp = fp ;
  mutex_unlock(&vio_mutex);

  fp->private_data = &vio_context;
  return 0;
}

static int vio_vapp_dev_release(struct inode *inode, struct file *fp)
{
  mutex_lock(&vio_mutex);
  vio_fp = NULL;
  mutex_unlock(&vio_mutex);

  return 0;
}

static const struct file_operations vio_vapp_fops = {
  .owner          = THIS_MODULE,
  .open           = vio_vapp_dev_open,
  .release        = vio_vapp_dev_release,
  .poll           = vio_vapp_dev_poll,
  .unlocked_ioctl = vio_vapp_dev_ioctl,
  .llseek         = noop_llseek,
  .mmap           = vio_vapp_dev_mmap,
  .compat_ioctl   = compat_ptr_ioctl,
};

static struct miscdevice vio_vapp_misc = {
  .minor  = VAPP_USER_DEVICE_MINOR,
  .name   = VAPP_USER_DEVICE_NAME,
  .fops   = &vio_vapp_fops,
  .mode   = VAPP_USER_DEVICE_MODE,
};

int __init vio_vapp_init(void)
{
  vio_vapp_context_init(&vio_context);
  return misc_register(&vio_vapp_misc);
}

void __exit vio_vapp_exit(void)
{
  misc_deregister(&vio_vapp_misc);
  vio_vapp_context_deinit(&vio_context);
}
