/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2025 Inochi Amaoto

#ifndef __VIO_MM_H__
#define __VIO_MM_H__

#include "vio-entity.h"
#include <linux/mm.h>

#define vio_vapp_mm_entity_id_cmp(_x, _y) ((_x) < (_y))

/*
 * Allocate the next unused mmap slot id, or -1 when the table is full.
 * Caller must hold mtree->idle_lock; ids are handed out monotonically and
 * only the most recent one can be given back via
 * vio_mmap_entity_tree_return_idle_id().
 */
static inline int vio_mmap_entity_tree_get_idle_id(struct vio_mmap_entity_tree* mtree)
{
  /*
   * Valid _map[] indices are [0, VIO_VAPP_MAX_MMAP) — both the lookup
   * (vio_mmap_entity_tree_get_map_entity) and the destroy loop use
   * "< VIO_VAPP_MAX_MMAP".  The old "> VIO_VAPP_MAX_MMAP" check let
   * id == VIO_VAPP_MAX_MMAP escape, an out-of-bounds slot; use >=.
   */
  if (mtree->id >= VIO_VAPP_MAX_MMAP)
    return -1;
  return (mtree->id)++;
}

/*
 * Give back the most recently handed-out slot id (the id counter is a
 * simple LIFO bump allocator).  Caller must hold mtree->idle_lock.
 */
static inline void vio_mmap_entity_tree_return_idle_id(struct vio_mmap_entity_tree* mtree)
{
  mtree->id -= 1;
}


/*
 * Prepare a freshly allocated mmap entity: record its slot id and make the
 * idle-list node self-linked so list_empty() reports it as unlinked.
 */
static inline void vio_mmap_entity_init(struct vio_mmap_entity* ment, int id)
{
  INIT_LIST_HEAD(&ment->idle_node);
  ment->id = id;
}

/*
 * Pop one entity off the idle list, or return NULL when it is empty.
 * Serves as the wake-up condition for sleepers in
 * vio_mmap_entity_tree_get_idle_entity().
 */
static struct vio_mmap_entity* __vio_mmap_entity_tree_get_idle_entity(struct vio_mmap_entity_tree* mtree)
{
  struct vio_mmap_entity* found;

  spin_lock(&mtree->idle_lock);
  found = list_first_entry_or_null(&mtree->idle, struct vio_mmap_entity, idle_node);
  if (found != NULL)
    list_del_init(&found->idle_node);
  spin_unlock(&mtree->idle_lock);

  return found;
}

static struct vio_mmap_entity* vio_mmap_entity_tree_get_idle_entity(struct vio_mmap_entity_tree* mtree)
{
  int ret;
  int id;
  struct vio_mmap_entity* ment = NULL;
  spin_lock(&mtree->idle_lock);

  if (!list_empty(&mtree->idle)) {
    ment = list_first_entry(&mtree->idle, struct vio_mmap_entity, idle_node);
    list_del_init(&ment->idle_node);
    goto end;
  }

  id = vio_mmap_entity_tree_get_idle_id(mtree);
  if (id < 0)
    goto wait;

  ment = kmalloc(sizeof(struct vio_mmap_entity), GFP_KERNEL);
  if (!ment) {
    vio_mmap_entity_tree_return_idle_id(mtree);
    ment = ERR_PTR(-ENOMEM);
    goto end;
  }

  vio_mmap_entity_init(ment, id);

 end:
  spin_unlock(&mtree->idle_lock);
  return ment;

 wait:
  spin_unlock(&mtree->idle_lock);

  ret = wait_event_interruptible(mtree->wait, (ment = __vio_mmap_entity_tree_get_idle_entity(mtree)));
  if (ret)
    return ERR_PTR(ret);

  return ment;
}

/*
 * Put an entity back on the idle list and wake one potential waiter in
 * vio_mmap_entity_tree_get_idle_entity().
 */
static void vio_mmap_entity_tree_return_idle_entity(struct vio_mmap_entity_tree* mtree, struct vio_mmap_entity* ment)
{
  spin_lock(&mtree->idle_lock);
  /*
   * A returned entity must be unlinked (self-linked node).  The old test
   * "next != prev" misses a node sitting in a two-entry list, where
   * next == prev but neither points back at the node itself;
   * list_empty() on the node is the canonical "is it unlinked" check.
   */
  if (!list_empty(&ment->idle_node))
    pr_err("ment (%lu) is not init as head\n", ment->id);
  list_add_tail(&ment->idle_node, &mtree->idle);
  spin_unlock(&mtree->idle_lock);

  wake_up(&mtree->wait);
}

/*
 * Look up the entity mapped at slot @id; NULL when out of range or unset.
 * NOTE(review): this read takes no lock — presumably callers serialize
 * against insert/remove; confirm at the call sites.
 */
static struct vio_mmap_entity* vio_mmap_entity_tree_get_map_entity(struct vio_mmap_entity_tree* mtree, u64 id)
{
  if (unlikely(id >= VIO_VAPP_MAX_MMAP))
    return NULL;
  return mtree->_map[id];
}

/*
 * Publish @node in the map table at its slot id.
 * Returns 0 on success or -ENODEV when the id is outside the table.
 */
static int vio_mmap_entity_tree_insert_map_entity(struct vio_mmap_entity_tree* mtree, struct vio_mmap_entity* node)
{
  unsigned long id = node->id;

  /*
   * Valid slots are [0, VIO_VAPP_MAX_MMAP): the lookup side and the
   * destroy loop both use "< VIO_VAPP_MAX_MMAP", so the old ">" test
   * allowed an out-of-bounds write at id == VIO_VAPP_MAX_MMAP.
   */
  if (unlikely(id >= VIO_VAPP_MAX_MMAP))
    return -ENODEV;

  mtree->_map[id] = node;
  return 0;
}

/* Clear the map slot owned by @node.  No locking and no id validation —
 * callers guarantee both. */
static void __vio_mmap_entity_tree_remove_map_entity(struct vio_mmap_entity_tree* mtree, struct vio_mmap_entity* node)
{
  mtree->_map[node->id] = NULL;
}

/* Public wrapper around the raw slot-clear helper; exists so locking or
 * validation policy can later be added in one place. */
static void vio_mmap_entity_tree_remove_map_entity(struct vio_mmap_entity_tree* mtree, struct vio_mmap_entity* entry)
{
  __vio_mmap_entity_tree_remove_map_entity(mtree, entry);
}

/*
 * Tear down a mapping: drop the entity from the map table, then recycle
 * it onto the idle list (which also wakes any waiter).
 */
static inline void vio_mmap_entity_tree_unmap_entity(struct vio_mmap_entity_tree* mtree, struct vio_mmap_entity* entry)
{
  vio_mmap_entity_tree_remove_map_entity(mtree, entry);
  vio_mmap_entity_tree_return_idle_entity(mtree, entry);
}

static inline struct vio_mmap_entity* vio_mmap_entity_tree_map_entity(struct vio_mmap_entity_tree* mtree, struct vio_vapp_entity* entity)
{
  struct vio_mmap_entity* ment;

  ment = vio_mmap_entity_tree_get_idle_entity(mtree);
  if (IS_ERR(ment))
    return ment;

  ment->entity = entity;

  if (vio_mmap_entity_tree_insert_map_entity(mtree, ment) < 0) {
    vio_mmap_entity_tree_return_idle_entity(mtree, ment);
    return ERR_PTR(-EEXIST);
  }

  return ment;
}

/*
 * Free every entity still published in the map table.  Entities in the
 * map are never on the idle list, so this does not double-free with
 * vio_mmap_entity_tree_idle_destroy().
 */
static inline void vio_mmap_entity_tree_map_destroy(struct vio_mmap_entity_tree* mtree)
{
  spin_lock(&mtree->idle_lock);
  for (unsigned long i = 0; i < VIO_VAPP_MAX_MMAP; ++i) {
    struct vio_mmap_entity* ment = mtree->_map[i];

    if (!ment)
      continue;

    pr_info("vio-ment-del: map node id %lu\n", ment->id);
    /* This already clears _map[i]; the old extra NULL store was redundant. */
    __vio_mmap_entity_tree_remove_map_entity(mtree, ment);
    /*
     * Entities come from kmalloc(), so plain kfree() is correct; the old
     * kvfree() is forbidden in atomic context for vmalloc memory, and we
     * hold idle_lock (a spinlock) here.
     */
    kfree(ment);
  }
  spin_unlock(&mtree->idle_lock);
}

/*
 * Free every entity parked on the idle list (part of final teardown;
 * no new waiters are expected at this point).
 */
static inline void vio_mmap_entity_tree_idle_destroy(struct vio_mmap_entity_tree* mtree)
{
  struct vio_mmap_entity* pos,* n;

  spin_lock(&mtree->idle_lock);

  list_for_each_entry_safe(pos, n, &mtree->idle, idle_node) {
    pr_info("vio-ment-del: idle node %p id %lu\n", pos, pos->id);
    list_del(&pos->idle_node);
    /*
     * kmalloc'd memory — use kfree(); the old kvfree() is unsafe to call
     * under a spinlock when it has to take the vfree path.
     */
    kfree(pos);
  }

  spin_unlock(&mtree->idle_lock);
}

/*
 * Full teardown: release mapped entities first, then everything left on
 * the idle list.
 */
static inline void vio_mmap_entity_tree_destroy(struct vio_mmap_entity_tree* mtree)
{
  vio_mmap_entity_tree_map_destroy(mtree);
  vio_mmap_entity_tree_idle_destroy(mtree);
}

/* Initialize an entity tree: empty map table, empty idle list, waiters'
 * queue, and the id bump allocator. */
static inline void vio_mmap_entity_tree_init(struct vio_mmap_entity_tree* mtree)
{
  spin_lock_init(&mtree->idle_lock);
  init_waitqueue_head(&mtree->wait);
  INIT_LIST_HEAD(&mtree->idle);
  memset(mtree->_map, 0x00, sizeof(mtree->_map));
  /* Ids start at 1: slot 0 would give a zero mmap offset
   * (id << PAGE_SHIFT) — presumably reserved on purpose; confirm against
   * vio_mmap_ctl(). */
  mtree->id = 1;
}

struct vio_mmap_entity_tree* vio_mmap_entity_tree_from_file(struct file* fp);

/*
 * ioctl backend: unmap the entity's userspace mapping.
 *
 * Returns 0 when nothing is mapped or the unmap succeeds, -EBADFD when
 * @addr does not match the recorded mapping, or the vm_munmap() error.
 */
static long vio_munmap_ctl(struct file* fp, struct vio_vapp_entity* entity, unsigned long addr)
{
  int ret;

  /* Nothing mapped: nothing to do. */
  if (!entity->uaddr)
    return 0;

  /* Userspace must hand back exactly the address it was given. */
  if (entity->uaddr != addr)
    return -EBADFD;

  pr_info("vio-munmap: entity->uaddr: %p\n", (void*)entity->uaddr);
  /* Propagate vm_munmap() failure instead of silently discarding it and
   * clearing uaddr anyway, which would orphan the mapping. */
  ret = vm_munmap(entity->uaddr, entity->alloc);
  if (ret)
    return ret;

  entity->uaddr = 0;
  return 0;
}

/*
 * ioctl backend: map the entity's buffer into the caller's address space.
 * The slot id is encoded as the mmap page offset so vio_vm_fault() can
 * recover which buffer backs the vma.
 *
 * Returns the userspace address on success or a negative error code.
 */
static long vio_mmap_ctl(struct file* fp, struct vio_mmap_entity* ment)
{
  unsigned long addr;

  /* Cast the id before shifting so a wide id cannot overflow an int shift. */
  addr = vm_mmap(fp, 0, ment->entity->alloc, PROT_READ | PROT_WRITE,
                 MAP_SHARED, ((unsigned long)ment->id) << PAGE_SHIFT);
  if (IS_ERR_VALUE(addr)) {
    pr_info("vio-mmap: mmap failed: %ld\n", (long)addr);
    return (long)addr;
  }

  ment->entity->uaddr = addr;

  /*
   * vm_mmap() returns a page-aligned address, so the old PAGE_ALIGN()
   * round-up here was a no-op at best — and would have returned an
   * address past the start of the mapping if it ever fired. Dropped.
   */
  pr_info("vio-mmap: mmap vaddr:%p(%016lx) id: %lu\n", (void*)addr, addr, ment->id);

  return addr;
}

/*
 * vma ->close callback: recycle the mmap entity backing this vma.
 */
static void vio_vm_close(struct vm_area_struct *vma)
{
  struct file* fp = vma->vm_file;
  struct vio_mmap_entity* ment = vma->vm_private_data;
  struct vio_mmap_entity_tree* mtree;

  /* Mirror vio_vm_fault(): private_data may be unset — the old code
   * dereferenced ment unconditionally in the warnings below. */
  if (unlikely(!ment)) {
    pr_warn("vio-vm-close: vma without mmap entity\n");
    return;
  }

  if (unlikely(!fp)) {
    pr_warn("vio-vm-close: mmap entity lost %lu\n", ment->id);
    return;
  }

  mtree = vio_mmap_entity_tree_from_file(fp);
  pr_info("vio-vm-close: entity: %llu, ment: %lu\n", ment->entity->id, ment->id);
  /*
   * NOTE(review): ->close also runs per-fragment when a vma is split or
   * duplicated on fork; recycling the entity on every invocation may be
   * premature in those cases — confirm the intended vma lifecycle.
   */
  vio_mmap_entity_tree_unmap_entity(mtree, ment);
}

/*
 * vma ->fault callback: resolve the faulting page from the entity's
 * buffer (vmalloc memory, per the vmalloc_to_page() lookup below).
 */
static vm_fault_t vio_vm_fault(struct vm_fault* vmf) {
  struct vm_area_struct* vma = vmf->vma;
  struct vio_mmap_entity* ment = vma->vm_private_data;
  unsigned long byte_off;
  void* kaddr;
  struct page* page;

  if (!ment)
    return VM_FAULT_SIGBUS;

  /* Byte offset of the faulting page within this mapping. */
  byte_off = (vmf->pgoff - vma->vm_pgoff) << PAGE_SHIFT;
  kaddr = (void*)(((unsigned long)(ment->entity->data)) + byte_off);

  page = vmalloc_to_page(kaddr);
  if (!page)
    return VM_FAULT_SIGBUS;

  /* Take a reference; core mm installs the page and drops it later. */
  get_page(page);
  vmf->page = page;

  return 0;
}

static struct vm_operations_struct vio_vm_ops = {
  .fault = vio_vm_fault,
  .close = vio_vm_close,
};

#endif // ! __VIO_MM_H__
