#include <arch/cpu.h>
#include <mm/rmap.h>
#include <process.h>
#include <log.h>

/*
 * Attach an avm_chain to both of its owners: the vma's per-address-space
 * chain and the avm's shared reverse-map list.  The avm list is visible
 * to other processes and is guarded by the avm lock; the vma chain is
 * assumed private to the caller at this point — TODO confirm.
 */
void avm_chain_link(struct vma* vma, struct avm_chain* avc, struct avm* avm) {
    avc->avm = avm;
    avc->vma = vma;

    /* Per-vma side: no lock taken (caller-owned). */
    list_append(&vma->avm_chain, &avc->same_vma);

    /* Per-avm side: shared list, serialize with the avm lock. */
    avm_lock(avm);
    list_append(&avm->avc_list, &avc->same_avm);
    avm_unlock(avm);
}

/*
 * Record reverse-mapping information on a page: the avm it belongs to
 * and the virtual page number it maps within that address range.
 * NOTE(review): plain `inline` in a .c file relies on an external
 * definition being emitted elsewhere (C99 6.7.4) — confirm the header
 * declares this function, or consider `static inline`.
 */
inline void link_avm_page(struct avm* avm, page_t* page, uptr vaddr) {
    page->index = vaddr >> PG_SIZE_BITS;
    page->mapping = (uptr*)avm;
}

/* Detach avc from its avm's shared list, under the avm lock.
 * The vma-side link (same_vma) is left for the caller to remove. */
void avm_unlink(struct avm_chain* avc) {
    struct avm* owner = avc->avm;

    avm_lock(owner);
    list_remove(&avc->same_avm);
    avm_unlock(owner);
}

/* Tear down every avm link held by a vma: unhook each chain entry from
 * its avm list, remove it from the vma chain, and free it.  Uses the
 * two-cursor iteration form so removal during the walk is safe. */
void unlink_avms(struct vma* vma) {
    struct avm_chain* cursor;
    struct avm_chain* tmp;

    list_for_each(cursor, tmp, &vma->avm_chain, same_vma) {
        avm_unlink(cursor);
        list_remove(&cursor->same_vma);
        avm_chain_free(cursor);
    }
}

/*
 * Duplicate every avm link of src onto dst so dst participates in the
 * same reverse-map structures.  On allocation failure all partially
 * created links on dst are torn down and ENOMEM is returned.
 */
int avm_clone(struct vma* dst, struct vma* src) {
    struct avm_chain* src_avc;
    struct avm_chain* tmp;

    list_for_each(src_avc, tmp, &src->avm_chain, same_vma) {
        struct avm_chain* new_avc = avm_chain_alloc();
        if (!new_avc) {
            /* Unwind the links created so far before failing. */
            unlink_avms(dst);
            return ENOMEM;
        }
        avm_chain_link(dst, new_avc, src_avc->avm);
    }
    return 0;
}

/*
 * Fork-time avm setup for a child vma: first mirror all of src's avm
 * links onto dst, then allocate a fresh avm for dst that shares src's
 * avm root, so reverse-map walks over the root can reach both address
 * spaces.
 *
 * Returns 0 on success, ENOMEM on any allocation failure (partial
 * state on dst is unwound via unlink_avms()).
 */
int avm_fork(struct vma* src, struct vma* dst) {
    struct avm_chain* avc;
    struct avm* avm;

    /* A vma with no anonymous mappings needs no rmap state. */
    if (!src->avm) {
        return 0;
    }

    /* Copy every existing avm link of src onto dst. */
    if (avm_clone(dst, src)) {
        return ENOMEM;
    }

    avm = avm_alloc();
    if (!avm) {
        goto out;
    }
    avc = avm_chain_alloc();
    if (!avc) {
        avm_free(avm);
        goto out;
    }

    /* The child's avm joins the tree rooted at the parent's root. */
    avm->root = src->avm->root;
    /* NOTE(review): this increments the refcount of the freshly
     * allocated avm, not of the shared root it now points at — confirm
     * that is intended; normally the shared root is the object whose
     * lifetime needs extending. */
    atomic_fetch_add(&avm->ref, 1);
    dst->avm = avm;
    avm_chain_link(dst, avc, avm);

    return 0;

out:
    /* Undo the links created by avm_clone() above. */
    unlink_avms(dst);
    return ENOMEM;
}

/*
 * Resolve the avm a page is reverse-mapped to and lock the root of its
 * avm tree.  Returns NULL (nothing locked) when the page carries no
 * mapping; otherwise returns the avm with its root lock held.
 */
struct avm* get_lock_avm_from_page(page_t* page) {
    struct avm* avm = (struct avm*)page->mapping;

    if (avm == NULL) {
        return NULL;
    }

    avm_lock(avm->root);
    return avm;
}

/*
 * Find the current process's vma covering vaddr, then lock and return
 * its avm (root lock held on success).  Returns NULL — with nothing
 * locked — when no vma covers the address, or when the covering vma
 * has no avm attached.
 */
struct avm* get_lock_avm_from_addr(uptr vaddr) {
    struct vma* vma;

    for (vma = cur_proc->mm->vma_list; vma; vma = vma->next) {
        if (vma->start <= vaddr && vaddr < vma->end) {
            /* Fix: a vma may have no avm (avm_fork treats !src->avm as
             * valid), so dereferencing vma->avm->root unconditionally
             * here could fault.  Bail out instead. */
            if (!vma->avm) {
                return NULL;
            }
            avm_lock(vma->avm->root);
            return vma->avm;
        }
    }
    return NULL;
}

/*
 * Translate a page's stored virtual page number back into an address
 * and validate it against the vma's range.  Returns EFAULT (encoded as
 * an address value) when the page lies outside this vma.
 */
uptr vma_get_page_addr(struct vma* vma, page_t* page) {
    uptr addr = page->index << PG_SIZE_BITS;

    if (addr >= vma->start && addr < vma->end) {
        return addr;
    }
    return EFAULT;
}

/*
 * Walk mm's page table for vaddr through the PD_MP_1 mapping window.
 * Returns a pointer to the PTE, or NULL when the covering page
 * directory entry is not populated.
 *
 * The `page` argument is currently unused; it is kept so the signature
 * stays compatible with existing callers.
 */
pg_entry_t* get_page_table_entry(page_t* page, struct mm* mm, uptr vaddr) {
    (void)page; /* unused — see note above */

    uptr pd_addr = mm->proc->cr3;
    uptr vpn = vaddr >> PG_SIZE_BITS;

    /* Make the target process's page directory addressable. */
    mapping_page_dir(PD_MP_1, pd_addr);

    if (!PDE_ENTRY(PD_MP_1, vpn).data) {
        return NULL;
    }

    /* Fix: the original also NULL-checked &PTE_ENTRY(...), but an
     * address-of expression can never be NULL — that branch was dead
     * code and has been removed. */
    return &PTE_ENTRY(PD_MP_1, vpn);
}

/*
 * try_to_unmap_anon - try to unmap an anonymous page from every address
 * space referencing it, converting present PTEs into swap entries that
 * point at swap_info.  Implements second-chance (clock) aging: a page
 * with its accessed bit set survives this pass, and any PTEs already
 * converted are rolled back.
 *
 * Returns:
 *   EFAULT if the page has no avm mapping;
 *   1      if the page was kept (file-backed vma found, or got a
 *          second chance) — non-error;
 *   0      if all mappings were replaced by swap entries.
 */
int try_to_unmap_anon(page_t* page, struct swap_info* swap_info) {
    struct avm_chain* avc;
    struct avm* avm = get_lock_avm_from_page(page);
    pg_entry_t* entry;
    if (!avm) {
        return EFAULT;
    }
    bool second_chance = false;
    list_for_each_entry(avc, &avm->avc_list, same_avm) {
        struct vma* vma = avc->vma;
        if (vma->file) {
            // this is for anon-page not file-page
            avm_unlock(avm->root);
            return 1; // non-error return
            // NOTE(review): unreachable after the return above — decide
            // whether a file-backed vma should abort the whole walk
            // (current behavior) or merely be skipped (the `continue`).
            continue;
        }
        /* Skip chains whose vma does not actually cover this page. */
        uptr addr = vma_get_page_addr(vma, page);
        if (addr == (uptr)EFAULT) {
            continue;
        }

        entry = get_page_table_entry(page, vma->mm, addr);
        /* Skip stale chains: no PTE, not present, or the PTE now maps
         * a different physical frame. */
        if (!entry || entry->present == 0 || entry->frame != page_query_pn(page)) {
            continue;
        }

        /* Second-chance aging: clear the accessed bit and keep the
         * page this round; PG_LRU_LAST_CHANCE marks it for eviction
         * consideration next time. */
        if (entry->accessed) {
            entry->accessed = 0;
            page->flags |= PG_LRU_LAST_CHANCE;
            second_chance = true;
        } else {
            if (page->flags & PG_LRU_LAST_CHANCE) {
                page->flags &= ~PG_LRU_LAST_CHANCE;
                second_chance = true;
            }
        }

        if (second_chance) {
            /* Keep the page; any PTEs already swapped in earlier
             * iterations are restored below. */
            break;
        }

        /* Replace the present PTE with a swap entry; sign == 7 marks a
         * swapped-out slot in this PTE encoding. */
        entry->swap.slot = swap_info->slot;
        entry->swap.swap_id = swap_info->swap_id;
        entry->swap.present = 0;
        entry->swap.sign = 7;

        cpu_flush_page(addr);

        if (page->ref_counts == 1) {
            /* Last reference: stop without dropping the final count. */
            break;
        }
        page->ref_counts--;
    }

    if (second_chance) {
        /* Roll back: `entry` is the still-present PTE we broke on; copy
         * it over every swap entry (sign == 7) installed earlier in
         * this pass, walking back toward the list head. */
        pg_entry_t copy = *entry;
        list_for_each_reverse_to_head(avc, &avm->avc_list, same_avm) {
            struct vma* vma = avc->vma;
            uptr addr = vma_get_page_addr(vma, page);
            if (addr == (uptr)EFAULT) {
                continue;
            }

            entry = get_page_table_entry(page, vma->mm, addr);
            if (!entry || entry->swap.sign != 7) {
                continue;
            }
            *entry = copy;
            page->ref_counts++;
        }
    }

    avm_unlock(avm->root);
    return second_chance;
}

/*
 * try_to_remap_anon - bring a swapped-out anonymous page back in: walk
 * every avm chain covering the page and convert matching swap entries
 * (sign == 7) back into present PTEs pointing at this page's frame.
 *
 * Returns EFAULT when no avm covers the page's address, 0 otherwise.
 */
int try_to_remap_anon(page_t* page) {
    struct avm_chain* avc;
    struct avm* avm = get_lock_avm_from_addr(page->index << PG_SIZE_BITS);
    pg_entry_t* entry;
    if (!avm) {
        return EFAULT;
    }
    /* Re-attach the page to its reverse-mapping anchor. */
    page->mapping = (uptr*)avm;

    struct vma* vma;
    uptr vaddr, paddr = page_query_pn(page);
    list_for_each_entry(avc, &avm->avc_list, same_avm) {
        vma = avc->vma;
        vaddr = vma_get_page_addr(vma, page);
        if (vaddr == (uptr)EFAULT) {
            continue;
        }

        entry = get_page_table_entry(page, vma->mm, vaddr);
        /* Fix: get_page_table_entry() returns NULL when the page
         * directory entry is unpopulated; the original dereferenced
         * the result unconditionally. */
        if (!entry || entry->swap.sign != 7) {
            continue;
        }
        *entry = (pg_entry_t){
            .present = 1,
            .rw = vma_writeable(vma),
            .user = 1,
            .frame = paddr,
        };

        cpu_flush_page(vaddr);

        page->ref_counts++;
    }
    avm_unlock(avm->root);
    return 0;
}

/*
 * count_ref - count how many address spaces map vaddr with a PTE whose
 * raw bits equal pte.data, walking the avm chain list.
 *
 * Returns EFAULT when no avm covers vaddr, otherwise the match count
 * (which may be 0).
 */
int count_ref(uptr vaddr, pg_entry_t pte) {
    struct avm_chain* avc;
    struct avm* avm = get_lock_avm_from_addr(vaddr);
    pg_entry_t* entry;
    if (!avm) {
        return EFAULT;
    }

    struct vma* vma;
    int count = 0;
    list_for_each_entry(avc, &avm->avc_list, same_avm) {
        vma = avc->vma;
        /* Skip chains whose vma does not cover the address. */
        if (vaddr < vma->start || vaddr >= vma->end) {
            continue;
        }

        uptr pd_addr = vma->mm->proc->cr3;
        mapping_page_dir(PD_MP_1, pd_addr);

        /* Fix: mirror get_page_table_entry() and skip unpopulated page
         * directories instead of touching a PTE that does not exist. */
        if (!PDE_ENTRY(PD_MP_1, vaddr >> PG_SIZE_BITS).data) {
            continue;
        }

        entry = &PTE_ENTRY(PD_MP_1, vaddr >> PG_SIZE_BITS);
        if (entry->data == pte.data) {
            count++;
        }
    }
    avm_unlock(avm->root);
    return count;
}