// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2025 Cloud_Yun (1770669041@qq.com). All Rights Reserved.
 *
 * ZRAM age-cache (ATGC) mechanism.
 *
 * Maintains a red-black tree of per-process priorities: high-priority
 * processes are spared from compression while low-priority ones are
 * compressed more aggressively. This optimizes background/cache
 * management, improving background-app retention and foreground-app
 * performance.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "zram_atgc.h"

/* Slab cache for struct zram_atgc_entry, shared by all zram_atgc instances. */
static struct kmem_cache *atgc_entry_cache;

/*
 * Periodic garbage collector: evict entries whose last_used timestamp is
 * older than atgc->expire_jiffies, then re-arm itself.
 *
 * Expired entries are unlinked under the lock but freed (and logged)
 * afterwards, so no allocation/printk work happens with irqs disabled.
 * Uses spin_lock_irqsave to match the locking discipline of
 * zram_atgc_update(), which takes the same lock irq-safely.
 */
static void gc_work_fn(struct work_struct *work)
{
    struct zram_atgc *atgc = container_of(work, struct zram_atgc, gc_work.work);
    struct rb_node *node, *next;
    struct zram_atgc_entry *entry;
    unsigned long now = jiffies;
    unsigned long expire = atgc->expire_jiffies;
    unsigned long flags;
    LIST_HEAD(free_list);

    spin_lock_irqsave(&atgc->lock, flags);

    for (node = rb_first(&atgc->root); node; node = next) {
        /* Grab the successor first: rb_erase invalidates @node. */
        next = rb_next(node);
        entry = rb_entry(node, struct zram_atgc_entry, rb_node);

        if (time_after(now, entry->last_used + expire)) {
            rb_erase(&entry->rb_node, &atgc->root);
            list_add(&entry->list, &free_list);
        }
    }

    spin_unlock_irqrestore(&atgc->lock, flags);

    /* Free (and log) outside the critical section. */
    while (!list_empty(&free_list)) {
        entry = list_first_entry(&free_list, struct zram_atgc_entry, list);
        list_del(&entry->list);
        pr_debug("[ZRAM-ATGC] : Expired entry pid=%d\n", entry->pid);
        kmem_cache_free(atgc_entry_cache, entry);
    }

    /* Re-arm; cancel_delayed_work_sync() in destroy handles self-requeue. */
    queue_delayed_work(system_unbound_wq, &atgc->gc_work,
                       msecs_to_jiffies(ATGC_ENTRY_EXPIRE_MS));
}

/*
 * Look up the entry for @pid in the rbtree (keyed by pid).
 * Caller must hold atgc->lock. Returns NULL if not present.
 */
static struct zram_atgc_entry *find_entry(struct zram_atgc *atgc, pid_t pid)
{
    struct rb_node *cur = atgc->root.rb_node;

    while (cur) {
        struct zram_atgc_entry *e =
            rb_entry(cur, struct zram_atgc_entry, rb_node);

        if (pid == e->pid)
            return e;

        cur = (pid < e->pid) ? cur->rb_left : cur->rb_right;
    }

    return NULL;
}

/*
 * Link @new into the pid-ordered rbtree and rebalance.
 * Caller must hold atgc->lock and guarantee (via find_entry) that no
 * entry with the same pid already exists.
 */
static void insert_entry(struct zram_atgc *atgc, struct zram_atgc_entry *new)
{
    struct rb_node *parent = NULL;
    struct rb_node **slot = &atgc->root.rb_node;

    while (*slot) {
        struct zram_atgc_entry *cur;

        parent = *slot;
        cur = rb_entry(parent, struct zram_atgc_entry, rb_node);
        slot = (new->pid < cur->pid) ? &parent->rb_left : &parent->rb_right;
    }

    rb_link_node(&new->rb_node, parent, slot);
    rb_insert_color(&new->rb_node, &atgc->root);
}

void zram_atgc_init(struct zram_atgc *atgc)
{
    if (!atgc_entry_cache) {
        atgc_entry_cache = KMEM_CACHE(zram_atgc_entry, SLAB_PANIC);
        printk(KERN_INFO "[ZRAM-ATGC] : Cache created\n");
    }

    atgc->root = RB_ROOT;
    spin_lock_init(&atgc->lock);
    atomic_set(&atgc->enabled, 1);
    atgc->expire_jiffies = msecs_to_jiffies(ATGC_ENTRY_EXPIRE_MS);
    atgc->threshold = HIGH_PRIO_THRESHOLD;
    
    INIT_DELAYED_WORK(&atgc->gc_work, gc_work_fn);
    queue_delayed_work(system_unbound_wq, &atgc->gc_work, 
                       msecs_to_jiffies(ATGC_ENTRY_EXPIRE_MS));
    
    printk(KERN_INFO "[ZRAM-ATGC] : Initialized\n");
}

/*
 * Tear down one zram_atgc instance: disable it, stop the GC work, and
 * free every remaining entry.
 *
 * Disabling first makes concurrent zram_atgc_update() callers bail out
 * before touching the tree. Uses spin_lock_irqsave to match the
 * irq-safe locking used by zram_atgc_update() on the same lock.
 * The shared atgc_entry_cache is intentionally left alive for other
 * instances.
 */
void zram_atgc_destroy(struct zram_atgc *atgc)
{
    struct rb_node *node;
    struct zram_atgc_entry *entry;
    unsigned long flags;
    LIST_HEAD(free_list);

    /* Stop new updates, then stop (and wait for) the self-rearming GC. */
    atomic_set(&atgc->enabled, 0);
    cancel_delayed_work_sync(&atgc->gc_work);

    spin_lock_irqsave(&atgc->lock, flags);
    while ((node = rb_first(&atgc->root))) {
        entry = rb_entry(node, struct zram_atgc_entry, rb_node);
        rb_erase(node, &atgc->root);
        list_add(&entry->list, &free_list);
    }
    spin_unlock_irqrestore(&atgc->lock, flags);

    /* Free outside the critical section. */
    while (!list_empty(&free_list)) {
        entry = list_first_entry(&free_list, struct zram_atgc_entry, list);
        list_del(&entry->list);
        kmem_cache_free(atgc_entry_cache, entry);
    }

    pr_info("[ZRAM-ATGC] : Destroyed\n");
}

/*
 * Decide whether @task's pages should be compressed.
 *
 * Returns true (compress) when ATGC is disabled or no task is given;
 * otherwise compress only tasks whose oom_score_adj exceeds the
 * configured threshold, i.e. low-priority / background tasks.
 */
bool zram_atgc_should_compress(struct zram_atgc *atgc, struct task_struct *task)
{
    if (!task)
        return true;

    if (!atomic_read(&atgc->enabled))
        return true;

    return task->signal->oom_score_adj > atgc->threshold;
}

/*
 * Record/refresh the priority entry for @task.
 *
 * Fast path: if an entry for task->pid exists, refresh its score and
 * timestamp under the lock. Slow path: allocate outside the lock
 * (GFP_KERNEL may sleep — NOTE(review): confirm all callers run in
 * process context), then re-check under the lock and either insert the
 * new entry or, if someone raced us, update theirs and free ours.
 *
 * NOTE(review): entries are keyed by pid; pid reuse after process exit
 * is only bounded by the GC expiry — confirm this staleness is
 * acceptable for the priority decision.
 */
void zram_atgc_update(struct zram_atgc *atgc, struct task_struct *task)
{
    struct zram_atgc_entry *entry, *new = NULL;
    pid_t pid;
    int oom_score;
    unsigned long flags;

    /* Guard against NULL, consistent with zram_atgc_should_compress(). */
    if (!task || !atomic_read(&atgc->enabled))
        return;

    pid = task->pid;
    oom_score = task->signal->oom_score_adj;

    spin_lock_irqsave(&atgc->lock, flags);

    entry = find_entry(atgc, pid);
    if (entry) {
        entry->oom_score_adj = oom_score;
        entry->last_used = jiffies;
        spin_unlock_irqrestore(&atgc->lock, flags);
        return;
    }

    spin_unlock_irqrestore(&atgc->lock, flags);

    new = kmem_cache_alloc(atgc_entry_cache, GFP_KERNEL);
    if (!new)
        return;

    new->pid = pid;
    new->oom_score_adj = oom_score;
    new->last_used = jiffies;
    INIT_LIST_HEAD(&new->list);

    spin_lock_irqsave(&atgc->lock, flags);

    /* Re-check: another CPU may have inserted this pid while we slept. */
    entry = find_entry(atgc, pid);
    if (likely(!entry)) {
        insert_entry(atgc, new);
        pr_debug("[ZRAM-ATGC] : New entry pid=%d oom=%d\n", pid, oom_score);
        spin_unlock_irqrestore(&atgc->lock, flags);
        return;
    }

    /* Lost the race: refresh the winner's entry and drop ours. */
    entry->oom_score_adj = oom_score;
    entry->last_used = jiffies;
    spin_unlock_irqrestore(&atgc->lock, flags);
    kmem_cache_free(atgc_entry_cache, new);
}
