#include "config.h"

#include <time.h>
#include <errno.h>
#include <stdint.h>

#define DBG_SUBSYS S_LIBYLIB

#include "sysutil.h"
#include "timer.h"
#include "sysy_lib.h"
#include "dbg.h"
#include "../../schedule/cpuset.h"
#include "cache.h"
#include "core.h"

/* set to request process shutdown when a fatally inconsistent entry is found */
extern int __shutdown__;
typedef mcache_entry_t entry_t;  /* local shorthand used throughout this file */

/* argument bundle handed to the per-core background collect task */
typedef struct {
        mcache_group_t *group;
        const char *name;
} mcache_collect_ctx_t;

/* seconds between background collect passes (5 minutes);
 * parenthesized so expansion inside larger expressions keeps precedence */
#define __COLLECT_INTERVAL__ (60 * 5)
/* target average chain length per hash bucket */
#define __LIST_MAX__ (10)

/* Scan the bucket list @head for the entry whose value matches @key via @cmp.
 *
 * Returns the matching entry or NULL.  Chains longer than 512 are logged
 * as a warning since they indicate a poor hash spread.
 * Caller must hold the head lock. */
static entry_t *__list_find(struct list_head *head, const void *key, cmp_func cmp, const char *name)
{
        int count = 0;
        struct list_head *pos;

        entry_t *entry, *ret = NULL;

        list_for_each(pos, head) {
                count++;
                /* assumes the list hook is the first member of entry_t -- TODO confirm */
                entry = (entry_t *)pos;

                YASSERT(entry->value);

                if (cmp(key, entry->value)) {
                        ret = entry;
                        break;
                }
        }

        /* count is signed, so use %d (was %u: specifier/argument mismatch) */
        DBUG("seek %d @ %s\n", count, name);

        if (count > 512) {
                DWARN("seek %d @ %s\n", count, name);
        }

        return ret;
}

/* Read the current reference count of @ent.
 *
 * Private (single-core) entries are read bare; shared entries take
 * ref_lock so the read is consistent across threads. */
static int __mcache_getref(entry_t *ent)
{
        int err, refcnt;

        YASSERT(ent->ref >= 0);

        if (likely(ent->_private))
                return ent->ref;

        err = sy_spin_lock(&ent->ref_lock);
        if (unlikely(err))
                UNIMPLEMENTED(__DUMP__);

        refcnt = ent->ref;
        sy_spin_unlock(&ent->ref_lock);

        return refcnt;
}


/* Adjust the group's total entry count by @count (may be negative). */
static void __mcache_newsize(mcache_group_t *group, int count)
{
        if (unlikely(sy_spin_lock(&group->size_lock)))
                UNIMPLEMENTED(__DUMP__);

        group->entry += count;

        sy_spin_unlock(&group->size_lock);
}

/* Return the group's current total entry count (size_lock-protected read). */
static uint64_t __mcache_getsize(mcache_group_t *group)
{
        uint64_t total;

        if (unlikely(sy_spin_lock(&group->size_lock)))
                UNIMPLEMENTED(__DUMP__);

        total = group->entry;

        sy_spin_unlock(&group->size_lock);

        return total;
}

/* Unlink @ent from @head and free it; the entry must be unreferenced.
 *
 * Caller must hold the head write lock.  @recycle is forwarded to the
 * group drop callback so the owner can distinguish eviction from an
 * explicit drop.  Returns 0, or EBUSY while references remain. */
static int __mcache_drop__(mcache_group_t *group, mcache_head_t *head, entry_t *ent, int recycle)
{
        int ret;

        // TODO core
        YASSERT(head->curlen);

        /* a live reference forbids freeing; the caller may retry later */
        if (__mcache_getref(ent)) {
                ret = EBUSY;
                GOTO(err_ret, ret);
        }

        __mcache_newsize(group, -1);

        list_del(&ent->hook);
        head->curlen --;

        /* the drop callback owns releasing ent->value; otherwise free it here */
        if (group->drop) {
                group->drop(ent->value, ent, recycle);
                ent->value = NULL;
        } else {
                yfree((void **)&ent->value);
        }

        yfree((void **)&ent);

        return 0;
err_ret:
        return ret;
}

/* Write-lock a bucket head: private heads use the cooperative plock,
 * shared heads a pthread-style rwlock. */
STATIC int __mcache_head_wrlock(mcache_head_t *head)
{
        return likely(head->_private)
                ? plock_wrlock(&head->u.plock)
                : sy_rwlock_wrlock(&head->u.rwlock);
}

#if 0   /* disabled: no caller currently takes a read lock on a bucket head */
static int __mcache_head_rdlock(mcache_head_t *head)
{
        if (likely(head->_private)) {
                return plock_rdlock(&head->u.plock);
        } else {
                return sy_rwlock_rdlock(&head->u.rwlock);
        }
}
#endif

/* Release a bucket-head lock taken via __mcache_head_wrlock().
 *
 * Note: the original used 'return <expr>;' inside a void function,
 * which violates C11 6.8.6.4; call and fall through instead. */
STATIC void __mcache_head_unlock(mcache_head_t *head)
{
        if (likely(head->_private)) {
                plock_unlock(&head->u.plock);
        } else {
                sy_rwlock_unlock(&head->u.rwlock);
        }
}

/* Try to evict the least-recently-used entry (list tail) of @head.
 *
 * Takes the head write lock itself.  Returns 1 when an entry was
 * dropped, 0 when the list is empty or the tail is still referenced;
 * callers sum the result as a progress counter, not an errno. */
static int __mcache_drop_last(mcache_group_t *group, mcache_head_t *head)
{
        int ret;
        struct list_head *pos;
        entry_t *ent;

        ret = __mcache_head_wrlock(head);
        if (unlikely(ret))
                UNIMPLEMENTED(__DUMP__);

        if (list_empty(&head->head)) {
                ret = ENOENT;
                goto err_lock;
        }

        /* tail is the coldest entry: hits re-add entries at the list head */
        pos = head->head.prev;
        ent = (entry_t *)pos;

        ret = __mcache_drop__(group, head, ent, 1);
        if (unlikely(ret)) {
                GOTO(err_lock, ret);
        }

        __mcache_head_unlock(head);

        return 1;
err_lock:
        __mcache_head_unlock(head);
//err_ret:
        return 0;
}

/* Evict LRU-tail entries from @group until it fits under max_entry.
 *
 * Stops with an error log when a full pass makes no progress, which
 * means every remaining entry is still referenced. */
static void __mcache_collect_group(mcache_group_t *group, const char *name)
{
        int retry = 0, i, count;
        int64_t size = 0, last = -1;
        mcache_head_t *head;

        while (1) {
                size = __mcache_getsize(group);
                if (size <= (int64_t)group->max_entry) {
                        break;
                }

                if (last == size) {
                        /* no progress since the previous pass: everything left is busy.
                         * casts now match the specifiers (was (LLU) with %lld,
                         * (long long) with %llu, and int retry with %u) */
                        DERROR("cache %s need be collect, size %lld last %lld, max %llu, retry %d\n",
                               name, (long long)size, (long long)last,
                               (unsigned long long)group->max_entry, retry);

                        break;
                } else {
                        DINFO("cache %s need be collect, size %lld max %llu, retry %d\n",
                              name, (long long)size, (unsigned long long)group->max_entry, retry);
                }

                count = 0;
                for (i = 0; i < group->array_len; i++) {
                        head = &group->array[i];
                        count += __mcache_drop_last(group, head);
                }

                if (count < group->array_len) {
                        DINFO("cache %s collect %d\n", name, count);
                }

                last = size;
                retry++;
        }
}

/* Scheduled-task body: run one collect pass, then release the context. */
static void __mcache_collect_group_core(void *arg)
{
        mcache_collect_ctx_t *job = arg;

        __mcache_collect_group(job->group, job->name);

        /* context came from __mcache_collect_core's MEM_CACHE_64 pool */
        mem_cache_free(MEM_CACHE_64, job);
}

/* Per-core check hook (private mode): queue an async collect pass
 * for the group registered with this core. */
static void __mcache_collect_core(void *_group, void *name)
{
        mcache_collect_ctx_t *job;

#ifdef HAVE_STATIC_ASSERT
        /* the context must fit inside one MEM_CACHE_64 slab object */
        static_assert(sizeof(*job)  < sizeof(mem_cache64_t), "mcache_collect_ctx_t");
#endif

        job = mem_cache_calloc(MEM_CACHE_64, 0);
        job->name = name;
        job->group = _group;

        /* freed by __mcache_collect_group_core when the task completes */
        schedule_task_new("mcache_collect_group", __mcache_collect_group_core, job, -1);
}

/* Timer callback (shared mode): collect every per-core group, then
 * re-arm the timer for the next interval. */
static int __mcache_collect(void *_cache)
{
        mcache_t *cache = _cache;
        int ret;

        for (int i = 0; i < cpuset_useable(); i++)
                __mcache_collect_group(cache->group[i], cache->name);

        ret = timer1_settime(&cache->timer_handler, USEC_PER_SEC * __COLLECT_INTERVAL__);
        if (unlikely(ret))
                UNIMPLEMENTED(__DUMP__);

        return 0;
}

/* Allocate and initialize one per-core cache group sized for @max_entry.
 *
 * The bucket array is sized so chains average ~__LIST_MAX__ entries.
 * @private selects cooperative plocks (per-core) over pthread rwlocks.
 * Returns 0, or an errno from allocation. */
static int __mcache_group_init(mcache_group_t **_group, uint64_t max_entry,
                               drop_func drop, int private)
{
        int ret;
        uint32_t array_len, len, i;
        mcache_group_t *group;
        mcache_head_t *mcache_head;

        array_len = max_entry / __LIST_MAX__;
        if (array_len == 0)
                array_len = 1;  /* small max_entry: avoid a zero-length bucket
                                 * array and 'hash % 0' UB in __mcache_map */

        len = sizeof(*group) + sizeof(mcache_head_t) * array_len;
        ret = ymalloc((void **)&group, len);
        if (unlikely(ret))
                GOTO(err_ret, ret);

        group->drop = drop;
        group->array_len = array_len;
        group->entry = 0;
        group->max_entry = max_entry;
        sy_spin_init(&group->size_lock);

        for (i = 0; i < array_len; i++) {
                mcache_head = &group->array[i];

                mcache_head->curlen = 0;
                mcache_head->_private = private;

                if (private) {
                        plock_init(&mcache_head->u.plock, "cache.plock");
                } else {
                        sy_rwlock_init(&mcache_head->u.rwlock, "cache.rwlock");
                }

                INIT_LIST_HEAD(&mcache_head->head);
        }

        *_group = group;

        return 0;
err_ret:
        return ret;
}

/* Create a cache with one group per usable core.
 *
 * @max_entry is the global cap, split evenly across groups.  In private
 * mode each group registers a per-core check hook; in shared mode a
 * periodic collect timer is armed instead.
 * Returns 0, EINVAL for an over-long name, or an errno on failure.
 * Fixes the original's leak of @cache (and groups) on error paths. */
int mcache_init(mcache_t **_cache, uint64_t max_entry, cmp_func cmp,
                hash_func hash, hash_func group_hash, drop_func drop, int private,
                const char *name)
{
        int ret, i;
        uint32_t len;
        mcache_t *cache;
        char tmp[MAX_BUF_LEN];

        /* validate before allocating anything.
         * NOTE(review): with strlen(name) == MAX_NAME_LEN the strcpy below
         * writes MAX_NAME_LEN + 1 bytes -- confirm cache->name can hold that */
        if (strlen(name) > MAX_NAME_LEN) {
                ret = EINVAL;
                GOTO(err_ret, ret);
        }

        len = sizeof(mcache_t) + sizeof(mcache_group_t *) * cpuset_useable();
        ret = ymalloc((void **)&cache, len);
        if (unlikely(ret))
                GOTO(err_ret, ret);

        DINFO("cache %s malloc %u, private %u, max entry %llu\n", name, len, private, (LLU)max_entry);

        cache->hash = hash;
        cache->_private = private;
        cache->cmp = cmp;
        cache->group_hash = group_hash;

        strcpy(cache->name, name);

        for (i = 0; i < cpuset_useable(); i++) {
                ret = __mcache_group_init(&cache->group[i], max_entry / cpuset_useable(), drop, private);
                if (unlikely(ret))
                        UNIMPLEMENTED(__DUMP__);

                if (private) {
                        snprintf(tmp, MAX_NAME_LEN, "%s[%u]", name, i);
                        core_check_register(i, tmp, cache->group[i], __mcache_collect_core);
                }
        }

        if (!private) {
                snprintf(tmp, MAX_NAME_LEN, "cache.%s", name);
                ret = timer1_create(&cache->timer_handler, tmp, __mcache_collect, cache);
                if (unlikely(ret))
                        GOTO(err_free, ret);

                ret = timer1_settime(&cache->timer_handler, USEC_PER_SEC * __COLLECT_INTERVAL__);
                if (unlikely(ret))
                        GOTO(err_free, ret);
        }

        *_cache = cache;

        DBUG("cache %p name %s\n", cache, cache->name);

        return 0;
err_free:
        /* only reachable in shared (!private) mode, so the groups were never
         * registered with core_check_register and can be freed safely */
        for (i = 0; i < cpuset_useable(); i++)
                yfree((void **)&cache->group[i]);
        yfree((void **)&cache);
err_ret:
        return ret;
}

/* Allocate and link a new entry for @key/@value into @head.
 *
 * Caller must hold the head write lock.  Fails with EEXIST when the
 * key is already present; finding an erased duplicate is treated as
 * fatal corruption (shutdown + exit).  New entries start with ref == 0
 * at the MRU (head) position.  @value may be NULL: mcache_insert_lock
 * inserts a placeholder the caller fills in. */
static int __mcache_insert_nolock(entry_t **_ent, mcache_group_t *group,
                                  mcache_head_t *head, const void *key, void *value,
                                  cmp_func cmp, const char *name, int private)
{
        int ret;
        entry_t *ent;

        ent = __list_find(&head->head, key, cmp, name);
        if (ent) {
                /* an erased duplicate should already have been dropped;
                 * request shutdown rather than corrupt the cache */
                if (ent->erase || ent->__erase__) {
                        DWARN("ent %p erased ref:%d\n", ent, ent->ref);
                        __shutdown__ = 1;
                        UNIMPLEMENTED(__WARN__);
                        EXIT(EAGAIN);
                }

                ret = EEXIST;
                GOTO(err_ret, ret);
        }

        ret = ymalloc((void **)&ent, sizeof(entry_t));
        if (unlikely(ret))
                GOTO(err_ret, ret);

        ent->value = value;
        ent->ref = 0;
        ent->erase = 0;
        ent->__erase__ = 0;
#ifdef CACHE_DROP_DEBUG
        ent->erase_time = 0;
        INIT_LIST_HEAD(&ent->ref_list);
#endif
        ent->_private = private;
        list_add(&ent->hook, &head->head);
        head->curlen ++;
        ent->head = head;
        ent->group = group;

        /* private entries use a cooperative plock; shared ones need the
         * rwlock plus a spinlock protecting the refcount */
        if (private) {
                ret = plock_init(&ent->u.plock, "cache.ent.plock");
                if (unlikely(ret))
                        UNIMPLEMENTED(__DUMP__);
        } else {
                ret = sy_spin_init(&ent->ref_lock);
                if (unlikely(ret))
                        UNIMPLEMENTED(__WARN__);

                ret = sy_rwlock_init(&ent->u.rwlock, "cache.ent.rwlock");
                if (unlikely(ret))
                        UNIMPLEMENTED(__DUMP__);
        }

        *_ent = ent;

        return 0;
err_ret:
        return ret;
}

/* Unlink @ent from its bucket, free it, then release the head lock.
 *
 * Caller must hold the head write lock (e.g. via mcache_insert_lock).
 * NOTE(review): unlike __mcache_drop__ this neither checks the refcount
 * nor invokes the group drop callback, and ent->value is not freed --
 * presumably the caller still owns the value; confirm against callers. */
void mcache_remove_unlock(entry_t *ent)
{
        mcache_group_t *group;
        mcache_head_t *head;

        group = ent->group;

        __mcache_newsize(group, -1);

        head = ent->head;
        list_del(&ent->hook);
        head->curlen--;

        __mcache_head_unlock(head);

        yfree((void **)&ent);
}

/* Resolve @key to its group (by group_hash over cores) and bucket head
 * (by hash over the group's array).  Either output pointer may be NULL. */
STATIC void __mcache_map(mcache_t *cache, const void *key, mcache_group_t **_group,
                   mcache_head_t **_head)
{
        mcache_group_t *group;
        uint32_t bucket = cache->hash(key);
        uint32_t core = cache->group_hash(key) % cpuset_useable();

        group = cache->group[core];

        if (_group)
                *_group = group;
        if (_head)
                *_head = &group->array[bucket % group->array_len];
}

/* Insert @value under @key.  Returns 0, EEXIST if the key is present,
 * or an errno from locking.  The head lock is released before return. */
int mcache_insert(mcache_t *cache, const void *key, void *value)
{
        int ret;
        mcache_head_t *head;
        mcache_group_t *group;
        entry_t *new_ent;

        YASSERT(value);

        __mcache_map(cache, key, &group, &head);

        ret = __mcache_head_wrlock(head);
        if (unlikely(ret))
                GOTO(err_ret, ret);

        ret = __mcache_insert_nolock(&new_ent, group, head, key, value,
                                     cache->cmp, cache->name, cache->_private);
        if (unlikely(ret))
                GOTO(err_lock, ret);

        __mcache_newsize(group, 1);
        YASSERT(new_ent->value);

        __mcache_head_unlock(head);

        return 0;
err_lock:
        __mcache_head_unlock(head);
err_ret:
        return ret;
}

/* Insert @value under @key and return the new entry write-locked and
 * referenced via *_ent.  The head lock is released before return; the
 * caller must mcache_unlock() + mcache_release() the entry when done. */
int mcache_insert_wrlock(mcache_t *cache, const void *key, void *value, entry_t **_ent)
{
        int ret;
        mcache_head_t *head;
        mcache_group_t *group;
        entry_t *ent;

        YASSERT(value);

        __mcache_map(cache, key, &group, &head);

        ret = __mcache_head_wrlock(head);
        if (unlikely(ret))
                GOTO(err_ret, ret);

        ret = __mcache_insert_nolock(&ent, group, head, key,
                                     value, cache->cmp, cache->name, cache->_private);
        if (unlikely(ret)) {
                GOTO(err_lock, ret);
        }

        __mcache_newsize(group, 1);

        YASSERT(ent->value);

        /* lock-timeout is reported as EAGAIN so callers can retry */
        if (likely(ent->_private)) {
                ret = plock_wrlock(&ent->u.plock);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        GOTO(err_lock, ret);
                }
        } else {
                ret = sy_rwlock_wrlock(&ent->u.rwlock);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        GOTO(err_lock, ret);
                }
        }

        YASSERT(ent->erase == 0 && ent->__erase__ == 0);
        YASSERT(ent->value);

        /* NOTE(review): if mcache_ref failed here the entry lock taken above
         * would be leaked; unreachable today since erase was just asserted 0,
         * but fragile -- confirm if mcache_ref grows other failure modes */
        ret = mcache_ref(ent);
        if (unlikely(ret))
                GOTO(err_lock, ret);

        *_ent = ent;
        
        __mcache_head_unlock(head);

        return 0;
err_lock:
        __mcache_head_unlock(head);
err_ret:
        return ret;
}


/* Insert a placeholder entry (value == NULL) for @key.
 *
 * On success the bucket's HEAD WRITE LOCK IS STILL HELD: the caller
 * fills in the entry and must release via mcache_insert_unlock() (or
 * mcache_remove_unlock() to abort).  On error the lock is released. */
int mcache_insert_lock(mcache_t *cache, const void *key, entry_t **_ent)
{
        int ret;
        mcache_head_t *head;
        mcache_group_t *group;
        entry_t *ent;

        __mcache_map(cache, key, &group, &head);

        ret = __mcache_head_wrlock(head);
        if (unlikely(ret))
                GOTO(err_ret, ret);

        ret = __mcache_insert_nolock(&ent, group, head, key,
                                     NULL, cache->cmp, cache->name, cache->_private);
        if (unlikely(ret))
                GOTO(err_lock, ret);

        __mcache_newsize(group, 1);

        *_ent = ent;

        /* intentionally returns with the head write lock held */
        return 0;
err_lock:
        __mcache_head_unlock(head);
err_ret:
        return ret;
}

/* Release the head write lock held since mcache_insert_lock(). */
void mcache_insert_unlock(mcache_t *cache, const void *key, entry_t *ent)
{
        (void) cache;
        (void) key;

        __mcache_head_unlock(ent->head);
}

/* Look up @key and return the entry referenced via *_ent.
 *
 * A hit moves the entry to the MRU (head) position.  Returns ENOENT on
 * miss, ESTALE if the entry is being erased, EAGAIN if the cache is
 * not yet initialized.  Caller must mcache_release() the entry. */
int mcache_get(mcache_t *cache, const void *key, mcache_entry_t **_ent)
{
        int ret;
        mcache_head_t *head;
        mcache_group_t *group;
        entry_t *ent;

        if (unlikely(cache == NULL)) {
                ret = EAGAIN;
                GOTO(err_ret, ret);
        }

        __mcache_map(cache, key, &group, &head);

        /* write lock, not read: the hit path below mutates the list order */
        ret = __mcache_head_wrlock(head);
        if (unlikely(ret))
                GOTO(err_ret, ret);

        if (unlikely(head->curlen == 0)) {
                ret = ENOENT;
                goto err_lock;
        }

        ent = __list_find(&head->head, key, cache->cmp, cache->name);
        if (unlikely(!ent)) {
                ret = ENOENT;
                goto err_lock;
        }

        if (unlikely(ent->erase || ent->__erase__)) {
                DBUG("erased %p\n", ent);
#ifdef CACHE_DROP_DEBUG
                // TODO core
                if (ent->erase_time) {
                        YASSERT(gettime() - ent->erase_time < gloconf.rpc_timeout * 3);
                }
#endif
                
                DWARN("%s ent %p erased, ref %u\n", cache->name, ent, ent->ref);
                //schedule_sleep(cache->name, 1000 * 1000);
                ret = ESTALE;
                GOTO(err_lock, ret);
        }

        /* LRU touch: move the hit entry back to the head of the list */
        list_del(&ent->hook);
        list_add(&ent->hook, &head->head);

        ret = mcache_ref(ent);
        if (unlikely(ret))
                GOTO(err_lock, ret);

        *_ent = ent;
        __mcache_head_unlock(head);

        DBUG("cache get\n");

        return 0;
err_lock:
        __mcache_head_unlock(head);
err_ret:
        return ret;
}

/* Final free of an erased entry, performed by the last dereferencer:
 * take the head write lock, unlink and free the entry, unlock. */
static int __mcache_drop_free(entry_t *ent)
{
        int ret;
        mcache_head_t *head = ent->head;
        mcache_group_t *group = ent->group;

        DBUG("drop %p\n", ent);

        ret = __mcache_head_wrlock(head);
        if (unlikely(ret))
                GOTO(err_ret, ret);

        ret = __mcache_drop__(group, head, ent, 0);
        if (unlikely(ret))
                GOTO(err_lock, ret);

        __mcache_head_unlock(head);

        return 0;
err_lock:
        __mcache_head_unlock(head);
err_ret:
        return ret;
}

#ifdef CACHE_DROP_DEBUG
/* Debug helper: find the per-task reference record for @task, if any. */
static mcache_ref_t * __mcache_ref_find(entry_t *ent, task_t *task)
{
        struct list_head *pos;

        list_for_each(pos, &ent->ref_list) {
                mcache_ref_t *cur = (mcache_ref_t *)pos;

                if (memcmp(&cur->ownner, task, sizeof(*task)) == 0)
                        return cur;
        }

        return NULL;
}

/* Debug helper: record a first reference by @task (count starts at 1). */
static int __mcache_ref_new(entry_t *ent, task_t *task)
{
        int ret;
        mcache_ref_t *rec;

        ret = ymalloc((void **)&rec, sizeof(*rec));
        if (unlikely(ret))
                GOTO(err_ret, ret);

        rec->count = 1;
        rec->ownner = *task;
        list_add(&rec->hook, &ent->ref_list);

        return 0;
err_ret:
        return ret;
}

/* Debug helper: account one more reference held by the current task. */
static int __mcache_ref_add(entry_t *ent)
{
        int ret;
        task_t self;
        mcache_ref_t *rec;

        DBUG("ent %p ref:%d\n", ent, ent->ref);

        schedule_task_given(&self);

        rec = __mcache_ref_find(ent, &self);
        if (rec == NULL) {
                ret = __mcache_ref_new(ent, &self);
                if (unlikely(ret))
                        GOTO(err_ret, ret);
        } else {
                rec->count++;
        }

        return 0;
err_ret:
        return ret;
}

/* Debug helper: drop one reference held by the current task; the record
 * must exist (asserted) and is freed when its count reaches zero. */
static int __mcache_ref_del(entry_t *ent)
{
        task_t self;
        mcache_ref_t *rec;

        DBUG("ent %p ref:%d\n", ent, ent->ref);

        schedule_task_given(&self);

        rec = __mcache_ref_find(ent, &self);
        YASSERT(rec);

        if (--rec->count == 0) {
                list_del(&rec->hook);
                yfree((void **)&rec);
        }

        return 0;
}
#endif

/* Take a reference on @ent.
 *
 * Returns ESTALE when the entry is marked erased.  Private (per-core)
 * entries bump the count bare; shared entries take ref_lock. */
int mcache_ref(entry_t *ent)
{
        int ret;

        YASSERT(ent->ref >= 0);

        if (unlikely(ent->erase || ent->__erase__)) {
                ret = ESTALE;
                GOTO(err_ret, ret);
        }
        
        if (likely(ent->_private)) {
                ent->ref++;
#ifdef CACHE_DROP_DEBUG
                __mcache_ref_add(ent);
#endif
        } else {
                ret = sy_spin_lock(&ent->ref_lock);
                if (unlikely(ret))
                        UNIMPLEMENTED(__DUMP__);

                ent->ref++;
#ifdef CACHE_DROP_DEBUG
                __mcache_ref_add(ent);
#endif

                sy_spin_unlock(&ent->ref_lock);
        }

        return 0;
err_ret:
        return ret;
}

/* Drop one reference; sets *drop when the caller must free the entry.
 *
 * If an erase was requested and this was the last reference, erase is
 * cleared while setting *drop = 1, so exactly one dereferencer wins
 * and performs the free (via __mcache_drop_free in mcache_release). */
static void __mcache_deref(entry_t *ent, int *drop)
{
        int ret;

        YASSERT(ent->ref > 0);

        if (likely(ent->_private)) {
                ent->ref--;
#ifdef CACHE_DROP_DEBUG
                __mcache_ref_del(ent);
#endif

                if (ent->erase && ent->ref == 0) {
                        *drop = 1;
                        ent->erase = 0;
                } else {
                        *drop = 0;
                }
        } else {
                /* shared entries: decrement and erase-check atomically under ref_lock */
                ret = sy_spin_lock(&ent->ref_lock);
                if (unlikely(ret))
                        UNIMPLEMENTED(__DUMP__);

                ent->ref--;
#ifdef CACHE_DROP_DEBUG
                __mcache_ref_del(ent);
#endif
                if (ent->erase && ent->ref == 0) {
                        *drop = 1;
                        ent->erase = 0;
                } else {
                        *drop = 0;
                }

                sy_spin_unlock(&ent->ref_lock);
        }
}

/* Release a reference taken by mcache_get()/mcache_ref().
 *
 * If this was the last reference on an erased entry, this caller also
 * unlinks and frees it. */
void mcache_release(entry_t *ent)
{
        int ret, drop;

        DBUG("cache release\n");

        YASSERT(ent->value);
        YASSERT(ent->ref > 0);
        
        if (unlikely(ent->erase == 0 && ent->__erase__ == 0)) {
                YASSERT(ent->value);
        }

        __mcache_deref(ent, &drop);

        DBUG("ref %u\n", ent->ref);

        /* we won the race to be the last dereferencer of an erased entry */
        if (unlikely(drop)) {
                ret = __mcache_drop_free(ent);
                if (unlikely(ret))
                        UNIMPLEMENTED(__DUMP__);
        }

        return;
}

/* Write-lock @ent.  Caller must already hold a reference (ref > 0).
 *
 * Lock timeout is reported as EAGAIN; ESTALE (with the lock released)
 * means the entry was erased while we waited. */
int mcache_wrlock(entry_t *ent)
{
        int ret;

        if (likely(ent->_private)) {
                ret = plock_wrlock(&ent->u.plock);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        GOTO(err_ret, ret);
                }
        } else {
                ret = sy_rwlock_wrlock(&ent->u.rwlock);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        GOTO(err_ret, ret);
                }
        }

        YASSERT(ent->ref > 0);

        /* erase may have been requested while we were blocked on the lock */
        if (unlikely(ent->erase || ent->__erase__)) {
                DBUG("erased %p\n", ent);
                mcache_unlock(ent);
                ret = ESTALE;
                GOTO(err_ret, ret);
        }

        YASSERT(ent->value);

        return 0;
err_ret:
        return ret;
}

/* Try to write-lock @ent without blocking.
 *
 * Contention returns the trylock errno (plain goto: an expected case,
 * not worth the GOTO error log); ESTALE means the entry was erased. */
int mcache_trywrlock(entry_t *ent)
{
        int ret;

        if (likely(ent->_private)) {
                ret = plock_trywrlock(&ent->u.plock);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        goto err_ret;
                }
        } else {
                ret = sy_rwlock_trywrlock(&ent->u.rwlock);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        goto err_ret;
                }
        }

        YASSERT(ent->ref > 0);

        if (unlikely(ent->erase || ent->__erase__)) {
                DBUG("erased %p\n", ent);
                mcache_unlock(ent);
                ret = ESTALE;
                GOTO(err_ret, ret);
        }

        YASSERT(ent->value);

        return 0;
err_ret:
        return ret;
}

/* Write-lock @ent with queueing priority @prio.
 *
 * NOTE(review): @prio is only honored on the private plock path; the
 * shared path falls back to a plain wrlock -- confirm this is intended.
 * EAGAIN on timeout, ESTALE (lock released) if the entry was erased. */
int mcache_wrlock_prio(entry_t *ent, int prio)
{
        int ret;

        if (likely(ent->_private)) {
                ret = plock_wrlock_prio(&ent->u.plock, prio);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        GOTO(err_ret, ret);
                }
        } else {
                ret = sy_rwlock_wrlock(&ent->u.rwlock);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        GOTO(err_ret, ret);
                }
        }

        YASSERT(ent->ref > 0);

        if (unlikely(ent->erase || ent->__erase__)) {
                DBUG("erased %p\n", ent);
                mcache_unlock(ent);
                ret = ESTALE;
                GOTO(err_ret, ret);
        }

        YASSERT(ent->value);

        return 0;
err_ret:
        return ret;
}

/* Read-lock @ent.  Caller must already hold a reference (ref > 0).
 * EAGAIN on lock timeout; ESTALE (lock released) if the entry was erased. */
int IO_FUNC mcache_rdlock(entry_t *ent)
{
        int ret;

        if (likely(ent->_private)) {
                ret = plock_rdlock(&ent->u.plock);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        GOTO(err_ret, ret);
                }
        } else {
                ret = sy_rwlock_rdlock(&ent->u.rwlock);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        GOTO(err_ret, ret);
                }
        }

        YASSERT(ent->ref > 0);

        /* erase may have been requested while we were blocked on the lock */
        if (unlikely(ent->erase || ent->__erase__)) {
                mcache_unlock(ent);
                ret = ESTALE;
                GOTO(err_ret, ret);
        }

        YASSERT(ent->value);

        return 0;
err_ret:
        return ret;
}

/* Try to read-lock @ent without blocking.
 * EAGAIN/trylock errno on contention; ESTALE if the entry was erased. */
int IO_FUNC mcache_tryrdlock(entry_t *ent)
{
        int ret;

        if (likely(ent->_private)) {
                ret = plock_tryrdlock(&ent->u.plock);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        GOTO(err_ret, ret);
                }
        } else {
                ret = sy_rwlock_tryrdlock(&ent->u.rwlock);
                if (unlikely(ret)) {
                        if (ret == ETIMEDOUT)
                                ret = EAGAIN;
                        GOTO(err_ret, ret);
                }
        }

        YASSERT(ent->ref > 0);

        /* unlikely() added for consistency with the sibling lock helpers */
        if (unlikely(ent->erase || ent->__erase__)) {
                mcache_unlock(ent);
                ret = ESTALE;
                GOTO(err_ret, ret);
        }

        YASSERT(ent->value);

        return 0;
err_ret:
        return ret;
}

/* Release the entry lock taken by any of the mcache_*lock helpers. */
void IO_FUNC mcache_unlock(entry_t *ent)
{
        YASSERT(ent->ref > 0);

        if (unlikely(!ent->_private))
                sy_rwlock_unlock(&ent->u.rwlock);
        else
                plock_unlock(&ent->u.plock);

        YASSERT(ent->value);
}

/* Walk every entry in every group and bucket, invoking @callback(arg, entry).
 *
 * The callback receives the entry's embedded list hook pointer and casts
 * it back to mcache_entry_t.
 * NOTE(review): no head lock is taken while walking -- callers must
 * guarantee quiescence or tolerate races; confirm against call sites.
 * (Removed the stale '(void)' casts of parameters that are in fact used,
 * and the dead '#if 0' lru-walk remnant.) */
void mcache_iterator(mcache_t *cache, void (*callback)(void *, void *), void *arg)
{
        mcache_group_t *group;
        mcache_head_t *head;
        struct list_head *pos;

        DWARN("cache %p name %s group %d\n", cache, cache->name, cpuset_useable());
        for (int i = 0; i < cpuset_useable(); i++) {
                group = cache->group[i];
                for (int j = 0; j < group->array_len; j++) {
                        head = &group->array[j];
                        list_for_each(pos, &head->head) {
                                callback(arg, pos);
                        }
                }
        }
}

/* Take the write lock on the bucket that @key maps to.
 * Pair with mcache_unlockcache() on the same key. */
int mcache_lockcache(mcache_t *cache, const void *key)
{
        int ret;
        mcache_head_t *head;
        mcache_group_t *group;

        __mcache_map(cache, key, &group, &head);

        ret = __mcache_head_wrlock(head);
        if (unlikely(ret))
                GOTO(err_ret, ret);

        /* returns with the head lock held */
        return 0;
err_ret:
        return ret;
}

/* Release the bucket lock taken by mcache_lockcache() for @key. */
void mcache_unlockcache(mcache_t *cache, const void *key)
{
        mcache_head_t *head;
        mcache_group_t *group;

        __mcache_map(cache, key, &group, &head);
        __mcache_head_unlock(head);
}

/* Mark @ent erased; it is freed when the last reference is released.
 *
 * "nolock" = the caller already holds the entry write lock.
 * NOTE(review): the trywrlock below is expected to FAIL (YASSERT(ret)
 * asserts nonzero) as proof the lock is held -- if it ever succeeded
 * the acquired lock would leak; confirm trywrlock cannot succeed
 * re-entrantly for either lock flavor. */
int mcache_drop_nolock(entry_t *ent)
{
        int ret;
        mcache_head_t *head;

        head = ent->head;
        YASSERT(head->curlen);

        ret = mcache_trywrlock(ent);
        YASSERT(ret);
        YASSERT(ent->ref);
        
        if (likely(ent->_private)) {
                if (ent->erase == 0 && ent->__erase__ == 0) {
                        DBUG("erase %p\n", ent);
                        ent->erase = 1;
                        ent->__erase__ = 1;

#ifdef CACHE_DROP_DEBUG
                        ent->erase_time = gettime();
#endif
                }
        } else {
                /* shared entries: flip the flags under ref_lock so deref sees
                 * a consistent erase/ref pair */
                ret = sy_spin_lock(&ent->ref_lock);
                if (unlikely(ret))
                        GOTO(err_ret, ret);

                YASSERT(ent->ref);
                if (ent->erase == 0 && ent->__erase__ == 0) {
                        DBUG("erase %p\n", ent);
                        ent->erase = 1;
                        ent->__erase__ = 1;
#ifdef CACHE_DROP_DEBUG
                        ent->erase_time = gettime();
#endif
                }

                sy_spin_unlock(&ent->ref_lock);
        }

        return 0;
err_ret:
        return ret;
}

/* Return nonzero when @ent is marked erased (probed via a read lock). */
int mcache_isdroped(entry_t *ent)
{
        int ret = mcache_rdlock(ent);

        if (likely(ret == 0))
                mcache_unlock(ent);

        /* ESTALE from rdlock is exactly the "erased" signal */
        return ret == ESTALE;
}

/* Erase @ent: take its write lock, mark it erased, unlock.
 *
 * The entry's memory is reclaimed once the last reference goes away.
 * Idempotent: an already-dropped entry returns 0 immediately. */
int mcache_drop(entry_t *ent)
{
        int ret;
        mcache_head_t *head;

        head = ent->head;
        YASSERT(head->curlen);

        if (mcache_isdroped(ent))
                return 0;

        /* high-priority wrlock so erase is not starved by readers */
        ret = mcache_wrlock_prio(ent, 1);
        if (unlikely(ret))
                GOTO(err_ret, ret);

        ret = mcache_drop_nolock(ent);
        if (unlikely(ret))
                GOTO(err_lock, ret);
        
        mcache_unlock(ent);
        
        return 0;
err_lock:
        mcache_unlock(ent);
err_ret:
        return ret;
}
