#include "my_disk_data.h"
#include "my_disk_context.h"

#define MY_RETRY_TIMEOUT_MS                         1
#define MY_MAX_RETRY_NUM                            3

static void
my_disk_data_do_recycle_handler(my_event_t *event);

static void
my_disk_data_do_flush_handler(my_event_t *event);

static void
my_disk_data_recycle_done(my_disk_data_t *disk_data);


/*
 * Per-object recycle task: reads one hot object back from the region being
 * recycled and re-inserts it through disk_data->reinsert_handler so it
 * survives the reclamation of its region.  Allocated in
 * my_disk_data_setup_recycle_record_task() and freed in
 * my_disk_data_recycle_task_done().
 */
typedef struct {
    my_disk_data_t              *disk_data;       // owning disk data (not owned)
    my_disk_hotspot_record_t    record;           // hotspot record being rescued
    my_cache_key_t              key;              // key parsed from the on-disk object
    my_buf_chain_t              *data;            // payload chain handed to the reinsert request
    my_cache_req_t              req;              // embedded reinsert request
    my_buf_t                    *buf;             // raw buffer the AIO read fills
    my_aio_req_t                aio_req;          // embedded AIO read descriptor
    my_dispatch_event_t         dispatch_event;   // AIO completion event
} my_disk_data_recycle_task_t;

/*
 * Prepare a recycle task: zero the whole struct, record its owner and the
 * hotspot record to rescue, and allocate an empty chain for the payload.
 * task->buf is filled in later by the caller.
 */
static inline void
my_disk_data_recycle_task_init(my_disk_data_recycle_task_t *task,
    my_disk_data_t *disk_data, my_disk_hotspot_record_t record) {
    bzero(task, sizeof(my_disk_data_recycle_task_t));

    task->record = record;
    task->disk_data = disk_data;
    task->data = my_buf_chain_alloc();

    my_dispatch_event_init(&task->dispatch_event);
}

/*
 * Tear down one recycle task (buffer, chain, the task itself), then account
 * for its completion and let the recycle chain decide whether to continue.
 */
static void
my_disk_data_recycle_task_done(my_disk_data_recycle_task_t *task) {
    my_disk_data_t *owner = task->disk_data;

    /* release per-task resources before the task struct itself */
    my_buf_free(task->buf);
    my_buf_chain_free(task->data);
    my_free(task);

    owner->recycle_task--;
    my_disk_data_recycle_done(owner);
}

/* Reinsert completed (success or failure): the task is finished either way. */
static void
my_disk_data_recycle_reinsert_done(my_cache_req_t *req) {
    my_disk_data_recycle_task_done((my_disk_data_recycle_task_t *)req->ctx_data);
}

/*
 * AIO completion for a recycle read: validate the object read back from
 * disk (size, key digest, CRC) and, when intact, hand its payload to
 * reinsert_handler so the hot object survives region reclamation.  Every
 * failure path simply drops the task — losing a cache object is acceptable.
 */
static void
my_disk_data_recycle_read_done(my_dispatch_event_t *dispatch_event) {
    my_aio_req_t *aio_req = dispatch_event->data;
    my_disk_data_recycle_task_t *task = dispatch_event->save_data;
    my_disk_data_t *disk_data = task->disk_data;
    my_buf_t *buf = task->buf;

    // short read or I/O error: abandon this object
    if (aio_req->eno != 0 || aio_req->res != aio_req->nbytes) {
        MY_LOG_ERROR("disk_data", "recycle task read failed, errno: %d, res: %d",
            aio_req->eno, aio_req->res);
        return my_disk_data_recycle_task_done(task);
    }
    buf->end += aio_req->res;

    // on-disk layout: header | key bytes | data bytes
    my_disk_cache_object_t *obj = (my_disk_cache_object_t *)buf->pos;
    int64_t object_size = sizeof(my_disk_cache_object_t) + obj->key_len + obj->data_len;
    if (aio_req->nbytes < object_size) {
        MY_LOG_ERROR("disk_data", "to short object, read nbytes: %lld, object size: %lld",
            aio_req->nbytes, object_size);
        return my_disk_data_recycle_task_done(task);
    }
    
    // rebuild the key from the bytes following the header and verify it is
    // the object the hotspot record pointed at
    my_cache_key_init(&task->key, buf->pos + sizeof(my_disk_cache_object_t), obj->key_len);
    if (task->key.digest32 != task->record.digest32) {
        MY_LOG_ERROR0("disk_data", "digest32 no match in recycle object");
        return my_disk_data_recycle_task_done(task);
    }

    // narrow the buf to the payload only for the CRC check and reinsert
    buf->pos += (sizeof(my_disk_cache_object_t) + obj->key_len);
    buf->end = buf->pos + obj->data_len;

    uint32_t crc32 = my_buf_crc32(buf);

    if (crc32 != obj->crc32) {
        MY_LOG_ERROR0("disk_data", "crc32 no match in recycle object");
        return my_disk_data_recycle_task_done(task);
    }

    // NOTE(review): buf is appended to task->data here but also freed in
    // my_disk_data_recycle_task_done() — presumably the chain does not take
    // ownership; confirm against my_buf_chain_append()'s contract.
    my_buf_chain_append(task->data, buf);

    my_cache_req_init(&task->req, &task->key, task->data, task,
        my_disk_data_recycle_reinsert_done);

    // hand the object back to the volume layer for re-insertion
    disk_data->reinsert_handler(disk_data->volume, &task->req, task->record.offset);
}

/*
 * Allocate and launch one recycle task: read `record.size` bytes at
 * `record.offset` into a fresh buffer, completing in
 * my_disk_data_recycle_read_done().  The caller counts the task via
 * disk_data->recycle_task.
 */
static void
my_disk_data_setup_recycle_record_task(my_disk_data_t *disk_data,
    my_disk_hotspot_record_t record) {

    my_disk_data_recycle_task_t *task = my_malloc(sizeof(my_disk_data_recycle_task_t));
    my_disk_data_recycle_task_init(task, disk_data, record);

    /* Backing storage for the object being read back.  BUG FIX: the old
     * code called my_buf_data_free(buf_data) right here, releasing the
     * memory the AIO read below writes into (use-after-free).  The buffer
     * stays owned by task->buf until my_buf_free() in
     * my_disk_data_recycle_task_done(); my_disk_data_read() uses the same
     * alloc pattern without an early free. */
    my_buf_data_t *buf_data = my_buf_data_alloc(record.size);
    task->buf = my_buf_alloc(buf_data, buf_data->data, buf_data->data);

    // setup read
    // NOTE(review): the normal read path offsets by vinfo->data_offset
    // (my_disk_data_read); verify whether record.offset is already an
    // absolute file offset here.
    my_aio_req_init(&task->aio_req, AIO_READ, disk_data->vinfo->fd, task->buf->end,
        record.size, record.offset);
    task->dispatch_event.data = &task->aio_req;
    task->dispatch_event.save_data = task;
    task->dispatch_event.handler = my_disk_data_recycle_read_done;
    my_aio_manager_post_event(disk_data->aio_manager, &task->dispatch_event);
}

/*
 * Called once after the scheduling loop and after each recycle task
 * completes; proceeds only when every in-flight task has finished.
 * Commits the recycled length, wakes any writer stalled on a write cursor,
 * and re-arms the recycle loop (the handler decides whether more work is
 * needed).
 */
static void
my_disk_data_recycle_done(my_disk_data_t *disk_data) {
    if (disk_data->recycle_task > 0) {
        return; // tasks still in flight
    }

    my_disk_vinfo_recycle_done(disk_data->vinfo, disk_data->recycle_len);

    my_spin_lock_lock(&disk_data->alloc_cursor_lock);
    my_dispatch_event_t *pending_event = disk_data->pending_alloc_cursor_event;
    /* BUG FIX: clear the slot before posting so the event cannot be
     * delivered again on a later pass (mirrors the handling of
     * pending_alloc_buf_event in my_disk_data_flush_buf_done). */
    disk_data->pending_alloc_cursor_event = NULL;
    if (pending_event) {
        my_event_loop_post_event(pending_event->setup_thread, pending_event);
    }
    my_spin_lock_unlock(&disk_data->alloc_cursor_lock);

    disk_data->recycle_event.data = disk_data;
    disk_data->recycle_event.handler = my_disk_data_do_recycle_handler;
    my_event_loop_add_ready_event(&disk_data->recycle_event);
}

/*
 * Recycle-loop entry: decide whether recycling is still needed, then walk
 * the current recycle window, launching a read-back task for each hot
 * record inside it.  Finishes through my_disk_data_recycle_done(), which
 * re-posts this handler until free space rises above the threshold.
 */
static void
my_disk_data_do_recycle_handler(my_event_t *event) {
    my_disk_data_t *disk_data = event->data;

    my_bool_t need_recycle = MY_TRUE;
    int recycle_treshold = g_cache_conf.storage_cache.recycle_treshold;
    my_spin_lock_lock(&disk_data->recycle_lock);
    int64_t free_space_size = my_disk_vinfo_free_space_size(disk_data->vinfo);
    // hysteresis: stop only when we are at 2x the start threshold
    // (64-bit multiply — the int product could overflow for large blocks)
    if (free_space_size > (int64_t)recycle_treshold * 2 * MY_DISK_BLOCK_SIZE) {
        need_recycle = MY_FALSE;
        disk_data->recycleing = MY_FALSE;
    }
    /* BUG FIX: this was my_spin_lock_lock(), acquiring recycle_lock a
     * second time and deadlocking the event loop. */
    my_spin_lock_unlock(&disk_data->recycle_lock);

    if (!need_recycle) {
        return;
    }

    my_disk_vinfo_recycle_cursor_t recycle_cursor = 
        my_disk_vinfo_next_recycle_cursor(disk_data->vinfo);
    if (!recycle_cursor.vaild) {
        // current event is recycle_event
        // current event handler is my_disk_data_do_recycle_handler
        // reschedule it
        return my_event_loop_add_ready_event(event);
    }

    my_disk_hotspot_record_t record;
    int64_t recycle_window_off = recycle_cursor.recycle_pos;
    int64_t recycle_window_len = recycle_cursor.recycle_len;
    int64_t recycle_len = recycle_cursor.recycle_len;

    for (; ;) {
        // remaining bytes of the window past recycle_window_off
        recycle_window_len = recycle_cursor.recycle_len - recycle_window_off +
            recycle_cursor.recycle_pos;
        // recycle done
        if (recycle_window_len == 0) {
            break;
        }

        if (recycle_window_len < 0) {
            MY_LOG_FATAL0("disk_data", "unreachable");
        }

        // there are no tasks to be recycled.
        if (!my_disk_hotspot_first_cover(disk_data->hotspot, recycle_window_off,
            recycle_window_len, &record)) {
            break;
        }

        // current record error, skip it
        if (record.size > MY_DISK_BLOCK_SIZE) {
            MY_LOG_WARN0("disk_data", "invalid size in recycle element, skip it");
            recycle_window_off = record.offset + 1;
            continue;
        }

        // record exceeds recycle space: only reclaim up to its start
        if (record.offset + record.size > recycle_window_off + recycle_window_len) {
            recycle_len = record.offset - recycle_cursor.recycle_pos;
            break;
        }

        my_disk_data_setup_recycle_record_task(disk_data, record);
        recycle_window_off = record.offset + record.size;

        disk_data->recycle_task++;
    }

    disk_data->recycle_len = recycle_len;
    return my_disk_data_recycle_done(disk_data);
}


/*
 * Start one recycle pass when free space drops below the configured
 * threshold.  The recycleing flag, guarded by recycle_lock, guarantees at
 * most one recycle chain is running.
 */
static void
my_disk_data_check_and_notify_recycle(my_disk_data_t *disk_data) {
    my_spin_lock_lock(&disk_data->recycle_lock);
    int recycle_treshold = g_cache_conf.storage_cache.recycle_treshold;
    int64_t free_space_size = my_disk_vinfo_free_space_size(disk_data->vinfo);
    /* 64-bit multiply: threshold * block size is int arithmetic and could
     * overflow before the comparison against an int64_t. */
    if (free_space_size < ((int64_t)recycle_treshold * MY_DISK_BLOCK_SIZE) &&
        !disk_data->recycleing) {

        disk_data->recycleing = MY_TRUE;
        disk_data->recycle_event.data = disk_data;
        disk_data->recycle_event.handler = my_disk_data_do_recycle_handler;
        my_event_loop_add_ready_event(&disk_data->recycle_event);
    }
    my_spin_lock_unlock(&disk_data->recycle_lock);
}


/*
 * AIO completion for a buf flush: log the outcome, return the buf to the
 * pool, wake any writer stalled waiting for a free buf, and continue with
 * the next queued buf.  Flush failures are logged but not retried — the
 * buf is dropped either way.
 */
static void
my_disk_data_flush_buf_done(my_dispatch_event_t *dispatch_event) {
    my_aio_req_t *aio_req = dispatch_event->data;
    my_disk_data_t *disk_data = dispatch_event->save_data;
    my_disk_buf_t *buf = disk_data->pending_flush_buf;

    if (aio_req->eno != 0 || aio_req->res != aio_req->nbytes) {
        MY_LOG_ERROR("disk_data", "flush [%lld-%lld] disk buf failed, eno: %d, res: %d",
            buf->start_off, buf->write_off, aio_req->eno, aio_req->res);
    } else {
        MY_LOG_DEBUG("disk_data", "flush [%lld-%lld] disk buf success",
            buf->start_off, buf->write_off);
    }

    // buf contents are now on disk (or abandoned): remove it from the
    // in-memory lookup set and recycle it into the pool
    my_disk_membufs_del(&disk_data->mem_bufs, buf);
    my_disk_buf_pool_free(&disk_data->buf_pool, buf);
    // a writer may be parked in my_disk_data_switch_alloc_buf_handler
    // waiting for a pool buf — take the slot under the lock and wake it
    my_spin_lock_lock(&disk_data->alloc_buf_lock);
    my_dispatch_event_t *pending_event = disk_data->pending_alloc_buf_event;
    disk_data->pending_alloc_buf_event = NULL;  // clear
    if (pending_event) {
        my_event_loop_post_event(pending_event->setup_thread, pending_event); // wakeup
    }
    my_spin_lock_unlock(&disk_data->alloc_buf_lock);

    disk_data->flush_event.data = disk_data;
    disk_data->flush_event.handler = my_disk_data_do_flush_handler;
    // return my_event_loop_add_ready_event(&disk_data->flush_event);
    // direct callbacks is more efficient than schedule.
    return my_disk_data_do_flush_handler(&disk_data->flush_event);
}

/*
 * Wait until the pending buf has no writers still copying into it, then
 * submit one AIO write covering the buf's filled region.  Completion lands
 * in my_disk_data_flush_buf_done().
 */
static void
my_disk_data_wait_and_flush_handler(my_event_t *event) {
    my_disk_data_t *disk_data = event->data;
    my_disk_buf_t *flush_buf = disk_data->pending_flush_buf;

    /* writers may still hold reserved slots — poll again on the next tick
     * (event is flush_event, handler is this function) */
    if (!my_disk_buf_can_flush(flush_buf)) {
        return my_event_loop_add_ready_event(event);
    }

    int64_t file_off = disk_data->vinfo->data_offset + flush_buf->start_off;
    my_aio_req_init(&disk_data->flush_aio_req, AIO_WRITE, disk_data->vinfo->fd,
        flush_buf->buf, my_disk_buf_data_len(flush_buf), file_off);

    my_dispatch_event_t *flush_ev = &disk_data->dispatch_flush_event;
    flush_ev->data = &disk_data->flush_aio_req;
    flush_ev->save_data = disk_data;
    flush_ev->handler = my_disk_data_flush_buf_done;
    return my_aio_manager_post_event(disk_data->aio_manager, flush_ev);
}

/*
 * Pop the oldest buf from the pending-flush queue (filled at the head,
 * drained from the tail) and hand it to the wait-and-flush stage.  When the
 * queue is empty the flushing flag is dropped so the next producer restarts
 * the chain.
 */
static void
my_disk_data_do_flush_handler(my_event_t *event) {
    my_disk_data_t *disk_data = event->data;
    my_queue_t *oldest = NULL;

    my_spin_lock_lock(&disk_data->flush_lock);
    if (!my_queue_empty(&disk_data->flush_pending_queue)) {
        oldest = my_queue_last(&disk_data->flush_pending_queue);
        my_queue_remove(oldest);
    } else {
        disk_data->flushing = MY_FALSE;
    }
    my_spin_lock_unlock(&disk_data->flush_lock);

    if (oldest == NULL) {
        return; // nothing left to flush
    }

    disk_data->pending_flush_buf = my_queue_data(oldest, my_disk_buf_t, queue);
    // reuse flush_event for the wait-then-write stage
    event->handler = my_disk_data_wait_and_flush_handler;
    return my_disk_data_wait_and_flush_handler(event);
}


static void // here write_mutex is already hold
my_disk_data_notify_flush_buf(my_disk_data_t *disk_data) {
    // Move the (full) active buf onto the flush queue and start the flush
    // chain if it is not already running.  The flushing flag is owned by
    // flush_lock; my_disk_data_do_flush_handler clears it when the queue
    // drains.
    my_spin_lock_lock(&disk_data->flush_lock);
    my_queue_insert_head(&disk_data->flush_pending_queue, &disk_data->active->queue);
    disk_data->active = NULL;  // caller must install a new active buf
    if (!disk_data->flushing) {
        disk_data->flushing = MY_TRUE;
        disk_data->flush_event.data = disk_data;
        disk_data->flush_event.handler = my_disk_data_do_flush_handler;
        my_event_loop_add_ready_event(&disk_data->flush_event);
    }
    my_spin_lock_unlock(&disk_data->flush_lock);
}

/* Deliver the finished (or failed) write back to the caller's callback. */
static void
my_disk_data_write_done(my_disk_write_context_t *context) {
    context->handler(context);
}

/*
 * A slot in active_buf is reserved: record the final on-volume offset and
 * serial, release the serializing mutex (the copy into the reserved region
 * needs no lock), perform the copy, and complete the write.
 */
static void
my_disk_data_pre_write_done(my_disk_write_context_t *context) {
    my_disk_data_t *disk_data = context->disk_data;
    my_cache_req_t *req = context->req;

    my_mutex_unlock(&disk_data->write_mutex);

    context->offset = context->active_buf->start_off + context->offset_in_buffer;
    context->write_serial = context->active_buf->write_serial;

    my_disk_buf_write(context->active_buf, req->key, req->data,
        context->crc32, context->offset_in_buffer);

    my_disk_data_write_done(context);
}

/*
 * Allocate a fresh buf from the pool for the new write cursor.  When the
 * pool is exhausted, park this dispatch event; my_disk_data_flush_buf_done()
 * re-posts it after returning a buf to the pool.
 */
static void
my_disk_data_switch_alloc_buf_handler(my_dispatch_event_t *dispatch_event) {
    my_disk_write_context_t *context = dispatch_event->data;
    my_disk_data_t *disk_data = context->disk_data;

    my_disk_buf_t *buf = NULL;
    my_spin_lock_lock(&disk_data->alloc_buf_lock);
    buf = my_disk_buf_pool_alloc(&disk_data->buf_pool,
        context->write_cursor.write_serial, context->write_cursor.write_pos);
    if (!buf) {
        /* BUG FIX: condition was inverted (`if (buf)`).  The pending event
         * must be registered only when allocation FAILS; the old code
         * re-posted the event after a successful alloc and stalled forever
         * when the pool was empty. */
        disk_data->pending_alloc_buf_event = dispatch_event;
    }
    my_spin_lock_unlock(&disk_data->alloc_buf_lock);

    if (!buf) {
        return; // waiting for flush-done wakeup
    }

    disk_data->active = buf;
    my_disk_membufs_put(&disk_data->mem_bufs, buf);

    // a brand-new buf must always have room for one aligned object
    int64_t offset_in_buffer = 0;
    my_cache_err_t err = my_disk_buf_write_hold(buf, context->aligned_size,
        &offset_in_buffer);
    if (err != CACHE_ERR_NO_ERR) {
        MY_LOG_FATAL0("disk_data", "buf_write_hold failed in new disk buf");
    }

    context->active_buf = buf;
    context->offset_in_buffer = offset_in_buffer;

    return my_disk_data_pre_write_done(context);
}

/*
 * The current region is full: trigger recycling (if space is low), queue
 * the active buf for flushing, then advance to the next write cursor.  If
 * no cursor is available yet, park this event; my_disk_data_recycle_done()
 * re-posts it once space has been reclaimed.
 */
static void
my_disk_data_switch_alloc_cursor_handler(my_dispatch_event_t *dispatch_event) {
    my_disk_write_context_t *context = dispatch_event->data;
    my_disk_data_t *disk_data = context->disk_data;

    my_disk_data_check_and_notify_recycle(disk_data);
    my_disk_data_notify_flush_buf(disk_data);

    my_disk_vinfo_write_cursor_t cursor;
    my_spin_lock_lock(&disk_data->alloc_cursor_lock);
    cursor = my_disk_vinfo_next_write_cursor(disk_data->vinfo);
    if (!cursor.vaild) {
        disk_data->pending_alloc_cursor_event = dispatch_event; // park until recycle done
    }
    my_spin_lock_unlock(&disk_data->alloc_cursor_lock);

    if (!cursor.vaild) {
        return; // resumed via my_disk_data_recycle_done()
    }

    context->write_cursor = cursor;
    dispatch_event->handler = my_disk_data_switch_alloc_buf_handler;
    return my_disk_data_switch_alloc_buf_handler(dispatch_event);
}

/*
 * Reserve aligned_size bytes in the active buf.  On success, continue
 * straight to the copy stage; otherwise switch to a fresh cursor + buf via
 * the request's dispatch event (which may suspend and resume later).
 * Called with write_mutex held.
 */
static void
my_disk_data_pre_write(my_disk_write_context_t *context) {
    my_disk_data_t *disk_data = context->disk_data;

    int64_t slot_off = 0;
    my_cache_err_t err = my_disk_buf_write_hold(disk_data->active,
        context->aligned_size, &slot_off);
    if (err != CACHE_ERR_NO_ERR) {
        my_dispatch_event_t *dispatch_event = &context->req->dispatch_event;
        dispatch_event->setup_thread = current_thread_base();
        dispatch_event->data = context;
        dispatch_event->handler = my_disk_data_switch_alloc_cursor_handler;
        return my_disk_data_switch_alloc_cursor_handler(dispatch_event);
    }

    context->active_buf = disk_data->active;
    context->offset_in_buffer = slot_off;
    return my_disk_data_pre_write_done(context);
}


/*
 * Write entry on the event loop.  Writes are serialized by write_mutex;
 * instead of blocking, retry a bounded number of times via a short timer
 * before failing the request with CACHE_ERR_RETRY_LIMIT.
 */
static void
my_disk_data_write_handler(my_event_t *event) {
    my_disk_write_context_t *context = event->data;
    my_cache_req_t *req = context->req;
    my_disk_data_t *disk_data = context->disk_data;

    if (my_mutex_trylock(&disk_data->write_mutex)) {
        return my_disk_data_pre_write(context);
    }

    if (context->retry_count > MY_MAX_RETRY_NUM) {
        req->err = CACHE_ERR_RETRY_LIMIT;
        req->errmsg = "disk cache busy, retry count reached limits";
        return my_disk_data_write_done(context);
    }

    context->retry_count++;
    return my_event_loop_add_timer(event, MY_RETRY_TIMEOUT_MS);
}

/*
 * Public write entry: size and checksum the object, reject anything larger
 * than one disk block, then run the write state machine starting at
 * my_disk_data_write_handler.  Completion (success or error) is delivered
 * through context->handler.
 */
void
my_disk_data_write(my_disk_data_t *disk_data, my_disk_write_context_t *context) {
    my_cache_req_t *req = context->req;

    context->disk_data = disk_data;
    // on-disk object = header + key bytes + payload bytes, page-aligned
    context->object_size = sizeof(my_disk_cache_object_t) + req->key->key_len +
        my_buf_chain_total_bytes(req->data);
    context->aligned_size = MY_ALIGN_PAGE_SIZE(context->object_size);

    if (context->aligned_size > MY_DISK_BLOCK_SIZE) {
        req->err = CACHE_ERR_INVALID;
        req->errmsg = "object_size cannot exceed the limit of block_size";
        return my_disk_data_write_done(context);
    }

    context->crc32 = my_buf_chain_crc32(req->data);

    my_event_t *event = &req->event;
    event->data = context;
    event->handler = my_disk_data_write_handler;
    return my_disk_data_write_handler(event);
}

/*
 * Finish a read request: release the staging buf (if one was allocated)
 * and hand the context to the caller's completion callback.
 */
static void
my_disk_data_read_done(my_disk_read_context_t *context) {
    if (context->buf) {
        my_buf_free(context->buf);
        /* BUG FIX: the old code set `context = NULL` here and then
         * dereferenced it below — a guaranteed NULL pointer dereference on
         * every read that allocated a buf.  The intent was to clear the
         * freed pointer inside the context. */
        context->buf = NULL;
    }

    return context->handler(context);
}

/*
 * AIO completion for a file-backed read: validate the object (I/O result,
 * minimum size, declared size, key match, CRC) and, on success, append the
 * payload to req->data.  Every path — success or failure — ends in
 * my_disk_data_read_done(), which frees context->buf and invokes the
 * caller's handler.
 */
static void
my_disk_data_read_in_file_done(my_dispatch_event_t *dispatch_event) {
    my_aio_req_t *aio_req = dispatch_event->data;
    my_disk_read_context_t *context = dispatch_event->save_data;
    my_cache_req_t *req = context->req;
    my_buf_t *buf = context->buf;

    // NOTE(review): eno/res are printed with %d elsewhere (e.g. the recycle
    // read path) but %lld here — confirm the field types and unify.
    if (aio_req->eno != 0 || aio_req->res != aio_req->nbytes) {
        MY_LOG_ERROR("disk_data", "read in disk file failed, eno: %lld, res: %lld, except: %lld",
            aio_req->eno, aio_req->res, aio_req->nbytes);
        req->err = CACHE_ERR_IO_ERR;
        req->errmsg = "read in disk file failed";
        return my_disk_data_read_done(context); // failed
    }
    buf->end = buf->pos + aio_req->res;

    // must at least contain a complete header before we touch obj fields
    if (aio_req->nbytes < sizeof(my_disk_cache_object_t)) {
        MY_LOG_ERROR("disk_data", "to short object, read nbytes: %lld, "
            "my_disk_cache_object_t: %lld", aio_req->nbytes,
            sizeof(my_disk_cache_object_t));
        req->err = CACHE_ERR_INVALID;
        req->errmsg = "to short object";
        return my_disk_data_read_done(context); // failed
    }

    // on-disk layout: header | key bytes | data bytes
    my_disk_cache_object_t *obj = (my_disk_cache_object_t *)context->buf->pos;
    int64_t object_size = sizeof(my_disk_cache_object_t) + obj->key_len + obj->data_len;
    if (aio_req->nbytes < object_size) {
        MY_LOG_ERROR("disk_data", "to short object, read nbytes: %lld, object size: %lld",
            aio_req->nbytes, object_size);
        req->err = CACHE_ERR_INVALID;
        req->errmsg = "to short object";
        return my_disk_data_read_done(context);
    }

    // the stored key must match the requested key exactly
    my_cache_key_t *key = req->key;
    if (key->key_len != obj->key_len ||
        memcmp(key->key, buf->pos + sizeof(my_disk_cache_object_t), key->key_len) != 0) {
        MY_LOG_ERROR0("disk_data", "key no matched in disk buf reader");
        req->err = CACHE_ERR_INVALID;
        req->errmsg = "key no matched";
        return my_disk_data_read_done(context);
    }

    // narrow the buf to the payload for the CRC check and the caller
    buf->pos += (sizeof(my_disk_cache_object_t) + obj->key_len);
    buf->end = buf->pos + obj->data_len;

    uint32_t crc32 = my_buf_crc32(buf);
    if (crc32 != obj->crc32) {
        MY_LOG_ERROR0("disk_data", "crc32 no matched");
        req->err = CACHE_ERR_INVALID;
        req->errmsg = "crc32 no matched";
        return my_disk_data_read_done(context);
    }

    // NOTE(review): buf is appended to req->data but read_done also frees
    // context->buf — presumably the chain copies or ref-counts the data;
    // confirm against my_buf_chain_append()'s contract.
    my_buf_chain_append(req->data, buf);
    return my_disk_data_read_done(context); // success
}

/*
 * Try to serve the read from an in-memory (not yet flushed) buf.  Returns
 * CACHE_ERR_ALREADY_FLUSHED when no resident buf covers the range, another
 * error when the buf's generation no longer matches, or the result of the
 * in-memory copy on success.
 */
static my_cache_err_t
my_disk_data_read_in_buf(my_disk_data_t *disk_data, my_disk_read_context_t *context) {
    my_cache_req_t *req = context->req;

    my_disk_buf_t *mem_buf = my_disk_membufs_get(&disk_data->mem_bufs,
        context->offset, context->aligned_size);
    if (mem_buf == NULL) {
        return CACHE_ERR_ALREADY_FLUSHED;
    }

    // verify the buf still holds the same generation of data we indexed
    my_cache_err_t err = my_disk_buf_read_hold(mem_buf, context->write_serial,
        context->offset, context->aligned_size);
    if (err != CACHE_ERR_NO_ERR) {
        return err;
    }

    return my_disk_buf_read(mem_buf, req->key, context->offset,
        context->aligned_size, req->data);
}

/*
 * Public read entry: first try the in-memory bufs; if the data was already
 * flushed, fall back to an AIO read from the volume file.  Completion (for
 * the file path) lands in my_disk_data_read_in_file_done(); the in-memory
 * paths complete synchronously through my_disk_data_read_done().
 */
void
my_disk_data_read(my_disk_data_t *disk_data, my_disk_read_context_t *context) {
    my_cache_req_t *req = context->req;
    context->disk_data = disk_data;
    
    my_cache_err_t err = my_disk_data_read_in_buf(disk_data, context);
    if (err == CACHE_ERR_NO_ERR) {
        req->err = CACHE_ERR_NO_ERR;
        return my_disk_data_read_done(context); // success
    }

    if (err != CACHE_ERR_ALREADY_FLUSHED) {
        req->err = err;
        req->errmsg = "failed in my_disk_data_read_in_buf";
        return my_disk_data_read_done(context); // failed
    }

    // staging buffer for the on-disk object; freed in my_disk_data_read_done
    // NOTE(review): buf_data is never explicitly freed here — presumably
    // my_buf_free() releases it; confirm the my_buf_alloc ownership contract.
    my_buf_data_t *buf_data = my_buf_data_alloc(context->aligned_size);
    my_buf_t *buf = my_buf_alloc(buf_data, buf_data->data, buf_data->data);
    context->buf = buf;

    // read the aligned region at data_offset + logical offset into buf->end
    my_aio_req_init(&context->aio_req, AIO_READ, disk_data->vinfo->fd,
        buf->end, context->aligned_size,
        disk_data->vinfo->data_offset + context->offset);

    my_dispatch_event_t *dispatch_event = &context->req->dispatch_event;
    dispatch_event->data = &context->aio_req;
    dispatch_event->save_data = context;
    dispatch_event->handler = my_disk_data_read_in_file_done;
    return my_aio_manager_post_event(disk_data->aio_manager, dispatch_event);
}

/*
 * Obtain the initial write cursor at startup.  If the volume has no free
 * region, force one full recycle pass (nothing needs rescuing at startup)
 * and retry; both cursors being invalid is unrecoverable.
 */
static my_disk_vinfo_write_cursor_t
my_disk_get_write_cursir_in_start(my_disk_data_t *disk_data) {
    my_disk_vinfo_write_cursor_t cursor =
        my_disk_vinfo_next_write_cursor(disk_data->vinfo);
    if (cursor.vaild) {
        return cursor;
    }

    // force recycle: reclaim the whole recycle window
    my_disk_vinfo_recycle_cursor_t recycle_cursor =
        my_disk_vinfo_next_recycle_cursor(disk_data->vinfo);
    if (!recycle_cursor.vaild) {
        MY_LOG_FATAL0("disk_data", "both recycle cursor and write cursor are invalid");
    }
    my_disk_vinfo_recycle_done(disk_data->vinfo, recycle_cursor.recycle_len);

    cursor = my_disk_vinfo_next_write_cursor(disk_data->vinfo);
    if (!cursor.vaild) {
        MY_LOG_FATAL0("disk_data", "unrecoverable write cursor invalid");
    }

    return cursor;
}

/*
 * Wire up collaborators, create the buf pool, obtain the initial write
 * cursor (recycling if necessary) and install the first active buf.
 * Returns MY_TRUE; unrecoverable startup states abort via fatal logging.
 */
my_bool_t
my_disk_data_open(my_disk_data_t *disk_data, my_disk_vinfo_t *vinfo,
    my_disk_hotspot_t *hotspot, my_aio_manager_t *aio_manager,
    my_disk_volume_t *volume, my_disk_data_reinsert_pt reinsert_handler) {

    disk_data->vinfo = vinfo;
    disk_data->hotspot = hotspot;
    disk_data->aio_manager = aio_manager;
    disk_data->volume = volume;
    disk_data->reinsert_handler = reinsert_handler;

    my_disk_buf_pool_open(&disk_data->buf_pool,
        g_cache_conf.storage_cache.buf_pool_capacity);

    // must succeed: forces a recycle pass if the volume is full
    my_disk_vinfo_write_cursor_t cursor = my_disk_get_write_cursir_in_start(disk_data);

    disk_data->active = my_disk_buf_pool_alloc(&disk_data->buf_pool,
        cursor.write_serial, cursor.write_pos);
    assert(disk_data->active);
    my_disk_membufs_put(&disk_data->mem_bufs, disk_data->active);

    disk_data->open = MY_TRUE;
    return MY_TRUE;
}

/* Idempotent shutdown: release the buf pool exactly once. */
void
my_disk_data_close(my_disk_data_t *disk_data) {
    if (!disk_data->open) {
        return; // never opened, or already closed
    }

    my_disk_buf_pool_close(&disk_data->buf_pool);
    disk_data->open = MY_FALSE;
}