#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>

#include <trace/events/block.h>


#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"



/*
 * Allocate and attach the per-queue scheduler data (struct row_data).
 * Called when the "row" elevator is switched onto @q.
 *
 * TODO: stub — allocate struct row_data, init per-priority queues, and
 * set q->elevator->elevator_data before this scheduler can do real work.
 * Returning without a value from a non-void function is undefined
 * behavior once blk-mq consumes the result, so return success explicitly.
 */
static int row_init_queue(struct request_queue *q, struct elevator_type *e)
{
	return 0;
}

/*
 * Tear down per-queue scheduler state created by row_init_queue().
 * TODO: stub — nothing is allocated yet, so nothing to free.
 */
static void row_exit_queue(struct elevator_queue *q)
{}

/*
 * Per-hardware-queue setup hook.
 *
 * TODO: stub — no per-hctx state exists yet.  Must return a value:
 * blk-mq treats a non-zero result as failure, and falling off the end
 * of a non-void function is undefined behavior.
 */
static int row_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
{
	return 0;
}

/*
 * Per-hardware-queue teardown hook.
 * TODO: stub — mirrors row_init_hctx(), which allocates nothing yet.
 */
static void row_exit_hctx(struct blk_mq_hw_ctx *hctx)
{}

/*
 * Notification that the hardware queue depth changed; schedulers use
 * this to resize internal shallow-depth limits.
 * TODO: stub — no depth-dependent state yet.
 */
static void row_depth_updated(struct blk_mq_hw_ctx *hctx)
{}

static bool row_allow_bio_merge(struct request_queue *q, struct request *rq, struct bio *bio)
{}

static bool row_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs)
{}

/*
 * Requeue hook: @rq is being put back after a dispatch that could not
 * complete.  TODO: stub — per-request scheduler state is not tracked yet.
 */
static void row_finish_requeue(struct request *rq)
{}

static int row_request_merge(struct request_queue *q, struct request **req, struct bio *bio)
{}

/*
 * Notification that @req grew by a front/back merge (@type says which).
 * TODO: stub — nothing to re-sort or re-account yet.
 */
static void row_request_merged(struct request_queue *q, struct request *req, enum elv_merge type)
{}

/*
 * Notification that another request was merged into @rq and is being
 * discarded.  TODO: stub.
 * NOTE(review): every other hook here carries the row_ prefix; consider
 * renaming to row_requests_merged for consistency (update the ops table
 * together with the definition).
 */
static void requests_merged(struct request_queue *q, struct request *rq)
{}

/*
 * Optionally cap the tag depth for an allocation (e.g. to throttle
 * async writes) by setting data->shallow_depth.
 * TODO: stub — no depth limiting policy yet.
 */
static void row_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{}

/*
 * Initialize the elevator-private slots of a freshly allocated request
 * so later hooks can distinguish "never touched by this scheduler".
 */
static void row_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
	rq->elv.priv[1] = NULL;
}

/*
 * Release any per-request scheduler state before @rq is freed.
 * TODO: stub — row_prepare_request() only NULLs elv.priv, so there is
 * nothing to release yet.
 */
static void row_finish_request(struct request *rq)
{}

/*
 * Queue incoming requests into the scheduler's internal structures.
 * TODO: stub.
 * NOTE(review): blk-mq's insert_requests hook conventionally takes a
 * struct list_head * of requests (plus an at_head flag), not a single
 * struct request * — confirm against the target kernel's
 * elevator_mq_ops before wiring this up.
 */
static void row_insert_requests(struct blk_mq_hw_ctx *hctx, struct request *rq)
{}

static struct request *row_dispatch_request(struct blk_mq_hw_ctx *hctx)
{}


static bool row_has_work(struct blk_mq_hw_ctx *hctx)
{}

/*
 * Completion accounting hook for a request from @rowq.
 *
 * Fix: the original declared BOTH parameters as `rowq`, a hard compile
 * error; the second is the per-device row_data and is now named `rd`.
 * TODO: stub.
 * NOTE(review): blk-mq's completed_request hook conventionally takes a
 * struct request * — confirm this custom signature matches how the ops
 * table is meant to call it.
 */
static void row_completed_request(struct row_queue *rowq, struct row_data *rd)
{}


/*
 * Per-io_context initialization, paired with row_exit_icq() below.
 *
 * Fix: the original line had no return type, no parameter list, and no
 * body — a compile error — and the ops table expects the standard
 * void (*init_icq)(struct io_cq *) signature.  TODO: stub.
 */
static void row_init_icq(struct io_cq *icq)
{}

/*
 * Per-io_context teardown, paired with row_init_icq().
 * TODO: stub — no per-icq state is created yet.
 */
static void row_exit_icq(struct io_cq *icq)
{}







//  Helper sysfs functions/definitions for ROW attributes
/*
 * Format one unsigned tunable into @page for a sysfs read.
 *
 * Returns the number of bytes written.
 * Fix: @var is unsigned, so use %u — the original %d would print large
 * values as negative numbers (undefined formatting per the C standard).
 */
static ssize_t row_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%u\n", var);
}

/*
 * Parse a decimal value from a sysfs write into *var.
 *
 * Returns @count on success or a negative errno from kstrtoul().
 * Fixes: the original cast `unsigned int *` to `unsigned long *`, so on
 * 64-bit kernels kstrtoul() wrote 8 bytes through a 4-byte object,
 * corrupting adjacent memory; it also ignored the parse error entirely.
 * Parse into a local unsigned long instead, then assign.
 */
static ssize_t row_var_store(unsigned int *var, char *page, ssize_t count)
{
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (err)
		return err;

	*var = val;
	return count;
}


#define SHOW_FUCTION(__FUNC, __VAR) \  
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{
    struct row_data *rowd -> elevator_data;
    int __data = __VAR;
    return row_var_show(__data, (page));
}
SHOW_FUNCTION(row_hp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum);
SHOW_FUNCTION(row_rp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum);
SHOW_FUNCTION(row_hp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum);
SHOW_FUNCTION(row_rp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum);
SHOW_FUNCTION(row_rp_write_quantum_show,
	rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum);
SHOW_FUNCTION(row_lp_read_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum);
SHOW_FUNCTION(row_lp_swrite_quantum_show,
	rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum);
SHOW_FUNCTION(row_rd_idle_data_show, rowd->rd_idle_data.idle_time_ms);
SHOW_FUNCTION(row_rd_idle_data_freq_show, rowd->rd_idle_data.freq_ms);
SHOW_FUNCTION(row_reg_starv_limit_show,
	rowd->reg_prio_starvation.starvation_limit);
SHOW_FUNCTION(row_low_starv_limit_show,
	rowd->low_prio_starvation.starvation_limit);
#undef SHOW_FUNCTION



/*
 * Generate one sysfs "store" handler per tunable: parse, clamp to
 * [MIN, MAX], and write through __PTR (evaluated with `rowd` in scope).
 *
 * Fixes vs. the original: the first two macro lines lacked continuation
 * backslashes, so only a broken one-line macro was defined; the parse
 * result was never checked; and the argument types did not match
 * row_var_store() (int * vs unsigned int *, const char * vs char *) —
 * made explicit with casts here rather than silently mis-typed.
 */
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct row_data *rowd = e->elevator_data;			\
	int __data;							\
	ssize_t ret = row_var_store((unsigned int *)&__data,		\
				    (char *)(page), count);		\
	if (ret < 0)							\
		return ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __data;						\
	return ret;							\
}
STORE_FUNCTION(row_hp_read_quantum_store,
&rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 1, INT_MAX);
STORE_FUNCTION(row_rp_read_quantum_store,
			&rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum,
			1, INT_MAX);
STORE_FUNCTION(row_hp_swrite_quantum_store,
			&rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum,
			1, INT_MAX);
STORE_FUNCTION(row_rp_swrite_quantum_store,
			&rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum,
			1, INT_MAX);
STORE_FUNCTION(row_rp_write_quantum_store,
			&rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum,
			1, INT_MAX);
STORE_FUNCTION(row_lp_read_quantum_store,
			&rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum,
			1, INT_MAX);
STORE_FUNCTION(row_lp_swrite_quantum_store,
			&rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum,
			1, INT_MAX);
STORE_FUNCTION(row_rd_idle_data_store, &rowd->rd_idle_data.idle_time_ms,
			1, INT_MAX);
STORE_FUNCTION(row_rd_idle_data_freq_store, &rowd->rd_idle_data.freq_ms,
			1, INT_MAX);
STORE_FUNCTION(row_reg_starv_limit_store,
			&rowd->reg_prio_starvation.starvation_limit,
			1, INT_MAX);
STORE_FUNCTION(row_low_starv_limit_store,
			&rowd->low_prio_starvation.starvation_limit,
			1, INT_MAX);
#undef STORE_FUNCTION







/*
 * sysfs attribute table: each entry wires a readable/writable tunable
 * to its generated row_<name>_show / row_<name>_store pair.
 */
#define ROW_ATTR(name) \
	__ATTR(name, S_IRUGO | S_IWUSR, row_##name##_show, row_##name##_store)

static struct elv_fs_entry row_attrs[] = {
	ROW_ATTR(hp_read_quantum),
	ROW_ATTR(rp_read_quantum),
	ROW_ATTR(hp_swrite_quantum),
	ROW_ATTR(rp_swrite_quantum),
	ROW_ATTR(rp_write_quantum),
	ROW_ATTR(lp_read_quantum),
	ROW_ATTR(lp_swrite_quantum),
	ROW_ATTR(rd_idle_data),
	ROW_ATTR(rd_idle_data_freq),
	ROW_ATTR(reg_starv_limit),
	ROW_ATTR(low_starv_limit),
	__ATTR_NULL
};

static struct elevator_type iosched_row_mq ={
    .ops = {
        .init_sched = row_init_queue,
        .exit_sched = row_exit_queue,
        .init_hctx = row_init_hctx,
        .exit_hctx = row_exit_hctx, 
        .depth_updated = row_depth_updated,
        .allow_merge = row_allow_bio_merge,
        .bio_merge = row_bio_merge,
        .requeue_request =  row_finish_requeue_request,
        .request_merge = row_request_merge, 
        .request_merged = row_request_merged,
        .requests_merged = requests_merged,
        .limit_depth = row_limit_depth,
        .prepare_request = row_prepare_request,
        .finish_request = row__finish_requeue_request,
        .insert_requests = row_insert_requests,
        .dispatch_request = row_dispatch_request,
        .has_work = row_has_work,
        .completed_request = row_completed_request,
        .former_request = elv_rb_former_request,
        .next_request = elv_rb_latter_request, 
        .init_icq = row_init_icq,
        .exit_icq = row_exit_icq,
    },
        
    .icq_size = sizeof(struct row_io_cq) ,
    .icq_align = __alignof__(struct row_io_cq) ,
    .elevator_attrs = row_attrs,
    .elevator_name = "row" ,
    .elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("row-iosched");

/*
 * Module entry point: register the scheduler with the elevator core.
 *
 * Fix: the original discarded elv_register()'s result and returned 0
 * unconditionally, so a failed registration (e.g. duplicate name)
 * would leave a loaded module that registered nothing.  Propagate the
 * error instead.
 */
static int __init row_init(void)
{
	return elv_register(&iosched_row_mq);
}

/* Module exit point: undo the elv_register() done in row_init(). */
static void __exit row_exit(void)
{
	elv_unregister(&iosched_row_mq);
}


module_init(row_init);
module_exit(row_exit);

MODULE_AUTHOR("xxx");
MODULE_LICENSE("GPL");
/* Fix: was misspelled MOUDLE_DESCRIPTION — an undefined macro and a
 * compile error. */
MODULE_DESCRIPTION("Read Over Write IO Scheduler to support MQ");