/**
 *
 * asd_asdattr.c -- Active Storage Device
 *
 * Author: Zhao guangliang
 *	 
 *
 *		National Research Center
 *	     for High Performance Computers
 *
 * Copyright (c) 2005, 2006, 2007 NRCHPC, China	
 *
 **/

#include <linux/gfp.h>
#include <linux/rbtree.h>
#include <linux/bio.h>
#include "asd_kernel.h"
#include "asd_asdattr.h"
#include "asd_swap.h"
#include "asd_klog.h"
#include "asd_log_unit.h"
#include "asd_pool.h"

#ifdef _ASD_STUB_TEST
#include "sm_stub.h"
#endif

#define LOG_ID  LOG_ID_ASDATTR

extern void asd_insert_ref(struct rb_root *, struct ref *);
extern struct ref *asd_search_ref(struct rb_root *, __u64, __u64);
extern int asd_io_async(struct io_region *, int, 
	struct page_list *, bio_end_io_t *, void *, private_work_fn, void *);
extern void asd_set_bio_flag(struct bio *, void *);
/*
 * functions for pending_prop_node
 */
/*
 * Release every ref still queued on @node's ref_list.
 * The _safe iterator is required because ref_put() may free the entry.
 */
void pending_prop_node_cleanup(pending_prop_node_t *node)
{
	ref_t *ref = NULL, *n = NULL;
	__ENTER__("node:%p", node);
	list_for_each_entry_safe(ref, n, &node->ref_list, list)
		ref_put(ref);
	__LEAVE__("");
}
/*
 * Queue @node on @log_obj's pending_prop_list, under log_lock.
 */
void add_pending_prop_to_log(pending_prop_node_t *node, asd_log_obj_t *log_obj)
{
	struct list_head *pending = &log_obj->pending_prop_list;

	__ENTER__("node:%p:log_obj:%p", node, log_obj);
	spin_lock(&log_obj->log_lock);
	list_add(&node->log_list, pending);
	spin_unlock(&log_obj->log_lock);
	__LEAVE__("");
}

/*
 * Unlink @node from @log_obj's pending_prop_list, under log_lock.
 *
 * Bug fix: the original called list_del() on the list HEAD
 * (&log_obj->pending_prop_list) instead of on the node's own
 * &node->log_list entry, corrupting the whole pending list.
 * list_del_init() is used so that callers checking
 * list_empty(&node->log_list) afterwards see a consistent state.
 */
void del_pending_prop_from_log(pending_prop_node_t *node, asd_log_obj_t *log_obj)
{
	__ENTER__("node:%p:log_obj:%p", node, log_obj);
	spin_lock(&log_obj->log_lock);
	list_del_init(&node->log_list);
	spin_unlock(&log_obj->log_lock);
	__LEAVE__("");
}

/* Method table shared by every pending_prop_node. */
struct pending_prop_node_ops pending_prop_node_ops =  {
	.add_pending_prop_to_log = add_pending_prop_to_log,
	.del_pending_prop_from_log = del_pending_prop_from_log,
	.cleanup = pending_prop_node_cleanup,
};

/*
 * Prepare @node to track a pending property change on managed block
 * @mblk.  Two refs are allocated up front and parked on ref_list so
 * the log write callback never needs to allocate.
 *
 * Returns 0 on success, or the PTR_ERR() of a failed ref_get().
 */
int init_pending_prop_node(pending_prop_node_t *node, 
	__u64 mblk, __u8 prop)
{
	ref_t *ref[2];
	int ret = 0, i;
	__ENTER__("");
	
	/* zero ref[] so the error path can tell allocated from untouched slots */
	memset(ref, 0, sizeof(ref_t *) * 2);
	for (i = 0; i < 2; i++) {
		ref[i] = ref_get(NULL, GFP_KERNEL);
		if (IS_ERR(ref[i])) {
			klog(ERROR, "failed reserve ref[%d] for node:%p\n", i, node);
			ret = PTR_ERR(ref[i]);
			goto err_out;
		}
	}
	
	node->mblk = mblk;
	node->prop = prop;
	node->flush_cnt = 0;
	node->result = 0;
	INIT_LIST_HEAD(&node->ref_list);
	for (i = 0; i < 2; i++)
		list_add(&ref[i]->list, &node->ref_list);/* reserved for use by log write callback */
	atomic_set(&node->ref_cnt, 1);/* reference count of the node itself */
	node->lock = SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&node->log_list);/* linkage into the log obj's pending list */
	node->ops = &pending_prop_node_ops;
	
	goto out;
err_out:
	/* put only the refs that were actually allocated (NULL/IS_ERR skipped) */
	for (i = 0; i < 2; i++) {
		if( !(ref[i] == NULL || IS_ERR(ref[i])) )
			ref_put(ref[i]);
	}
out:
	__LEAVE__("ret:%d", ret);
	return ret;
}

/*
 * functions for pending_prop_obj
 */
/*
 * Record a pending property change for managed block @mblk.
 *
 * Allocates and initializes a node, publishes it in the radix tree
 * under obj->rwlock, and — when a mapping extent already exists for
 * @mblk — also queues the node on the log object's pending list.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from node init / radix_tree_insert.  NOTE(review): @len is not used
 * in this function.
 */
int pending_prop_set(pending_prop_obj_t *obj, __u64 mblk, __u64 len, __u8 prop)
{		
	pending_prop_node_t * node = NULL;
	asd_dev_t *asd = obj->ops->get_host(obj);
	struct tree_part *part = get_map_treepart(&asd->map, mblk);
	asd_log_obj_t *log_obj = asd->log_obj;
	map_extent_t *ret_ext;
	int ret = 0;
	unsigned long flag_map;
	
	__ENTER__("");

	node = kmalloc(sizeof(pending_prop_node_t), GFP_KERNEL);
	if (!node) {
		ret = -ENOMEM;
		klog(ERROR, "Failed to malloc for pending_prop_node\n");
		goto out;
	}

	ret = init_pending_prop_node(node, mblk, prop);
	if (ret < 0) {
		klog(ERROR, "Failed to init pending_prop_node\n");
		goto init_err;
	}

	/* publish the node and bump the counters atomically w.r.t. readers */
	write_lock(&obj->rwlock);
	ret = radix_tree_insert(obj->root, mblk, node);
	if (ret < 0) {
		klog(ERROR, "Failed to insert node to pending_prop_obj\n");
		write_unlock(&obj->rwlock);
		goto insert_err;
	}
	obj->node_cnt++;
	atomic_inc(&asd->pending_prop_cnt);
	write_unlock(&obj->rwlock);

	/* if the block already has a mapping extent, hook the node to the log */
	read_lock_irqsave(&part->lock, flag_map); 
	ret_ext = asd->map.ops->search_mext(part, mblk);
	if (ret_ext) {
		node->ops->add_pending_prop_to_log(node, log_obj);
	}
	read_unlock_irqrestore(&part->lock, flag_map); 

	goto out;
insert_err:
	node->ops->cleanup(node);
init_err:
	kfree(node);
out:
	__LEAVE__("ret:%d", ret);
	return ret;
}

/*
 * Look up the pending node for @mblk; on a hit, *rst receives the
 * pending property value.  Returns the node (not referenced) or NULL.
 *
 * Bug fix: on a miss the original left *rst untouched and then read it
 * in the __LEAVE__ trace — an uninitialized read of the caller's
 * variable.  *rst is now defined (0) on every path.
 */
pending_prop_node_t * pending_prop_query(pending_prop_obj_t *obj, __u64 mblk, __u8 *rst)
{
	pending_prop_node_t *node = NULL;

	__ENTER__("obj:%p:mblk:%llu", obj, mblk);
	*rst = 0;
	read_lock(&obj->rwlock);
	node = (pending_prop_node_t *)radix_tree_lookup(obj->root, mblk);
	if (node)
		*rst = node->prop;
	read_unlock(&obj->rwlock);
	__LEAVE__("node:%p:rst:%u", node, *rst);
	return node;
}

/*
 * Look up the pending node for @mblk and take a reference on it.
 * Returns NULL when no node is pending for the block.
 */
pending_prop_node_t * pending_prop_node_get(pending_prop_obj_t *obj, __u64 mblk)
{
	pending_prop_node_t *found;

	__ENTER__("obj:%p:mblk:%llu", obj, mblk);
	read_lock(&obj->rwlock);
	found = (pending_prop_node_t *)radix_tree_lookup(obj->root, mblk);
	if (found != NULL)
		atomic_inc(&found->ref_cnt);
	read_unlock(&obj->rwlock);
	__LEAVE__("node:%p", found);
	return found;
}

/*
 * Drop one reference on @node.  The final put — performed under the
 * write lock so it cannot race pending_prop_node_get() — unlinks the
 * node from the log list and the radix tree, then frees it and
 * adjusts the counters.
 */
void pending_prop_node_put(pending_prop_obj_t *obj, pending_prop_node_t *node)
{
	asd_dev_t *asd = obj->ops->get_host(obj);
	asd_log_obj_t *log_obj = asd->log_obj;

	__ENTER__("obj:%p:node:%p:mblk:%llu", obj, node, node->mblk);
	write_lock(&obj->rwlock);
	if (atomic_dec_and_test(&node->ref_cnt)) {
		if (!list_empty(&node->log_list))
			node->ops->del_pending_prop_from_log(node, log_obj);
		radix_tree_delete(obj->root, node->mblk);
		node->ops->cleanup(node);
		kfree(node);
		atomic_dec(&asd->pending_prop_cnt);
		obj->node_cnt--;
	}
	write_unlock(&obj->rwlock);
	__LEAVE__("");
}

/* Return the asd device that owns this pending_prop_obj. */
asd_dev_t *pending_obj_get_host(pending_prop_obj_t *obj)
{
	return obj->host;
}

/*
 * Tear down the pending_prop radix tree, releasing every remaining
 * node.  A non-empty tree at cleanup time is logged but still drained.
 *
 * Bug fixes:
 *  - the final assertion compared obj->node_cnt (already decremented
 *    to 0 by the loop) against cnt, so it failed whenever the tree was
 *    non-empty; it now checks that the tree was fully drained.
 *  - the whole gang-lookup body is skipped when node_cnt == 0, which
 *    also avoids declaring a zero-length VLA (undefined behavior).
 */
void pending_prop_obj_cleanup(pending_prop_obj_t *obj)
{
	__ENTER__("");
	if (obj->node_cnt != 0) {
		asd_dev_t *asd = obj->ops->get_host(obj);
		pending_prop_node_t *nodes[obj->node_cnt];
		unsigned int cnt, i;

		klog(ERROR, "There are %u nodes in pending_prop tree, cleanup anyway", obj->node_cnt);
		write_lock(&obj->rwlock);
		cnt = radix_tree_gang_lookup(obj->root, (void **)nodes, 0, obj->node_cnt);
		for (i = 0; i < cnt; i++) {
			radix_tree_delete(obj->root, nodes[i]->mblk);
			pending_prop_node_cleanup(nodes[i]);
			kfree(nodes[i]);
			obj->node_cnt--;
			atomic_dec(&asd->pending_prop_cnt);
		}
		write_unlock(&obj->rwlock);
		X_ASSERT(obj->node_cnt == 0);
	}
	__LEAVE__("");
}

/* Method table for pending_prop_obj. */
struct pending_prop_ops pending_prop_ops =	{
	.pending_prop_set =  pending_prop_set,
	.pending_prop_query = pending_prop_query,
	.pending_prop_node_get = pending_prop_node_get,
	.pending_prop_node_put = pending_prop_node_put,
	.get_host = pending_obj_get_host,
	.cleanup = pending_prop_obj_cleanup,
};

/*
 * Wire up a fresh pending_prop_obj for device @asd: empty radix tree,
 * unlocked rwlock, zero node count, default ops table.
 */
void init_pending_prop_obj(pending_prop_obj_t *obj, asd_dev_t *asd)
{
	__ENTER__("");
	obj->host = asd;
	obj->ops =  &pending_prop_ops;
	obj->node_cnt = 0;
	obj->rwlock = RW_LOCK_UNLOCKED;
	INIT_RADIX_TREE(obj->root, GFP_KERNEL);
	__LEAVE__("");
}

/*
 *funcions for phy_prop_node_ops
 */

/*
 * Set or clear (per @prop) the bit for physical block @pbid in
 * @node's in-core bitmap, and park @ref on the node's wait tree
 * until the change has been committed to disk.
 *
 * If a flush is already in flight (BIT_FLUSH) the node is marked
 * BIT_COPY so it gets flushed again afterwards; otherwise it is
 * marked BIT_DIRTY.  The pool-level handler is notified outside
 * the node lock.
 */
void set_prop(phy_prop_node_t *node, __u64 pbid, ref_t * ref, __u8 prop)
{
	__u32 offset_in_gran;
	struct phy_prop_obj * obj = node->ops->get_host(node);
	phy_dev_t *phy_dev = obj->ops->get_host(obj);
	asd_pool_t *ap = phy_dev->asdpool;
	unsigned long flag;
	
	__ENTER__("node:%p:pbid:%llu:ref:%p", node, pbid, ref);
	offset_in_gran = pbid % BITS_PER_PROP_NODE(ap);
	
	write_lock_irqsave(&node->rwlock, flag);
	if(prop){
		set_bit(offset_in_gran, node->gran.prop_disk_pages);
	}else{
		clear_bit(offset_in_gran, node->gran.prop_disk_pages);
	}
	if (test_bit(BIT_FLUSH, &node->flag))
		set_bit(BIT_COPY, &node->flag);
	else 
		set_bit(BIT_DIRTY, &node->flag);
	asd_insert_ref(node->wait_ref_root, ref);
	atomic_inc(&node->wait_ref_cnt);
	write_unlock_irqrestore(&node->rwlock, flag);

	ap->pool_prop_obj->ops->handle_phy_prop_node(ap->pool_prop_obj, node);	
	__LEAVE__("");
}

/* call by revoking write  */
/* call by revoking write  */
/*
 * Clear the property bit for @pbid; *prop reports whether the bit was
 * previously set.  Any ref waiting on this block is removed from the
 * wait tree and released — the ref_put() happens after dropping the
 * node lock to avoid nested locking.
 *
 * Bug fix: `ref` was logged by __ENTER__ while still uninitialized
 * (it is only assigned later by asd_search_ref); it is now
 * initialized to NULL before the trace reads it.
 */
void clear_prop(phy_prop_node_t *node, __u64 pbid, __u8 *prop)
{
	__u32 offset_in_gran;
	struct phy_prop_obj * obj = node->ops->get_host(node);
	phy_dev_t *phy_dev = obj->ops->get_host(obj);
	asd_pool_t *ap = phy_dev->asdpool;
	ref_t *ref = NULL;
	unsigned long flag;
	
	__ENTER__("node:%p:pbid:%llu:ref:%p", node, pbid, ref);
	offset_in_gran = pbid % BITS_PER_PROP_NODE(ap);
	
	write_lock_irqsave(&node->rwlock, flag);
	if (test_and_clear_bit(offset_in_gran, node->gran.prop_disk_pages))
		*prop = 1;
	else
		*prop = 0;
	if (test_bit(BIT_FLUSH, &node->flag))
		set_bit(BIT_COPY, &node->flag);
	else 
		set_bit(BIT_DIRTY, &node->flag);
	ref = asd_search_ref(node->wait_ref_root, phy_dev->phy_index, pbid);
	if (ref) {
		rb_erase(&ref->node, node->wait_ref_root);
		atomic_dec(&node->wait_ref_cnt);
	}
	write_unlock_irqrestore(&node->rwlock, flag);
	ap->pool_prop_obj->ops->handle_phy_prop_node(ap->pool_prop_obj, node);	
	
	/* to avoid nested locking */
	if (ref)
		ref_put(ref);
	__LEAVE__("");

}

/*
 * Read the property bit for @pbid into *prop (1 if set, else 0),
 * under the node's read lock.
 */
void query_prop(phy_prop_node_t *node, __u64 pbid, __u8 *prop)
{
	struct phy_prop_obj * obj = node->ops->get_host(node);
	phy_dev_t *phy_dev = obj->ops->get_host(obj);
	asd_pool_t *ap = phy_dev->asdpool;
	__u32 bit_off;
	unsigned long flag;

	__ENTER__("node:%p:pbid:%llu", node, pbid);
	bit_off = pbid % BITS_PER_PROP_NODE(ap);

	read_lock_irqsave(&node->rwlock, flag);
	*prop = test_bit(bit_off, node->gran.prop_disk_pages) ? 1 : 0;
	read_unlock_irqrestore(&node->rwlock, flag);
	__LEAVE__("prop:%p:*prop:%u", prop, *prop);
}

/*
 * Copy the freshly-read on-disk pages from @pl into @node's in-core
 * bitmap.  Called from the read-completion path.
 *
 * Bug fix: the declaration of `i` had been commented out while the
 * __DEBUG__ trace still used `i++`, which does not compile; the
 * counter is restored.
 */
void end_load(phy_prop_node_t *node, struct page_list *pl)
{
	struct page_list *pl_pos = pl;
	prop_disk_page_t *prop_page_pos = node->gran.prop_disk_pages;
	int i = 0;

	while(pl_pos) {
		__DEBUG__("page %d", i++);
		memcpy(prop_page_pos, page_address(pl_pos->page), PAGE_SIZE);
		pl_pos = pl_pos->next;
		prop_page_pos++;
	}
}
/*
 * return:
 * 0: well  prepared
 * 1: flushing or not dirty, don't flush
 */
/*
 * Snapshot @node's dirty bitmap into @pl for writing, and hand the
 * waiting refs over to the commit tree.  Returns 0 when the node is
 * prepared for flushing, 1 when it is clean or a flush is already in
 * flight (caller must not flush).
 *
 * Bug fix: the page-copy loop's __DEBUG__ trace used `i++` but `i`
 * was never declared, which does not compile; the counter is added.
 */
int start_commit(phy_prop_node_t *node, struct page_list *pl)
{
	unsigned long flag;
	int ret = 0, ref_cnt, i = 0;
	struct page_list *pl_pos = pl;
	prop_disk_page_t *prop_page_pos = node->gran.prop_disk_pages;

	__ENTER__("node:%p", node);
	write_lock_irqsave(&node->rwlock, flag);
	if (!test_bit(BIT_DIRTY, &node->flag) 
		|| test_and_set_bit(BIT_FLUSH, &node->flag)) {
		ret = 1;
		goto out;
	}
	X_ASSERT(!atomic_read(&node->commit_ref_cnt));
	ref_cnt = atomic_read(&node->wait_ref_cnt);
	if (ref_cnt) {
		/* transfer the whole wait tree to the commit tree in O(1) */
		node->commit_ref_root->rb_node = node->wait_ref_root->rb_node;
		atomic_set(&node->commit_ref_cnt, ref_cnt);
		node->wait_ref_root->rb_node = NULL;
		atomic_set(&node->wait_ref_cnt, 0);
	}
	while(pl_pos) {
		__DEBUG__("page %d", i++);
		memcpy(page_address(pl_pos->page), prop_page_pos, PAGE_SIZE);
		pl_pos = pl_pos->next;
		prop_page_pos++;
	}
out:
	write_unlock_irqrestore(&node->rwlock, flag);
	__LEAVE__("ret:%d", ret);
	return ret;
}

/*
 * Drain every ref from rbtree @from.  When @to is NULL the refs are
 * moved onto @list; otherwise they are re-inserted into tree @to.
 * Returns the number of refs handled.
 *
 * Bug fix: the @to branch inserted each node into the destination
 * tree WITHOUT first rb_erase()-ing it from @from, leaving @from's
 * root pointing at nodes whose rb linkage had been rewritten by the
 * insert.  Each node is now unlinked (after rb_next() is taken, which
 * remains valid) before being reused.
 */
static int handle_ref4tree
(struct rb_root *from, struct rb_root *to, struct list_head *list)
{
	struct rb_node *rb_node, *next;
	ref_t *ref;
	int cnt = 0;

	__ENTER__("from:%p:to:%p", from, to);
	rb_node = rb_first(from);
	while(rb_node) {
		ref = container_of(rb_node, ref_t, node);
		next = rb_next(rb_node);
		rb_erase(rb_node, from);
		if (likely(!to)){
			INIT_LIST_HEAD(&ref->list);
			list_add(&ref->list, list);
		}
		else
			asd_insert_ref(to, ref);
		cnt++;
		rb_node = next;
	}
	__LEAVE__("cnt:%d", cnt);
	return cnt;
}

/* in interrupt context */
/* in interrupt context */
/*
 * Completion of a prop-node flush.  On error, the commit-tree refs
 * are collected on del_list and put (and the pool ref count reduced);
 * on success they are merged back into the wait tree.  BIT_COPY set
 * during the flush causes BIT_DIRTY to survive so the node flushes
 * again.
 *
 * Bug fix: del_list is iterated unconditionally below, but the
 * original only initialized it on the error path — traversing an
 * uninitialized list head on success is undefined behavior.  It is
 * now initialized up front.
 */
void end_commit(phy_prop_node_t *node, int err)
{

	int cnt = 0;
	struct list_head del_list;
	struct phy_prop_obj * obj = node->ops->get_host(node);
	phy_dev_t *phy_dev = obj->ops->get_host(obj);
	asd_pool_t *ap = phy_dev->asdpool;
	ref_t *ref = NULL, *n = NULL;
	
	__ENTER__("node:%p:err:%d", node, err);
	
	INIT_LIST_HEAD(&del_list);
	write_lock(&node->rwlock);
	clear_bit(BIT_FLUSH, &node->flag);
	atomic_set(&node->commit_ref_cnt, 0);
	if(err) {
		cnt = handle_ref4tree(node->commit_ref_root, NULL, &del_list);
		if (!test_and_clear_bit(BIT_COPY, &node->flag))
			clear_bit(BIT_DIRTY, &node->flag);
		atomic_sub(cnt, &(ap->pool_prop_obj->total_ref_cnt));
	} else {
		cnt = handle_ref4tree(node->commit_ref_root, node->wait_ref_root, NULL);
		atomic_add(cnt, &node->wait_ref_cnt);
		clear_bit(BIT_COPY, &node->flag);
	}
	write_unlock(&node->rwlock);

	/* put refs outside the node lock to avoid nested locking */
	list_for_each_entry_safe(ref, n, &del_list, list) {
		list_del(&ref->list);
		ref_put(ref);
	}
		
	ap->pool_prop_obj->ops->handle_phy_prop_node(ap->pool_prop_obj, node);
	__LEAVE__("cnt:%d", cnt);
}

/* Return the phy_prop_obj that owns this node. */
struct phy_prop_obj *phy_prop_node_get_host(phy_prop_node_t *node)
{
	return node->host;
}

/*
 * Free @node's in-core bitmap.  The node must be quiescent: no flags
 * set and no refs outstanding (asserted).
 *
 * Bug fix: the pointer is NULLed after vfree() so a second cleanup
 * (or a later NULL check) cannot double-free or touch freed memory.
 */
void phy_prop_node_cleanup(phy_prop_node_t *node)
{
	__ENTER__("flag:%d:wait_ref_cnt:%d:commit_ref_cnt:%d", 
		node->flag, atomic_read(&node->wait_ref_cnt), atomic_read(&node->commit_ref_cnt));
	X_ASSERT(!node->flag && !atomic_read(&node->wait_ref_cnt)
		&& !atomic_read(&node->commit_ref_cnt)); 
	if (node->gran.prop_disk_pages) {
		vfree(node->gran.prop_disk_pages);
		node->gran.prop_disk_pages = NULL;
	}
}

/* Method table shared by every phy_prop_node. */
struct phy_prop_node_ops phy_prop_node_ops = {
	.set_prop = set_prop,
	.clear_prop = clear_prop,
	.query_prop = query_prop,
	.end_load = end_load,
	.start_commit = start_commit, 
	.end_commit = end_commit,
	.get_host = phy_prop_node_get_host,
	.cleanup = phy_prop_node_cleanup
};

/*
 * Initialize one prop node of @host and vmalloc its in-core page
 * array.  The array is deliberately NOT zeroed: it is filled by
 * loading the attribute device.  Returns 0 or -ENOMEM.
 *
 * Bug fix: vmalloc() returns NULL on failure, and PTR_ERR(NULL) is 0,
 * so the original reported success on allocation failure; it now
 * returns -ENOMEM.
 */
int init_phy_prop_node(phy_prop_node_t *node, struct phy_prop_obj *host, __u64 node_id)
{
	phy_dev_t *phydev = host->ops->get_host(host);
	asd_pool_t *ap = phydev->asdpool;
	int page_cnt = GRAN_SIZE(ap) / PAGE_SIZE, ret = 0;

	/*at this moment, don't memset mem to zero
	 *by reading attribute dev, init these pages
	 */
	node->gran.prop_disk_pages = vmalloc(sizeof(prop_disk_page_t) * page_cnt);
	if (!node->gran.prop_disk_pages) {
		klog(ERROR, "vmalloc for prop_disk_pages failed\n");
		ret = -ENOMEM;
		goto mem_err;
	}
	node->node_id = node_id;
	INIT_LIST_HEAD(&node->dirty_list);
	node->rwlock = RW_LOCK_UNLOCKED;
	node->flag = 0;
	memset(node->wait_ref_root, 0, sizeof(struct rb_root));
	atomic_set(&node->wait_ref_cnt, 0);
	memset(node->commit_ref_root, 0, sizeof(struct rb_root));
	atomic_set(&node->commit_ref_cnt, 0);
	node->host = host;
	node->ops = &phy_prop_node_ops;
mem_err:
	return ret;
}


/*
 * functions for phy_prop_ops
 */

/* if nessary, the caller should lock obj */
/* if nessary, the caller should lock obj */
/*
 * Delete and free every prop node indexed 0..obj->cnt-1 from the
 * radix tree.
 *
 * Bug fixes: the original called `node->ops->cleanup(nodes[i])` —
 * `nodes` is undeclared here (does not compile); and it dereferenced
 * the radix_tree_delete() result without a NULL check, which matters
 * when teardown runs after a partially-completed init.
 */
void phy_prop_cleanup(struct phy_prop_obj *obj)
{
	phy_prop_node_t *node;
	__u64 i;

	__ENTER__("");
	for(i = 0; i < obj->cnt; i++) {
		node = (phy_prop_node_t *)radix_tree_delete(obj->root, i);
		if (!node)
			continue;
		node->ops->cleanup(node);
		kfree(node);
	}
	__LEAVE__("");
}

/*
 * bio completion for a prop-node flush (old 3-argument bi_end_io
 * style).  Finishes the commit on the node, notifies the optional
 * object-level context, then releases the page list, the bio and the
 * per-node context.
 */
int flush_phy_prop_node_callback(struct bio *bio, unsigned int bytes, int err)
{
	prop_node_ctx_t *node_ctx = (prop_node_ctx_t *)bio->bi_private;
	prop_obj_ctx_t *obj_ctx = (prop_obj_ctx_t *)node_ctx->obj_ctx;

	__ENTER__("bio:%p:err:%d", bio, err);

	node_ctx->node->ops->end_commit(node_ctx->node, err);

	if (obj_ctx) {
	//	atomic_set(&obj_ctx->err, err);
		obj_ctx->callback(obj_ctx, err);
	}

	free_page_list(node_ctx->pl);
	bio_put(bio);
	kfree(node_ctx);
	
	__LEAVE__("");
	return 0;
}

/*
 * bio completion for a prop-node load.  On success the read pages are
 * copied into the node's in-core bitmap via end_load(); the optional
 * per-node callback is invoked either way, then all I/O resources are
 * released.
 */
int load_phy_prop_node_callback(struct bio *bio, unsigned int bytes, int err)
{
	prop_node_ctx_t *node_ctx = (prop_node_ctx_t *)bio->bi_private;
	phy_prop_ctx_t *obj_ctx = (phy_prop_ctx_t *)node_ctx->obj_ctx;

	__ENTER__("bio:%p:err:%d", bio, err);
	if (!err) {
		node_ctx->node->ops->end_load(node_ctx->node, node_ctx->pl);
	}

	if (obj_ctx)
		node_ctx->callback(obj_ctx, err);

	free_page_list(node_ctx->pl);
	bio_put(bio);
	kfree(node_ctx);
	
	__LEAVE__("");
	return 0;
}

/*
 * Per-node completion for load_all_nodes(): record any error in the
 * shared context and drop one outstanding-load count.
 */
int load_all_nodes_callback(phy_prop_ctx_t *obj, int err)
{
	if (err != 0)
		atomic_set(&obj->err, err);
	atomic_dec(&obj->cnt);
	return 0;
}

/* Map a prop-node id to its absolute block id on the attribute device. */
static __u64 get_phy_prop_node_blk_id(struct phy_prop_obj *obj, __u64 node_id)
{
	__u64 base = obj->host->prop_dev_start_blk;

	return base + node_id;
}

/*
 * Submit async READ (load) or WRITE (flush) I/O for one prop node's
 * on-disk pages.  A prop_node_ctx_t carries the node, page list and
 * optional object-level context to the completion callback.
 *
 * Bug fixes:
 *  - memset used sizeof(node_ctx) (pointer size) instead of
 *    sizeof(*node_ctx), leaving most of the context uninitialized;
 *  - kmalloc failure returned PTR_ERR(NULL) == 0 (success);
 *  - the page-list error message was a copy-paste of the kmalloc one.
 */
static int __io_for_prop_node
(struct phy_prop_obj *obj, phy_prop_node_t *node, void *obj_ctx, int rw)
{
	phy_dev_t *phydev = obj->host;
	asd_pool_t *ap = phydev->asdpool;
	int page_cnt = GRAN_SIZE(ap) / PAGE_SIZE, ret = 0;
	prop_node_ctx_t *node_ctx;
	struct io_region where[1];
	__u64 blk_id = get_phy_prop_node_blk_id(obj, node->node_id);
	
	__ENTER__("");
	node_ctx = kmalloc(sizeof(prop_node_ctx_t), GFP_KERNEL);
	if (!node_ctx) {
		klog(ERROR, "kmalloc failed for node_ctx \n");
		ret = -ENOMEM;
		goto out;
	}
	memset(node_ctx, 0, sizeof(*node_ctx));
	node_ctx->node = node;
	if (obj_ctx && rw == READ) {
		node_ctx->obj_ctx = obj_ctx;
		node_ctx->callback = load_all_nodes_callback;
	}
	if (obj_ctx && rw == WRITE) {
		node_ctx->obj_ctx = obj_ctx;
	}
	ret = alloc_page_list(&node_ctx->pl, page_cnt, GFP_KERNEL);
	if (ret < 0) {
		klog(ERROR, "alloc_page_list failed for node_ctx->pl \n");
		goto page_err;
	}

	where->bdev = ap->propdev->bd;
	where->count = 1;
	where->sector = blk_id * (GRAN_SIZE(ap) >> SECTOR_BITS);

	if (READ == rw)
		ret = asd_io_async(where, READ, node_ctx->pl, load_phy_prop_node_callback,
			(void *)node_ctx, asd_set_bio_flag, NULL);
	else {
		/* NOTE(review): start_commit()'s "don't flush" return (1) is
		 * ignored here — preserved from the original; verify intent. */
		node->ops->start_commit(node, node_ctx->pl);
		ret = asd_io_async(where, rw, node_ctx->pl, flush_phy_prop_node_callback,
			(void *)node_ctx, asd_set_bio_flag, NULL);
	}

	X_ASSERT(ret >= 0);
	goto out;
page_err:
	kfree(node_ctx);
out:
	__LEAVE__("ret:%d", ret);
	return ret;
}

/* Read one prop node's on-disk pages asynchronously; completion is
 * load_phy_prop_node_callback via __io_for_prop_node(). */
static int load_phy_prop_node
(struct phy_prop_obj *obj, phy_prop_node_t *node, void *obj_ctx)
{
	return __io_for_prop_node(obj, node, obj_ctx, READ);
}

/*
 * Wait until every outstanding node load has completed, i.e.
 * obj_ctx->cnt drops to zero (decremented by the load callbacks);
 * wait_for_condition() supplies the per-iteration wait.
 */
static void wait_for_node_loading(phy_prop_ctx_t *obj_ctx)
{
	while(atomic_read(&obj_ctx->cnt)) {
		wait_for_condition("load phy prop");
	}
}

int load_all_nodes(struct phy_prop_obj *obj)
{
	phy_prop_node_t *nodes[obj->node_cnt];
	phy_prop_ctx_t *obj_ctx;
	__u64 cnt, i, load_cnt = 0;
	int ret = 0;

	__ENTER__("");
	
	obj_ctx = kmalloc(sizeof(phy_prop_ctx_t), GFP_KERNEL);
	if (!obj_ctx) {
		klog(ERROR, "kmalloc for obj_ctx failed \n");
		ret = PTR_ERR(obj_ctx);
		goto ctx_err;
	}
	atomic_set(&obj_ctx->cnt, obj->node_cnt);

	cnt = radix_tree_gang_lookup(obj->root, (void **)nodes, 0, obj->node_cnt);
	X_ASSERT(cnt == obj->node_cnt);
	for(i = 0; i < cnt; i++) {
		nodes[i]->ops->cleanup(nodes[i]);
		ret = load_phy_prop_node(obj, nodes[i], obj_ctx);
		if (ret < 0) {
			klog(ERROR, "load_phy_prop_node ");
			goto load_err;
		}
		load_cnt ++;
	}
	goto out;

load_err:
	for (i = load_cnt; i < cnt; i++) /* this func can free obj_ctx */
		load_all_nodes_callback(obj_ctx, ret);
out:
	wait_for_node_loading(obj_ctx);	
	ret = atomic_read(&obj_ctx->err);
	kfree(obj_ctx);
ctx_err:
	__LEAVE__("ret:%d", ret);
	return ret;
}

// notice: the caller should lock phy_prop_obj outside
/* Translate a physical block id to the id of the prop node covering it. */
int get_phy_prop_node_id(struct phy_prop_obj *obj, __u64 pbid)
{
	asd_pool_t *ap = obj->host->asdpool;
	int node_id = pbid / BITS_PER_PROP_NODE(ap);
	__ENTER__("obj:%p:pbid:%llu", obj, pbid);
	__LEAVE__("ret::node_id:%d", node_id);
	return node_id;
}

/*
 * Look up the prop node with index @node_id; returns NULL if absent.
 * The caller is expected to hold obj's lock (see __phy_prop_handle).
 *
 * Bug fix: the __ENTER__ trace used %llu for the int @node_id —
 * a printf-style format/argument mismatch (undefined behavior).
 */
phy_prop_node_t *get_phy_prop_node_by_id(struct phy_prop_obj *obj, int node_id)
{
	phy_prop_node_t *node =  NULL;

	__ENTER__("node_id:%d", node_id);
	node = (phy_prop_node_t *)radix_tree_lookup(obj->root, node_id);
	__LEAVE__("node:%p", node);
	return node;
}

/*
 * Locate the prop node covering @pbid and dispatch one of
 * SET_PROP / CLERA_PROP / QUERY_PROP to it, all under obj->lock.
 * @ref is required only for SET_PROP; @prop is an input for SET_PROP
 * and an output for the other two.  (The "CLERA_PROP" spelling is the
 * enum name from the header.)
 */
static void __phy_prop_handle(struct phy_prop_obj *obj, __u64 pbid, ref_t * ref, __u8*prop, int type)
{
	unsigned long flag;
	asd_pool_t *ap = obj->host->asdpool;
	int node_id = pbid / BITS_PER_PROP_NODE(ap);
	phy_prop_node_t *node = NULL;
	
	__ENTER__("obj:%p:pbid:%llu:ref:%p:node_id:%d", obj, pbid, ref, node_id);
	X_ASSERT(pbid <= obj->host->blk_count);
	write_lock_irqsave(&obj->lock, flag);
	node = get_phy_prop_node_by_id(obj, node_id);
	X_ASSERT(node);
	switch (type) {
		case SET_PROP:
			X_ASSERT(ref);
			node->ops->set_prop(node, pbid, ref, *prop);
			break;
		case CLERA_PROP:
			node->ops->clear_prop(node, pbid, prop);
			break;
		case QUERY_PROP:
			node->ops->query_prop(node, pbid, prop);
			break;
		default:
			BUG();
	}
	write_unlock_irqrestore(&obj->lock, flag);
	__LEAVE__("");
}

/* Set the property bit for @pbid; @ref waits on the commit. */
void phy_prop_set(struct phy_prop_obj *obj, __u64 pbid, ref_t *ref, __u8 *prop)
{
	__phy_prop_handle(obj, pbid, ref, prop, SET_PROP);
}

/* Clear the property bit for @pbid; *prop returns the previous value. */
void phy_prop_clear(struct phy_prop_obj *obj, __u64 pbid, __u8 *prop)
{
	__phy_prop_handle(obj, pbid, NULL, prop, CLERA_PROP);
}

/* Read the property bit for @pbid into *prop. */
void phy_prop_query(struct phy_prop_obj *obj, __u64 pbid, __u8 *prop)
{
	__phy_prop_handle(obj, pbid, NULL,  prop, QUERY_PROP);
}

/* Return the physical device that owns this phy_prop_obj. */
phy_dev_t *phy_prop_obj_get_host(struct phy_prop_obj *obj)
{
	return obj->host;
}

/* Write one prop node's bitmap to disk asynchronously. */
int flush_phy_prop_node(struct phy_prop_obj *obj, phy_prop_node_t *node, struct prop_obj_ctx_s *obj_ctx)
{
	return __io_for_prop_node(obj, node, obj_ctx, WRITE);
}

/* Method table for phy_prop_obj. */
struct phy_prop_ops phy_prop_ops = {
	.load_all_nodes = load_all_nodes,
	.phy_prop_set = phy_prop_set,
	.phy_prop_clear = phy_prop_clear,
	.phy_prop_query = phy_prop_query,
	.get_host = phy_prop_obj_get_host,
	.flush_phy_prop_node = flush_phy_prop_node,
	.cleanup = phy_prop_cleanup,
	.get_phy_prop_node_id = get_phy_prop_node_id,
	.get_phy_prop_node_by_id = get_phy_prop_node_by_id,
};

/*
 * Create one phy_prop_node per BITS_PER_PROP_NODE-sized chunk of the
 * physical device, insert them in the radix tree, and load their
 * bitmaps from the attribute device.
 *
 * Bug fixes vs. the original:
 *  - obj->host and obj->ops were dereferenced (blk_count at the top,
 *    load_all_nodes near the end) BEFORE being assigned at the bottom;
 *    the object is now wired up first;
 *  - kmalloc failure returned PTR_ERR(NULL) == 0 (success);
 *  - a node that failed init or radix insertion was leaked.
 */
int init_phy_prop_obj(struct phy_prop_obj *obj, phy_dev_t *phydev)
{
	__u64 blk_cnt, node_cnt, i;
	phy_prop_node_t *node;
	int ret = 0;

	/* wire up the object before anything reads obj->host / obj->ops */
	obj->host = phydev;
	obj->ops = &phy_prop_ops;
	obj->lock = RW_LOCK_UNLOCKED;
	obj->node_cnt = 0;

	blk_cnt = obj->host->blk_count;
	node_cnt = blk_cnt / BITS_PER_PROP_NODE(obj->host->asdpool);

	__ENTER__("node_cnt:%llu", node_cnt);
	for (i = 0; i < node_cnt; i++) {
		node = kmalloc(sizeof(phy_prop_node_t), GFP_KERNEL);
		if (!node) {
			klog(ERROR, "kmalloc mem for node %llu failed \n", i);
			ret = -ENOMEM;
			goto mem_err;
		}
		ret = init_phy_prop_node(node, obj, i);
		if (ret < 0) {
			klog(ERROR, "init_phy_prop_ndoe failed\n");
			kfree(node);
			goto mem_err;
		}
		ret = radix_tree_insert(obj->root, i, node);
		if (ret < 0) {
			klog(ERROR, "Failed to insert node %llu to phy_prop_obj\n", i);
			node->ops->cleanup(node);
			kfree(node);
			goto mem_err;
		}
		obj->node_cnt++;
	}

	ret = obj->ops->load_all_nodes(obj);
	if (ret < 0) {
		klog(ERROR, "load_all_nodes failed \n");
		goto mem_err;
	}

	goto out;
mem_err:
	phy_prop_cleanup(obj);
out:
	__LEAVE__("ret:%d", ret);
	return ret;
}

