#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/version.h>
#include <linux/socket.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/tcp.h>

#define	DEBUG_LOG
#ifdef DEBUG_LOG
#   define DBG_LOG            0x1
#   define DBG_ERROR          0x2

static unsigned int lg_mask = /*DBG_LOG |*/DBG_ERROR;
//static unsigned int lg_mask = DBG_LOG | DBG_ERROR;
extern int printk(const char *fmt, ...);

#	define log(mask, fmt, arg...) \
		do { \
			if(lg_mask & mask) \
			printk(fmt, ##arg); \
		}while(0)
#else
#	define log(mask, fmt, arg...)
#endif

#define HAS_TIMER

#undef TRUE
#undef FALSE
#define TRUE 1
#define FALSE 0

#undef ntohl
#define ntohl
#undef htonl
#define htonl
u32 PORT_NUM;

#define QUEUE_DEPTH   	    256

#define MAX_4K_CNT			4
//#define DISK_4K_LSECTORS    1024*256*200
#define DISK_4K_LSECTORS    256*1024*4
//#define DISK_4K_LSECTORS      10140672
//#define DISK_4K_LSECTORS      10133504
//#define DISK_4K_LSECTORS      20275200
//#define DISK_4K_LSECTORS    256*900
//#define DISK_4K_LSECTORS    5062656
//#define DISK_4K_LSECTORS    958000

#define MINORS  	 		64

#define S5MSG_TAIL_LEN  	8


#define IMAGE_ID_OFFSET   0 

#define DEV_NUM     1

/* Listen-port offset supplied at module load; the real TCP port becomes
 * PORT_NUM = 0xC00a + port (see s5bd_init()). */
static int port = 0;
module_param(port, int, 0);

/* Server IP per device index, consumed by s5bd_socket_open().
 * Fix: the original declared this as "int *sip[]", which is the wrong
 * element type for string-literal initializers. */
static const char *sip[] = {"10.10.230.17",
		     "10.10.230.17",
		     "10.10.230.17"};

/* Image id assigned to each device index (IMAGE_ID_OFFSET is added). */
static int  imageid[] = {0,1,2};

#define IO_TIMEOUT (5)

/* Status codes carried in MSG.status by the storage server.  The RX thread
 * masks the value to its low byte before comparing.  Note that
 * MSG_STATUS_DELAY_RETRY was remapped from 130 to 3 (see the commented-out
 * line); replies with this status make the driver resend the request. */
typedef enum msg_status {
    MSG_STATUS_ERR              =-1,
    MSG_STATUS_OK               =0, 
    MSG_STATUS_REPLY_FLUSH      =5, 
    MSG_STATUS_REPLY_LOAD       =6, 
    MSG_STATUS_RETRY_LOAD       =9, 
    MSG_STATUS_VER_MISMATCH     =129,
  //  MSG_STATUS_DELAY_RETRY      =130,
	MSG_STATUS_DELAY_RETRY      =3,
    MSG_STATUS_CANCEL_FLUSH     =131,
    MSG_STATUS_CRC_ERR          =132,
    MSG_STATUS_OPENIMAGE_ERR    =133,
    MSG_STATUS_NOTFOUND         =134, 
    MSG_STATUS_MAX
}msg_status_t;

/* Message opcodes carried in MSG.msg_type.  Requests use even values and
 * their replies the following odd value.  This driver only sends
 * MSG_TYPE_READ / MSG_TYPE_WRITE and handles the matching *_REPLY codes;
 * the remaining values belong to the wider protocol. */
typedef enum  msg_type{
    MSG_TYPE_READ               =0,
    MSG_TYPE_READ_REPLY         =1,
    MSG_TYPE_WRITE              =2,
    MSG_TYPE_WRITE_REPLY        =3,
    MSG_TYPE_LOADWRITE          =4,
    MSG_TYPE_LOADWRITE_REPLY    =5,
    MSG_TYPE_FLUSHCOMPLETE      =6,
    MSG_TYPE_FLUSHCOMPLETE_REPLY=7,
    MSG_TYPE_CACHEDELETE        =8,
    MSG_TYPE_CACHEDELETE_REPLY  =9,
    MSG_TYPE_KEEPALIVE          =10,
    MSG_TYPE_KEEPALIVE_REPLY    =11,
    MSG_TYPE_CACHEFIND          =12,
    MSG_TYPE_CACHEFIND_REPLY    =13,
    MSG_TYPE_FLUSH_READ         =14,
    MSG_TYPE_FLUSH_READ_REPLY   =15,

    MSG_TYPE_OPENIMAGE          =32,
    MSG_TYPE_OPENIMAGE_REPLY    =33,
    MSG_TYPE_CLOSEIMAGE         =34,
    MSG_TYPE_CLOSEIMAGE_REPLY   =35,
    MSG_TYPE_TRIM               =36,
    MSG_TYPE_TRIM_REPLY         =37,
    MSG_TYPE_FLUSH_REQUEST      =38,
    MSG_TYPE_FLUSH_REPLY        =39,
    MSG_TYPE_LOAD_REQUEST       =40,
    MSG_TYPE_LOAD_REPLY         =41,
    MSG_TYPE_SNAP_CHANGED       =42,
    MSG_TYPE_SNAP_CHANGED_REPLY =43,
    MSG_TYPE_GET_SYSINFO        =44,
    MSG_TYPE_GET_SYSINFO_REPLY  =45,
    MSG_TYPE_GET_STASTICINFO    =46,
    MSG_TYPE_GET_STASTICINFO_REPLY  =47,
    MSG_TYPE_GET_IMAGE_META     =48,
    MSG_TYPE_GET_IMAGE_META_REPLY   =49,
    MSG_TYPE_S5META_ACCESS          = 50,
    MSG_TYPE_S5META_ACCESS_REPLY    = 51,
    MSG_TYPE_S5VOLUME_REQ               = 52,
    MSG_TYPE_S5VOLUME_REPLY             = 53,
    MSG_TYPE_MAX
}msg_type_t;


#define ASSERT(X)      BUG_ON(!(X))

/* Per-transaction tracking slot: one in-flight 4K segment of a bio.
 * Indexed by transaction id in s5bd_device.s5bd_tid_info[]. */
struct trac_id_info
{
	volatile struct bio	* bio;		/* owning bio; NULL means the slot is free */
	u32 rw;				/* direction tag: S5_WRITE or S5_READ */
	u64 slba;			/* 4K logical block address of this segment */
	volatile u32 tid;		/* transaction id == this slot's index */
    volatile	struct bio_vec *bvec;	/* bio_vec holding this segment's page */
	u32 bv_cnt;			/* total 4K segments in the owning bio */
	volatile u64 jiffies;		/* send timestamp; 0 means not in flight */
	u32 retry_count;		/* times the server requested a resend */
	u8 read_unit;			/* read granularity value copied into MSG.read_unit */
};

/* Node of the free-transaction-id list; id doubles as the pool index. */
struct tid_entry
{
    volatile u32 id;
    volatile void *next;	/* next free entry (actually a struct tid_entry *) */
} ;

/* On-the-wire request/reply header exchanged with the storage server.
 * Packed: field order and sizes define the protocol layout.  Note that
 * htonl/ntohl are #defined to nothing earlier in this file, so multi-byte
 * fields effectively travel in host byte order. */
typedef struct message_struct
{
	u32 magic_num;		/* protocol magic, 0x3553424e */
	u32 msg_type;		/* msg_type_t */
	u32 transaction_id;	/* tid: index into the sender's tracking table */
	u64 slba;			/*start of LBA*/
	u32 data_len;		/* payload bytes after the header (0 or 4096 here) */
	u64 image_id;
	u32 user_id;
	u32 pool_id;
	u32 nlba;			/*count of LBAs*/
	u32 obj_ver;		/*utilized on flush or load*/
	u32 listen_port;
	u32 snap_sequence;
	u32 status;			/* msg_status_t; RX thread checks the low byte only */
	u8  iops;
	u8  is_head;
	u8  read_unit;		/* read granularity hint, see s5bd_aio_read() */
	u8  reserved[1];
}__attribute__((packed)) MSG;


/* Per-device state: one gendisk + request queue, one TCP connection to the
 * server, a fixed pool of QUEUE_DEPTH transaction ids, the RX/TX kthreads,
 * and an I/O-timeout watchdog timer. */
struct s5bd_device
{
	struct request_queue *queue;
    struct gendisk *disk;
	spinlock_t tid_lock;		/* protects the tid free list */
	spinlock_t bio_list_lock;	/* protects snd_bio_list */
	struct semaphore sembio_lock;	/* upped on tid free; submit waits on it */
	struct semaphore semtid_lock;
	struct tid_entry tid_pool[QUEUE_DEPTH];		/* backing storage for the free list */
	volatile struct tid_entry *	tid_head;	/* pop end of the free list */
	volatile struct tid_entry *	tid_tail;	/* push end (sentinel: head==tail means empty) */
	struct timer_list check_io_timeout_timer;	/* fires every IO_TIMEOUT seconds */
	volatile struct trac_id_info s5bd_tid_info[QUEUE_DEPTH];	/* in-flight tracking, indexed by tid */
	struct task_struct *s5bd_thread;	/* RX kthread (s5bd_kthread) */
	struct task_struct *s5bd_thread_snd;	/* TX kthread (s5bd_kthread_snd) */
	struct sockaddr_in server_addr;
	struct socket *s5sock;  
	volatile int stop_s5bd_submit_flag;	
	u32 write_count, read_count;	/* debug counters bumped on each send */
	volatile  u8 del_timer_flag;
	int snd_thread_exit;		/* set by s5bd_exit() to stop the TX thread */
	u8 blk_name[20];		/* name used for register_blkdev() */
	u8 thr_name[20];		/* name given to the RX kthread */
	int s5bd_major;
	u64 image_id;
	u8 exc_exit;			/* RX thread died on a protocol/socket error */
	u64 w_retry_count;
	u64 r_retry_count;
    u32 retry_write_id_error;
	u32 retry_read_id_error;
	u32 bio_conflit_cnt;
	wait_queue_head_t		wq_snd;	/* TX thread waits here for queued bios */
	int snd_thead_exit;		/* NOTE(review): typo twin of snd_thread_exit, appears unused here */
   	struct bio_list snd_bio_list;	/* bios queued by s5bd_bio_request() */
   	struct bio_list retry_list;
	volatile	u8 no_request;	/* device failed: reject all new bios with -EIO */

	//test
	u32 lba_min;			/* min/max LBA observed (debug, see record_lba) */
	u32 lba_max;
	volatile u64 tid_reply_count;	/* tids returned to the pool */
	volatile u64 tid_alloc_count;	/* tids taken from the pool */
	volatile u64 old_s5_snd_count;	/* watchdog snapshot of s5_snd_count */
	volatile u64 s5_snd_count;	/* messages sent since load */
};

/*
 * Read the x86 time-stamp counter.
 * RDTSC returns the 64-bit counter split across EDX:EAX; the "=A" constraint
 * cannot be used here because on x86_64 it would map to %rax alone.
 */
uint64_t rdtsc(void)
{
	uint32_t eax, edx;

	__asm__ __volatile__("rdtsc" : "=a"(eax), "=d"(edx));
	return ((uint64_t)edx << 32) | eax;
}

/*
 * Parse a dotted-quad IPv4 string (e.g. "10.10.230.17") into 4 bytes.
 * Rejects empty octets, values above 255, leading zeros, fewer or more than
 * four octets, and any non-digit/non-dot character.
 * Returns 1 on success (dst holds the 4 octets in order), 0 on bad input.
 */
int inet_pton4 (const char *src, unsigned char *dst)
{
#define NS_INADDRSZ     4
        unsigned char parsed[NS_INADDRSZ];
        unsigned char *cur = parsed;
        int in_octet = 0;       /* currently inside a run of digits? */
        int octet_count = 0;
        int ch;

        *cur = 0;
        while ((ch = *src++) != '\0') {
                if (ch >= '0' && ch <= '9') {
                        unsigned int value = *cur * 10 + (ch - '0');

                        if (in_octet && *cur == 0)
                                return (0);     /* leading zero, e.g. "01" */
                        if (value > 255)
                                return (0);
                        *cur = value;
                        if (!in_octet) {
                                if (++octet_count > 4)
                                        return (0);
                                in_octet = 1;
                        }
                } else if (ch == '.' && in_octet) {
                        if (octet_count == 4)
                                return (0);     /* trailing fifth octet */
                        *++cur = 0;
                        in_octet = 0;
                } else {
                        return (0);
                }
        }
        if (octet_count < 4)
                return (0);
        memcpy(dst, parsed, NS_INADDRSZ);
        return (1);
}

static struct s5bd_device * s5bd_dev[DEV_NUM];
u32 tid_alloc_failed;
/*
 * Build the free-tid list: chain all QUEUE_DEPTH pool entries into a singly
 * linked list and point head/tail at its ends.  The entry at tail acts as a
 * sentinel (head == tail means "empty" to s5bd_tid_alloc()), so only
 * QUEUE_DEPTH - 1 ids are handed out concurrently.
 */
static void s5bd_tid_init(struct s5bd_device * dev)
{
	u32 slot;

	for (slot = 0; slot < QUEUE_DEPTH; slot++)
	{
		dev->tid_pool[slot].id = slot;
		dev->tid_pool[slot].next =
			(slot + 1 < QUEUE_DEPTH) ? &dev->tid_pool[slot + 1] : NULL;
	}

	dev->tid_head = &dev->tid_pool[0];
	dev->tid_tail = &dev->tid_pool[QUEUE_DEPTH - 1];
}
/*
 * Pop a transaction id off the free list.
 * Returns the id, or -1 when the list is empty (head has reached the tail
 * sentinel); failures are counted in tid_alloc_failed.
 */
static inline int s5bd_tid_alloc(struct s5bd_device * dev)
{
	int id = -1;

	spin_lock(&dev->tid_lock);
	if (dev->tid_head != dev->tid_tail)
	{
		id = dev->tid_head->id;
		dev->tid_head = dev->tid_head->next;
		dev->tid_alloc_count ++;
	}
	else
	{
		tid_alloc_failed ++;
	}
	spin_unlock(&dev->tid_lock);
	return id;
}

/*
 * Return a transaction id to the free list (pushed at the tail) and wake
 * one waiter blocked in s5bd_submit_io() on sembio_lock.
 */
static inline void s5bd_tid_free(struct s5bd_device * dev, u32 id)
{
	struct tid_entry *entry = &dev->tid_pool[id];

	spin_lock(&dev->tid_lock);
	dev->tid_tail->next = entry;
	dev->tid_tail = entry;
	dev->tid_reply_count ++;
	spin_unlock(&dev->tid_lock);

	up(&dev->sembio_lock);
}

/*
 * Record the start of a bio in the gendisk's part0 statistics (ios/sectors
 * counters and in-flight count), so tools like iostat see this device.
 * Called from s5bd_bio_request() when the bio is queued.
 */
void s5bd_start_io_acct(struct s5bd_device * dev, struct bio *bio)
{
		if (bio == NULL) return;
        const u32 rw = bio_data_dir(bio);	/* 0 = read, nonzero = write */
        u32 cpu;
        struct hd_struct* part = &dev->disk->part0;
        /* Defensive only: the address of a struct member is never NULL. */
        if(!part)
                return;
        cpu = part_stat_lock();
        part_round_stats(cpu, part);
        part_stat_inc(cpu, part, ios[rw]);
        part_stat_add(cpu, part, sectors[rw], bio_sectors(bio));

        (void) cpu; /* The macro invocations above want the cpu argument, I do not like
                       the compiler warning about cpu only assigned but never used... */
        part_inc_in_flight(part, rw);
        part_stat_unlock();
}

/*
 * Record the completion of a bio in the gendisk statistics: adds the
 * elapsed ticks (now minus the send timestamp stored in slot @id) and drops
 * the in-flight count.  Must run while s5bd_tid_info[id].jiffies still holds
 * the send time, i.e. before the slot is cleared.
 */
void s5bd_end_io_acct(struct s5bd_device *dev, struct bio *bio, u32 id)
{

		if (bio == NULL) return;
        u32 rw = bio_data_dir(bio);
        u32 cpu;
        unsigned long duration = jiffies - dev->s5bd_tid_info[id].jiffies;
        struct hd_struct* part = &dev->disk->part0;
        /* Defensive only: the address of a struct member is never NULL. */
        if(!part)
                return;

        cpu = part_stat_lock();
        part_stat_add(cpu, part, ticks[rw], duration);
        part_round_stats(cpu, part);
        part_dec_in_flight(part, rw);
        part_stat_unlock();
}

/*
 * Send one MSG_TYPE_WRITE request for a single tracked 4K segment: the
 * packed header, 4096 bytes of page data, and an 8-byte tail, in one
 * kernel_sendmsg() call on the device's TCP socket.
 * On send failure the tracking slot is cleared and the tid is returned to
 * the pool.  Returns 0 on success, -1 on failure.
 */
static int s5bd_aio_write(struct s5bd_device * dev, volatile struct trac_id_info * tid_info)
{
	struct msghdr msg_hdr;
	int rc;
	/* NOTE(review): the tail is sent uninitialized - presumably the server
	 * ignores its contents; confirm against the protocol. */
	u8 s5msg_tail[S5MSG_TAIL_LEN];
	struct message_struct  s5msg;
	struct kvec iov[3];

	memset(&s5msg, 0, sizeof(struct message_struct));

	/* The watchdog timer uses this counter to detect live traffic. */
	dev->s5_snd_count ++;

	/* htonl is #defined to nothing in this file, so these fields actually
	 * travel in host byte order. */
	s5msg.magic_num = htonl(0x3553424e);
	s5msg.msg_type = htonl(MSG_TYPE_WRITE);
	s5msg.transaction_id = htonl(tid_info->tid);
	s5msg.user_id = htonl(2);
	s5msg.pool_id = htonl(3);

	s5msg.image_id = (u64)(htonl(dev->image_id));
	s5msg.nlba = htonl(1);
	s5msg.slba = ((u64)(htonl(tid_info->slba)));
	s5msg.data_len = htonl(4096);

	msg_hdr.msg_name = (struct sockaddr *) &dev->server_addr;
	msg_hdr.msg_namelen = sizeof(struct sockaddr);
	msg_hdr.msg_control = NULL;
	msg_hdr.msg_controllen = 0;

	msg_hdr.msg_flags = MSG_DONTWAIT; //| TCP_NODELAY;//MSG_WAITALL|MSG_NOSIGNAL |TCP_NODELAY |TCP_CORK; 
	
	/* Wire format: header | 4K payload | tail. */
	iov[0].iov_base = &s5msg;
	iov[0].iov_len = sizeof(struct message_struct);
	iov[1].iov_base = page_address(tid_info->bvec->bv_page);
	iov[1].iov_len = 4096;		
	iov[2].iov_base = s5msg_tail;
	iov[2].iov_len = S5MSG_TAIL_LEN;

	
	log(DBG_LOG,"s5bd_aio_write tid %d slba %lld write_count %d bio_vcnt %d image_id %d \r\n",
		tid_info->tid , s5msg.slba, ++dev->write_count, tid_info->bv_cnt, s5msg.image_id);
	
	/* Timestamp for duration accounting and the timeout watchdog. */
	tid_info->jiffies = jiffies;

	rc = kernel_sendmsg(dev->s5sock, &msg_hdr, iov, 3,
		sizeof(struct message_struct) + S5MSG_TAIL_LEN + 4096);
	if (rc < 0)
	{
		/* Drop the tracking slot and recycle the tid on send failure. */
        tid_info->bio = NULL;      
		tid_info->jiffies = 0;
        s5bd_tid_free(dev, tid_info->tid);
		log(DBG_ERROR,"ERROR : s5bd_aio_write failed %d \r\n", rc);
		return -1;
	}

	return  0;
}

/*
 * Send one MSG_TYPE_READ request for a single tracked 4K segment: the
 * packed header followed by an 8-byte tail (no payload; data arrives in the
 * reply, handled by s5bd_kthread()).
 * On send failure the tracking slot is cleared and the tid is returned to
 * the pool.  Returns 0 on success, -1 on failure.
 */
static int s5bd_aio_read(struct s5bd_device * dev, struct trac_id_info * tid_info)
{
	struct msghdr msg_hdr;
	int rc;
	struct message_struct s5msg;
	struct kvec iov[2];
	/* NOTE(review): the tail is sent uninitialized - presumably the server
	 * ignores its contents; confirm against the protocol. */
	u8 s5msg_tail[S5MSG_TAIL_LEN];

	memset(&s5msg, 0, sizeof(struct message_struct));

	/* The watchdog timer uses this counter to detect live traffic. */
	dev->s5_snd_count ++;
	/* htonl is #defined to nothing in this file, so these fields actually
	 * travel in host byte order. */
	s5msg.magic_num = htonl(0x3553424e);
	s5msg.msg_type = htonl(MSG_TYPE_READ);
	s5msg.transaction_id = htonl(tid_info->tid);
	s5msg.user_id = htonl(2);
	s5msg.pool_id = htonl(3);

	s5msg.image_id = (u64)(htonl(dev->image_id));
	s5msg.nlba = htonl(1);
	s5msg.slba = (u64)(htonl(tid_info->slba));
	s5msg.data_len = htonl(0);
	
	/* Read granularity hint derived from the bio's segment count. */
	s5msg.read_unit = tid_info->read_unit;
	
	msg_hdr.msg_name = (struct sockaddr *) &dev->server_addr;
	msg_hdr.msg_namelen = sizeof(struct sockaddr);
	msg_hdr.msg_control = NULL;
	msg_hdr.msg_controllen = 0;
	msg_hdr.msg_flags = MSG_DONTWAIT; // |TCP_NODELAY;//MSG_WAITALL|MSG_NOSIGNAL;

	/* Wire format: header | tail. */
	iov[0].iov_base = &s5msg;
	iov[0].iov_len = sizeof(struct message_struct);
	
	iov[1].iov_base = s5msg_tail;
	iov[1].iov_len = S5MSG_TAIL_LEN;
	
	log(DBG_LOG,"send : s5bd_aio_read tid  %d  slba %lld read_count %d , image_id %d \r\n",
		tid_info->tid, tid_info->slba, ++dev->read_count, s5msg.image_id );
	
	/* Timestamp for duration accounting and the timeout watchdog. */
	tid_info->jiffies = jiffies;
	
	rc = kernel_sendmsg(dev->s5sock, &msg_hdr, iov, 2,
			iov[0].iov_len + iov[1].iov_len);
	if (rc < 0)
	{
		/* Drop the tracking slot and recycle the tid on send failure. */
        tid_info->bio = NULL;       
		tid_info->jiffies = 0;
	    s5bd_tid_free(dev, tid_info->tid);
		log(DBG_ERROR,"ERROR : s5bd_aio_read failed %d \r\n", rc);
        return -1;
	}

	return 0;
	
}
/* Direction tags stored in trac_id_info.rw for in-flight transactions. */
enum 
{
	S5_WRITE = 0xa0,
	S5_READ =  0xb0
};
volatile u64 t2, t1, t4, iocount;
volatile u32 t3[10], tcount;

static volatile u64 bio_w_cnt_distb[MAX_4K_CNT];
static volatile u64 bio_r_cnt_distb[MAX_4K_CNT];

/*
 * Track the smallest and largest LBA seen by this device.  Debug/statistics
 * only; lba_min/lba_max are initialized to 0xFFFFFFFF/0 in s5bd_init().
 */
static void record_lba(struct s5bd_device* dev, u32 lba)
{
	if (lba > dev->lba_max)
		dev->lba_max = lba;
	else if (lba < dev->lba_min)
		dev->lba_min = lba;
}

static int s5bd_submit_io(struct s5bd_device * dev, struct bio *bio)
{
	int tid[MAX_4K_CNT];
	u64 j1;
	int ret,i;
	int idx = 0;
	
	log(DBG_LOG, "%s %d bio_vcnt %d \r\n", 
		__FUNCTION__, __LINE__, bio->bi_vcnt);
	
	if ((bio->bi_vcnt > MAX_4K_CNT) || (bio->bi_vcnt == 0))
	{	
		log(DBG_ERROR, "excetion : %s %d bio_vcnt %d dir %d \r\n", 
			__FUNCTION__, __LINE__, bio->bi_vcnt, bio_data_dir(bio));

		return -1;
	}
	j1  = jiffies;


	while (1) 
	{
		tid[idx] = s5bd_tid_alloc(dev);
		if (tid[idx] != -1)
			idx ++;
		else
			{
	//		schedule_timeout(1);
			down_timeout(&dev->sembio_lock, 1);
			}
		if (idx == bio->bi_vcnt)
			break;

		if((jiffies - j1) > IO_TIMEOUT*HZ)
		{
			log(DBG_ERROR,"ERROR: s5_submit_io tid erro \r\n");
			return  -1;
		}	
	}

    //t1 = rdtsc();


    if (bio_data_dir(bio))
    {
		unsigned short bi_vcnt = bio->bi_vcnt;

		bio_w_cnt_distb[bio->bi_vcnt-1] ++;

    	for (i = 0 ; i < bio->bi_vcnt; i++)
    	{
			ASSERT(tid[i] < QUEUE_DEPTH);
			//log(DBG_ERROR, "TID : %s %d bio_vcnt %d dir %d tid %d \r\n", 
			//	__FUNCTION__, __LINE__, bio->bi_vcnt, bio_data_dir(bio), tid[i]);
			
			dev->s5bd_tid_info[tid[i]].bio = bio;
			dev->s5bd_tid_info[tid[i]].tid = tid[i];
			dev->s5bd_tid_info[tid[i]].bv_cnt = bio->bi_vcnt;
			dev->s5bd_tid_info[tid[i]].rw = S5_WRITE;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 2)	
		    dev->s5bd_tid_info[tid[i]].slba = (bio->bi_sector >> 3) + i;    //bi_sector 512 bytes per unit,lba 4096 bytes per unit
			dev->s5bd_tid_info[tid[i]].bvec = &bio->bi_io_vec[i];
#else
		    dev->s5bd_tid_info[tid[i]].slba = (bio->bi_iter.bi_sector >> 3) + i;    //bi_sector 512 bytes per unit,lba 4096 bytes per unit
			dev->s5bd_tid_info[tid[i]].bvec = bio->bi_io_vec[i]; 
#endif
			record_lba(dev, dev->s5bd_tid_info[tid[i]].slba);
		}
		
    	for (i = 0 ; i < bi_vcnt; i++)
		{
			if (dev->no_request == 1)
				return -1;

			if (s5bd_aio_write(dev, &dev->s5bd_tid_info[tid[i]]) == -1)
			{
				return -1;
			}
			//printk("write1 lba %lu\r\n", dev->s5bd_tid_info[tid[i]].slba );
		}

    	
	}
	else
	{
		/* When s5bd_kthread_snd is accessing bi_vcnt which may be modified by
		 * bio_endio in s5bd_kthread and then induce a calltrace.
		 * Use a variable to save bio->bi_vcnt to fix.
		 */
		unsigned short bi_vcnt = bio->bi_vcnt;

		bio_r_cnt_distb[bio->bi_vcnt-1] ++;

		for (i = 0 ; i < bio->bi_vcnt; i++)
		{
			ASSERT(tid[i] < QUEUE_DEPTH);
			dev->s5bd_tid_info[tid[i]].bio = bio;
			dev->s5bd_tid_info[tid[i]].tid = tid[i];
			dev->s5bd_tid_info[tid[i]].bv_cnt = bio->bi_vcnt;
			
			if (bi_vcnt == 1)	
				dev->s5bd_tid_info[tid[i]].read_unit = 0;
			else if (bi_vcnt == 2)
				dev->s5bd_tid_info[tid[i]].read_unit = 1;
			else
				dev->s5bd_tid_info[tid[i]].read_unit = 2;

			dev->s5bd_tid_info[tid[i]].rw = S5_READ;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 2)	
			dev->s5bd_tid_info[tid[i]].slba = (bio->bi_sector >> 3) + i;    //bi_sector 512 bytes per unit,lba 4096 bytes per unit
			dev->s5bd_tid_info[tid[i]].bvec = &bio->bi_io_vec[i];
#else
			dev->s5bd_tid_info[tid[i]].slba = (bio->bi_iter.bi_sector >> 3) + i;    //bi_sector 512 bytes per unit,lba 4096 bytes per unit
			dev->s5bd_tid_info[tid[i]].bvec = bio->bi_io_vec[i]; 
#endif
			record_lba(dev, dev->s5bd_tid_info[tid[i]].slba);
		}

		for (i = 0 ; i < bi_vcnt; i++)
		{
			if (dev->no_request == 1)
				return -1;

			if (s5bd_aio_read(dev, &dev->s5bd_tid_info[tid[i]]) == -1)
			{
				return -1;
			}
		}
		
	}
#if 0	
    t2 = rdtsc() - t1;
    t3[tcount ++] = t2;
    if(tcount >= 10)
		tcount = 0;
     t4 += t2;
     iocount ++; 
#endif
//	up(&dev->sembio_lock);
	return 0; 
}


/*
 * make_request entry point: every bio submitted to the device lands here.
 * The bio is queued on snd_bio_list and the TX kthread is woken to submit
 * it; when the device is in no_request (failed) mode the bio is completed
 * immediately with -EIO.  The version-conditional signature mirrors the
 * make_request_fn return type used by the running kernel.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10,0)
static int s5bd_bio_request(struct request_queue *q, struct bio *bio)
#else
static void  s5bd_bio_request(struct request_queue *q, struct bio *bio)
#endif
{
    int result = -1;	/* NOTE(review): assigned but never used here */
	struct s5bd_device * dev = q->queuedata;

	ASSERT(dev != NULL);

#if 0
    if (bio_data_dir(bio) == 0)
    {
//	log(DBG_LOG, "%s  %s %p\r\n", __FUNCTION__, __LINE__, bio);
	    bio_endio(bio, 0);
		return 0;
    }
#endif

    if (dev->no_request)
    {
		bio_endio(bio, -EIO);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
		return 0;
#else
		return;
#endif
    }

    log(DBG_LOG, "%s  %d %p\r\n", __FUNCTION__, __LINE__, bio);
	/* The TX thread pops from this list under the same lock. */
	spin_lock(&dev->bio_list_lock);
	bio_list_add(&dev->snd_bio_list, bio);
    spin_unlock(&dev->bio_list_lock);
	
	s5bd_start_io_acct(dev, bio);

    wake_up(&dev->wq_snd);

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
	return 0;
#endif
}

/*
 * Create a TCP socket and connect it to the server selected by
 * (port - PORT_NUM) in the sip[] table, filling in dev->server_addr and
 * dev->s5sock.  Returns 0 on success, -1 on failure.
 * Fix: the socket is now released when kernel_connect() fails (the original
 * leaked it), and the inet_pton4() destination is passed with the proper
 * pointer type.
 */
static int s5bd_socket_open(struct s5bd_device * dev, u32 port)
{
    int rc;
	u32 ipint;

	log(DBG_ERROR, "%s %d sip[%d]\r\n", __FUNCTION__, __LINE__, port - PORT_NUM);
	
	if ((rc = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &dev->s5sock)) < 0)
	{  
		log(DBG_ERROR,"ERROR: %s %d rc %d \r\n",	__FUNCTION__, __LINE__, rc);
		return - 1;  
	}  
	
	memset(&dev->server_addr, 0, sizeof(struct sockaddr_in));   
	
	dev->server_addr.sin_family = AF_INET; 
	dev->server_addr.sin_port = htons(port); 

	/* inet_pton4 writes the 4 address octets in order, which is already
	 * the network byte order sin_addr expects. */
	inet_pton4(sip[port - PORT_NUM], (u_char *)&ipint);
	printk ("socket connect server 0x%08x, %s port 0x%x \r\n", ipint, sip[port - PORT_NUM], port);
	dev->server_addr.sin_addr.s_addr =  ipint;		// SERV_ADDR;

  	if((rc = kernel_connect(dev->s5sock, (struct sockaddr *) &dev->server_addr,
        sizeof(struct sockaddr_in), 0)) < 0) 
 	{
		log(DBG_ERROR,"ERROR: %s %d rc %d\r\n",  __FUNCTION__, __LINE__, rc);
		/* Fix: do not leak the socket on connect failure. */
		sock_release(dev->s5sock);
		dev->s5sock = NULL;
		return -1;  
  	}

	log(DBG_ERROR,"s5bd_socket_open success \r\n");

    return 0;
}

/*
 * ioctl handler stub: no commands are implemented; every request is
 * accepted and reported as success.
 */
static int s5bd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	(void)bdev;
	(void)mode;
	(void)cmd;
	(void)arg;
	return 0;
}


/* Block device operations table: no real ioctls are implemented, so both
 * entry points map to the permissive s5bd_ioctl() stub. */
static const struct block_device_operations s5bd_fops = {
    .owner = THIS_MODULE,
    .ioctl = s5bd_ioctl,
    .compat_ioctl = s5bd_ioctl,
};

/*
 * Return TRUE if any 4K LBA covered by @bio (base .. base + bi_vcnt - 1) is
 * already tracked by an in-flight transaction, FALSE otherwise.
 * Improvement: the bio's base LBA is computed once instead of being
 * re-derived for every (slot, segment) pair, and the per-segment inner loop
 * collapses into a single range test.
 */
int  s5bd_check_lba_conflit (struct s5bd_device *dev, struct bio *bio)
{
	u32 i;
	u64 base;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 2)   
	base = bio->bi_sector >> 3;	/* bi_sector is in 512B units, LBA in 4096B */
#else
	base = bio->bi_iter.bi_sector >> 3;	/* bi_sector is in 512B units, LBA in 4096B */
#endif

	for (i = 0 ; i < QUEUE_DEPTH; i ++)
	{
		if (dev->s5bd_tid_info[i].bio == NULL)
			continue;

		/* Segment j of the bio occupies LBA base + j, 0 <= j < bi_vcnt. */
		if (dev->s5bd_tid_info[i].slba >= base &&
		    dev->s5bd_tid_info[i].slba < base + bio->bi_vcnt)
			return TRUE;
	}
	return FALSE;
}


/*
 * TX kthread: waits for bios queued by s5bd_bio_request(), pops them off
 * snd_bio_list and submits each via s5bd_submit_io().  On any submit
 * failure the bio is completed with -EIO and the device is switched into
 * no_request mode.  Exits when snd_thread_exit is set by s5bd_exit().
 *
 * Fixes vs. the original: 'bio' was logged before its first assignment
 * (uninitialized read), and the unused 'info' local is gone.
 */
static int s5bd_kthread_snd(void *data)
{
	struct s5bd_device *dev = data;
	struct bio *bio = NULL;
	int result;

	while (1)
	{
		wait_event(dev->wq_snd, 
		 	(dev->snd_thread_exit == 1) || !bio_list_empty(&dev->snd_bio_list));
		if(dev->snd_thread_exit == 1)
			return 0;

		/* Drain everything currently queued; producers take the same lock. */
		while (1)
		{
			spin_lock (&dev->bio_list_lock);
			bio = bio_list_pop(&dev->snd_bio_list);
			spin_unlock(&dev->bio_list_lock);
			if (bio == NULL)
				break;

			if (dev->no_request == 1)
			{
				bio_endio(bio, -EIO);
				break;
			}
			result = s5bd_submit_io(dev, bio);
			if (unlikely(result))
			{
				log(DBG_LOG, "%s  %d %p\r\n", __FUNCTION__, __LINE__, bio);
				bio_endio(bio, -EIO);
				dev->no_request =1 ;
				break;
			}
		}
	}
}


/*
 * RX kthread: loops receiving reply messages from the server socket and
 * completing the matching in-flight transactions.
 *
 * Per reply: read the fixed header, validate the tid against the tracking
 * table, then dispatch on msg_type.  DELAY_RETRY replies cause the request
 * to be resent; normal replies advance the owning bio's segment counter and
 * complete the bio once all segments have answered.  Every normal path ends
 * by consuming the 8-byte message tail.  On any protocol or socket error
 * the thread sets dev->exc_exit and terminates.
 *
 * Fixes vs. the original: the write-reply completion passed an
 * uninitialized 'bio' to s5bd_end_io_acct(); both completion paths now do
 * the accounting BEFORE bio_endio(), which may free the bio (the old order
 * was a use-after-free).  The unused 'lba' local is gone.
 */
static int s5bd_kthread(void *data)
{
	struct s5bd_device * dev = data;
	struct msghdr msg_hdr;
	struct kvec iov;
	struct bio * bio;
	struct message_struct s5msg;
	u32 bd_type;
	u32 tid;
	u32 status;
	int rc;
	u8 s5msg_tail[S5MSG_TAIL_LEN];

	msg_hdr.msg_name = (struct sockaddr *) &dev->server_addr;
	msg_hdr.msg_namelen = sizeof(struct sockaddr);
	msg_hdr.msg_control = NULL;
	msg_hdr.msg_controllen = 0;
	msg_hdr.msg_flags = MSG_WAITALL|MSG_NOSIGNAL;

	while (!kthread_should_stop())
	{
		set_current_state(TASK_INTERRUPTIBLE);

		/* Receive the fixed-size reply header. */
		iov.iov_base = &s5msg;
		iov.iov_len = sizeof(struct message_struct);

		if ((rc = kernel_recvmsg(dev->s5sock, &msg_hdr, &iov, 1, 
			sizeof(struct message_struct), MSG_WAITALL|MSG_NOSIGNAL)) < 0)
		{
			log(DBG_ERROR,"ERROR: FUNC %s LINE %d rc %d \r\n",  __FUNCTION__, __LINE__, rc);
			dev->exc_exit = 1;
			return -1;
		}

		bd_type = ntohl(s5msg.msg_type);
		tid = ntohl(s5msg.transaction_id);
		status = ntohl(s5msg.status);
		status &= 0xff;

		log(DBG_LOG, "%s %d tid %d \r\n", __FUNCTION__, __LINE__, tid);
		/* A reply must refer to a transaction that is actually in flight. */
		if ((tid >= QUEUE_DEPTH) || (dev->s5bd_tid_info[tid].bio == NULL) || 
			(dev->s5bd_tid_info[tid].jiffies == 0))
		{
			log(DBG_ERROR,"ERROR : tid error %d \r\n", tid	);

			dev->exc_exit = 1;
			return  -1;
		}

		log(DBG_LOG,"recv :----msg_type %x  tid %d  status %x \r\n", s5msg.msg_type, tid, s5msg.status);

		switch (bd_type)
		{
			case MSG_TYPE_WRITE_REPLY:
				if (status == MSG_STATUS_DELAY_RETRY)	
				{
					if ( dev->s5bd_tid_info[tid].tid != tid || dev->s5bd_tid_info[tid].rw != S5_WRITE)
					{
						dev->retry_write_id_error ++;
					}
					log(DBG_LOG,"recv : write_retry tid %x %x %p %p rwflag %x \r\n", tid, dev->s5bd_tid_info[tid].tid,
						 dev->s5bd_tid_info[tid].bio, dev->s5bd_tid_info[tid].bvec, dev->s5bd_tid_info[tid].rw);	

					dev->w_retry_count ++;	
					/* Consume the tail of this reply before resending. */
					iov.iov_base = s5msg_tail;
					iov.iov_len = S5MSG_TAIL_LEN;

					if ((rc = kernel_recvmsg(dev->s5sock, &msg_hdr,
								&iov, 1, S5MSG_TAIL_LEN, MSG_WAITALL|MSG_NOSIGNAL)) < 0)
					{
						log(DBG_ERROR,"ERROR: FUNC %s LINE %d  rc %d \r\n",  __FUNCTION__, __LINE__, rc);

						dev->exc_exit = 1;
						return -1;
					}

					if (s5bd_aio_write(dev, &dev->s5bd_tid_info[tid]) < 0)
					{
						log(DBG_ERROR,"ERROR : %s %d \r\n", __FUNCTION__, __LINE__);
						dev->exc_exit = 1;
						return -1;
					}

					++dev->s5bd_tid_info[tid].retry_count ;
					continue;
				}
				else
				{
					log(DBG_LOG,"write reply : tid %d bi_idx %d bi_vcnt %d msglen %d slba %d \r\n", tid, 
						dev->s5bd_tid_info[tid].bio->bi_idx, dev->s5bd_tid_info[tid].bio->bi_vcnt,
						ntohl(s5msg.data_len), ntohl(s5msg.slba)); 

					/* Last outstanding segment of this bio? */
					if (++dev->s5bd_tid_info[tid].bio->bi_idx == dev->s5bd_tid_info[tid].bio->bi_vcnt)
					{
						/* Fix: the original passed an uninitialized 'bio'
						 * here; use the tracked bio, and account before
						 * bio_endio() can free it. */
						bio = (struct bio  * )dev->s5bd_tid_info[tid].bio;
						s5bd_end_io_acct(dev, bio, tid);
						if (status  == 0xff)
						{
							bio_endio(bio, -EIO);
							printk("s5 write return BIO error tid  %d\r\n", tid);
						}
						else
							bio_endio(bio, 0);
					}
					dev->s5bd_tid_info[tid].bio = NULL;
					dev->s5bd_tid_info[tid].jiffies = 0;
					dev->s5bd_tid_info[tid].bvec = NULL;
					dev->s5bd_tid_info[tid].retry_count = 0;

					s5bd_tid_free(dev, tid);
				}

				break;

			case MSG_TYPE_READ_REPLY:
				if ( dev->s5bd_tid_info[tid].tid != tid || dev->s5bd_tid_info[tid].rw != S5_READ)
				{
						dev->retry_read_id_error ++;
				}

				if (dev->s5bd_tid_info[tid].slba != ntohl(s5msg.slba))
				{
					log(DBG_ERROR,  "read replay slab ERROR expected %d received %d \r\n", dev->s5bd_tid_info[tid].slba, ntohl(s5msg.slba));
				}

				if(status == MSG_STATUS_DELAY_RETRY)
				{
					log(DBG_LOG,"recv : read_retry tid %x %x %p %p rwflag %x \r\n", tid, dev->s5bd_tid_info[tid].tid,
						 dev->s5bd_tid_info[tid].bio, dev->s5bd_tid_info[tid].bvec, dev->s5bd_tid_info[tid].rw);	
					dev->r_retry_count ++;	

					/* Consume the tail of this reply before resending. */
					iov.iov_base = s5msg_tail;
					iov.iov_len = S5MSG_TAIL_LEN;

					if ((rc = kernel_recvmsg(dev->s5sock, &msg_hdr,
								&iov, 1, S5MSG_TAIL_LEN, MSG_WAITALL|MSG_NOSIGNAL)) < 0)
					{
						log(DBG_ERROR,"ERROR: FUNC %s LINE %d  rc %d \r\n",  __FUNCTION__, __LINE__, rc);

						dev->exc_exit = 1;
						return -1;
					}

					if (s5bd_aio_read(dev, &dev->s5bd_tid_info[tid]) < 0)
					{
						log(DBG_ERROR,"ERROR : %s %d \r\n", __FUNCTION__, __LINE__);

						dev->exc_exit = 1;
						return -1;
					}

					++dev->s5bd_tid_info[tid].retry_count ;
					continue;
				}

				bio = (struct bio  * )dev->s5bd_tid_info[tid].bio;

				/* Payload is received straight into the segment's page. */
				iov.iov_base = page_address(dev->s5bd_tid_info[tid].bvec->bv_page);
				iov.iov_len = 4096;

				log(DBG_LOG,"read reply : iov.iov_base %p bi_idx %d bi_vcnt %d msglen %d slba %d tid %d \r\n",
					iov.iov_base, 
					dev->s5bd_tid_info[tid].bio->bi_idx, dev->s5bd_tid_info[tid].bio->bi_vcnt,
					ntohl(s5msg.data_len), ntohl(s5msg.slba),
					tid); 

				if (ntohl(s5msg.data_len) == 4096)
				{
					if ((rc = kernel_recvmsg(dev->s5sock, &msg_hdr, &iov,
						1, 4096, MSG_WAITALL|MSG_NOSIGNAL)) < 0)
					{
						log(DBG_ERROR,"ERROR: FUNC %s LINE %d	rc %d \r\n", 
							__FUNCTION__, __LINE__, rc);

						dev->exc_exit = 1;
						return -1;
					}
				}

				/* Zero-length + OK means the LBA was never written. */
				if ((ntohl(s5msg.data_len) == 0) && (status == MSG_STATUS_OK))
					memset(iov.iov_base, 0xff, iov.iov_len);

				/* Last outstanding segment of this bio? */
				if (++dev->s5bd_tid_info[tid].bio->bi_idx == dev->s5bd_tid_info[tid].bio->bi_vcnt)
				{
					/* Fix: account before bio_endio() can free the bio. */
					s5bd_end_io_acct(dev, bio, tid);
					if (status  == 0xff)
					{
						bio_endio(bio, -EIO);
						printk("s5 read return BIO error tid %d \r\n",tid);
					}
					else
						bio_endio(bio, 0);
				}

				dev->s5bd_tid_info[tid].bio = NULL;
				dev->s5bd_tid_info[tid].jiffies = 0;
				dev->s5bd_tid_info[tid].bvec = NULL;
				dev->s5bd_tid_info[tid].retry_count = 0;

				s5bd_tid_free(dev, tid);

				break;
			default:
				log(DBG_ERROR,"recv : ERROR %s %d msg_type %d \r\n",	__FUNCTION__, __LINE__, bd_type);

				dev->exc_exit = 1;
				return -1;
		}

		/* Consume the 8-byte tail that follows every normal reply. */
		log(DBG_LOG, "%s %d\r\n", __FUNCTION__, __LINE__);
		iov.iov_base = s5msg_tail;
		iov.iov_len = S5MSG_TAIL_LEN;

		if ((rc = kernel_recvmsg(dev->s5sock, &msg_hdr,
			&iov, 1, S5MSG_TAIL_LEN, MSG_WAITALL|MSG_NOSIGNAL)) < 0)
		{
			log(DBG_ERROR,"ERROR: FUNC %s LINE %d  rc %d \r\n",  __FUNCTION__, __LINE__, rc);

			dev->exc_exit = 1;
			return -1;
		}
	}

	return 0;
}

/*
 * Watchdog timer, re-armed every IO_TIMEOUT seconds.  If messages were sent
 * since the last run the device is considered healthy and the timer is just
 * re-armed.  Otherwise every in-flight transaction older than IO_TIMEOUT is
 * failed with -EIO: the device enters no_request mode, the stuck bio is
 * completed once, and the tids of ALL segments belonging to that bio are
 * returned to the pool.
 * Fix: the original ended the reclaim section with "#endif 0" - a stray
 * token after the #endif directive.  The dead "#if 0" statistics dump was
 * also removed.
 */
static void s5bd_timer_handler(unsigned long data)
{
	u32 i, j;
	struct s5bd_device * dev = (struct s5bd_device *)data;
	u64 time;

	/* Traffic flowed since the last check: snapshot and re-arm. */
	if (dev->s5_snd_count != dev->old_s5_snd_count) 
	{
		dev->old_s5_snd_count = dev->s5_snd_count;

		mod_timer(&dev->check_io_timeout_timer, jiffies + IO_TIMEOUT*HZ);
		return;
	}

	for (i = 0 ; i < QUEUE_DEPTH; i++)
	{
		if ((dev->s5bd_tid_info[i].bio != NULL) && (dev->s5bd_tid_info[i].jiffies != 0))
		{
			if ((time = (jiffies - dev->s5bd_tid_info[i].jiffies)) > IO_TIMEOUT*HZ )
			{
				log(DBG_ERROR, "ERROR CHECK bio time out tid %d bio %p  %d, retrycount %d jiff %ld\r\n", 
					dev->s5bd_tid_info[i].tid, dev->s5bd_tid_info[i].bio, time,  dev->s5bd_tid_info[i].retry_count, jiffies);

				/* Stop accepting new requests and fail the stuck bio. */
				dev->no_request = 1;
				bio_endio((struct bio *)dev->s5bd_tid_info[i].bio, -EIO); //bio timeout

				/* Reclaim every other tid belonging to the same bio. */
				for (j = i + 1; j < QUEUE_DEPTH; j ++)
				{
					if (dev->s5bd_tid_info[j].bio == dev->s5bd_tid_info[i].bio)
					{
						log(DBG_ERROR, "ERROR2 bio time out tid %d bio %p  %d, retrycount %d\r\n", 
							dev->s5bd_tid_info[j].tid, dev->s5bd_tid_info[j].bio, time,  dev->s5bd_tid_info[j].retry_count);

						dev->s5bd_tid_info[j].bio = NULL;
						dev->s5bd_tid_info[j].jiffies = 0;
						dev->s5bd_tid_info[j].retry_count  = 0;
						s5bd_tid_free(dev, dev->s5bd_tid_info[j].tid);
					}
				}
				dev->s5bd_tid_info[i].bio = NULL;
				dev->s5bd_tid_info[i].jiffies = 0;
				dev->s5bd_tid_info[i].retry_count  = 0;
				s5bd_tid_free(dev, dev->s5bd_tid_info[i].tid);
			}
		}
	}
	mod_timer(&dev->check_io_timeout_timer, jiffies + IO_TIMEOUT*HZ);
}

/*
 * Module init: for every device index, connect to its server, register the
 * block device, create the request queue and gendisk, start the RX kthread
 * (s5bd_kthread) and TX kthread (s5bd_kthread_snd), arm the I/O-timeout
 * watchdog, and finally publish the disk with add_disk().
 * Fixes vs. the original: the kzalloc() result is NULL-checked before use,
 * the device struct is freed if the socket cannot be opened, and the printk
 * of the u64 TSC delta uses a matching format specifier.
 */
static int __init s5bd_init(void)
{
    int result;
	u32 i;
	uint64_t  disk_size;
	u64 t1, t2;

	/* Rough TSC-ticks-per-second calibration, printed for debugging. */
	t1 = rdtsc();
	schedule_timeout_uninterruptible(HZ);
	t2 = rdtsc();
	/* Fix: t2 - t1 is u64; "%d" was a format-string mismatch. */
	printk("HZ = %d tscHz %llu \r\n", HZ, (unsigned long long)(t2 - t1));

	PORT_NUM = 0xC00a + port;

	for (i = 0 ; i < DEV_NUM; i ++ )
	{
		struct sched_param param = {.sched_priority = 50};

		s5bd_dev[i] = kzalloc(sizeof(struct s5bd_device), GFP_KERNEL);
		/* Fix: the original dereferenced the result without checking. */
		if (!s5bd_dev[i])
			return -ENOMEM;

		s5bd_dev[i]->image_id = imageid[i] + IMAGE_ID_OFFSET;

	    sprintf(s5bd_dev[i]->blk_name, "s5%dbd", i); 
	    sprintf(s5bd_dev[i]->thr_name, "s5%dkthrd", i);

		log(DBG_ERROR, "%s %d image id %d \r\n", __FUNCTION__, __LINE__, s5bd_dev[i]->image_id);

		if (s5bd_socket_open(s5bd_dev[i], PORT_NUM + i) < 0)
		{
			/* Fix: do not leak the device struct on failure. */
			kfree(s5bd_dev[i]);
			s5bd_dev[i] = NULL;
			return -1;	
		}
		
		spin_lock_init(&s5bd_dev[i]->tid_lock);
		spin_lock_init(&s5bd_dev[i]->bio_list_lock);
	    sema_init(&s5bd_dev[i]->sembio_lock, 1);

        init_waitqueue_head(&s5bd_dev[i]->wq_snd);
	    bio_list_init(&s5bd_dev[i]->snd_bio_list);
		bio_list_init(&s5bd_dev[i]->retry_list);
	    s5bd_tid_init(s5bd_dev[i]);
		
		log(DBG_ERROR, "%s %d \r\n", __FUNCTION__, __LINE__);
	
		/* s5bd_major starts at 0 (kzalloc), so the kernel picks one. */
		result = register_blkdev(s5bd_dev[i]->s5bd_major , s5bd_dev[i]->blk_name);
		if (result < 0)
		{
			log(DBG_ERROR, "%s %d \r\n", __FUNCTION__, __LINE__);
			return  -1;
		}	
		else if (result > 0)
			s5bd_dev[i]->s5bd_major = result;

		s5bd_dev[i]->queue = blk_alloc_queue(GFP_KERNEL);
	    if(!s5bd_dev[i]->queue)
		{
			log(DBG_ERROR, "%s %d \r\n", __FUNCTION__, __LINE__);
	    	unregister_blkdev(s5bd_dev[i]->s5bd_major, s5bd_dev[i]->blk_name);
		    return -1;	
		}

		log(DBG_ERROR, "%s %d \r\n", __FUNCTION__, __LINE__);

	    s5bd_dev[i]->queue->queue_flags = QUEUE_FLAG_DEFAULT;
	    queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, s5bd_dev[i]->queue);
	    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, s5bd_dev[i]->queue);
	    blk_queue_make_request(s5bd_dev[i]->queue, s5bd_bio_request);
	    s5bd_dev[i]->queue->queuedata = s5bd_dev[i];
	 
	    s5bd_dev[i]->disk = alloc_disk(MINORS);
	    if(!s5bd_dev[i]->disk)
		{
			blk_cleanup_queue(s5bd_dev[i]->queue);
	    	unregister_blkdev(s5bd_dev[i]->s5bd_major, s5bd_dev[i]->blk_name);
			log(DBG_ERROR, "%s %d \r\n", __FUNCTION__, __LINE__);
			return  -1;
		}
		
		/* The device operates in whole 4K blocks; cap request size at
		 * MAX_4K_CNT 4K segments (8 sectors each). */
	    blk_queue_logical_block_size(s5bd_dev[i]->queue, 4096);
	    blk_queue_max_hw_sectors(s5bd_dev[i]->queue, 8*MAX_4K_CNT);
	    
	    s5bd_dev[i]->disk->major = s5bd_dev[i]->s5bd_major;
	    s5bd_dev[i]->disk->minors = MINORS;
	    s5bd_dev[i]->disk->first_minor = 0;
	    s5bd_dev[i]->disk->fops = &s5bd_fops;
	    s5bd_dev[i]->disk->private_data = s5bd_dev[i];
	    s5bd_dev[i]->disk->queue = s5bd_dev[i]->queue;
		s5bd_dev[i]->lba_min = 0xFFFFFFFF;
		s5bd_dev[i]->lba_max = 0;

	    sprintf(s5bd_dev[i]->disk->disk_name, "s5bd%d", i); 
		/* Capacity is in 512-byte sectors: 4K logical sectors * 8. */
	  	disk_size = (u64)DISK_4K_LSECTORS * 8; 
	    set_capacity(s5bd_dev[i]->disk, disk_size);

		log(DBG_ERROR, "%s %d \r\n", __FUNCTION__, __LINE__);

		s5bd_dev[i]->s5bd_thread = kthread_run(s5bd_kthread, s5bd_dev[i], s5bd_dev[i]->thr_name);
		if (IS_ERR(s5bd_dev[i]->s5bd_thread))
			BUG_ON(1);
		/* The RX thread runs SCHED_FIFO so replies are drained promptly. */
		sched_setscheduler(s5bd_dev[i]->s5bd_thread, SCHED_FIFO, &param);

		s5bd_dev[i]->s5bd_thread_snd = kthread_run(s5bd_kthread_snd, s5bd_dev[i], "ts5snd");
		if (IS_ERR(s5bd_dev[i]->s5bd_thread_snd))
			BUG_ON(1);

		log(DBG_ERROR, "%s %d \r\n", __FUNCTION__, __LINE__);
#ifdef HAS_TIMER
	    init_timer(&s5bd_dev[i]->check_io_timeout_timer);

		s5bd_dev[i]->check_io_timeout_timer.expires = jiffies + IO_TIMEOUT*HZ;
		s5bd_dev[i]->check_io_timeout_timer.data = (unsigned long) s5bd_dev[i];
		s5bd_dev[i]->check_io_timeout_timer.function = &s5bd_timer_handler;	/* timer handler */
		add_timer(&s5bd_dev[i]->check_io_timeout_timer);
#endif
		
		add_disk(s5bd_dev[i]->disk);
	}
	
	return 0;
}

/*
 * Module exit: for every device, tear down the request queue and gendisk,
 * unregister the block device, stop the watchdog timer, shut the socket
 * down (which also unblocks the RX thread's kernel_recvmsg), then stop the
 * kthreads and free the device struct.
 */
static void __exit s5bd_exit(void)
{
	u32 i;
	
	for(i = 0 ; i < DEV_NUM; i ++ )
	{
		blk_cleanup_queue(s5bd_dev[i]->queue);
	    if (s5bd_dev[i]->disk->flags & GENHD_FL_UP)
		    del_gendisk(s5bd_dev[i]->disk);
		log(DBG_LOG, "unregister s5bd block device \r\n");
	    unregister_blkdev(s5bd_dev[i]->s5bd_major, s5bd_dev[i]->blk_name);
#ifdef HAS_TIMER
		del_timer_sync(&s5bd_dev[i]->check_io_timeout_timer);
#endif
	//if(s5bd_dev[i]->io_time_out_flag == 0)
			kernel_sock_shutdown(s5bd_dev[i]->s5sock, SHUT_RDWR);
		/* Ask the TX thread to exit and wake it out of wait_event(). */
		s5bd_dev[i]->snd_thread_exit = 1;
	    wake_up(&s5bd_dev[i]->wq_snd);	
		/* NOTE(review): schedule_timeout() without setting the task state
		 * first returns immediately; msleep() was probably intended. */
		schedule_timeout(HZ/2);
		log(DBG_LOG, "thread stop \r\n");
		/* Skip kthread_stop() when the RX thread already exited on its own
		 * after a socket/protocol error (exc_exit set). */
		if (s5bd_dev[i]->exc_exit != 1)
			kthread_stop(s5bd_dev[i]->s5bd_thread);
		schedule_timeout(HZ);
		kfree(s5bd_dev[i]);
		log(DBG_LOG,"s5bd	exit\r\n");
	}
}

module_init(s5bd_init);
module_exit(s5bd_exit);
MODULE_LICENSE("GPL");

