/*
 * This file implements the high-level driver functions for hard drives.
 * The code is adapted from the block layer of the Linux kernel source.
 * Please credit the Linux developers if you reuse it, and respect the
 * GPL when doing so.
 */
#include <cnix/scsi.h>
#include <cnix/kernel.h>
#include <cnix/types.h>
#include <cnix/errno.h>
#include <cnix/buffer.h>
#include <cnix/scatterlist.h>
#include <cnix/pci.h>
#include <cnix/ata.h>
#include <cnix/libata.h>
#include <asm/bitops.h>
#include <cnix/buffer.h>
#include <cnix/partition.h>

/*
 * Low level read/write request queue to/from the hard drive.
 */
/* Number of preallocated request slots shared by all callers. */
#define REQQ_SIZE	1024
/* A block I/O request: a list node (on the free list or the pending
 * queue) plus the buffer it carries. */
static struct request {
	list_t list;
	struct buf_head * buf;
}reqq[REQQ_SIZE];

/* Request currently being serviced; NULL when the drive is idle.
 * Its list links the remaining pending requests (see submit_bio()). */
static struct request * cur_req = NULL;
/* Free request slots, replenished by free_cur_req(). */
static list_t req_freelist;
/* Processes blocked in get_req() waiting for a free slot. */
static struct wait_queue * req_wait = NULL;

/*
 * Take a request slot off the free list, sleeping until one becomes
 * available.  Returns the unlinked slot; never returns NULL.
 */
static struct request * get_req(void)
{
	unsigned long flags;
	struct request * req;

	lockb_ide(flags);

try_again:
	if(!list_empty(&req_freelist)){
		req = list_entry(req_freelist.next, list, struct request);
		list_del1(&req->list);
	}else{ // wake up in ide_bottom
		/* NOTE(review): sleepon() is entered while lockb_ide() is
		 * held -- presumably it drops/retakes the lock around the
		 * sleep; confirm, otherwise this deadlocks against the
		 * wakeup path in free_cur_req(). */
		sleepon(&req_wait);
		goto try_again;
	}
		
	unlockb_ide(flags);

	return req;
}

/*
 * Retire cur_req after its I/O has completed: return its slot to the
 * free list and advance cur_req to the next pending request.
 *
 * Returns 0 when the queue is now empty (cur_req set to NULL),
 * 1 when another request is pending and the caller should issue it.
 *
 * NOTE(review): called from ata_scsi_qc_complete() without lockb_ide()
 * held -- verify this is safe against a concurrent submit_bio().
 */
static int free_cur_req(void)
{
	struct request * tmp;

	/* A slot is about to be freed; rouse anyone waiting in get_req(). */
	if(list_empty(&req_freelist))
		wakeup(&req_wait);

	tmp = cur_req;

	if(list_empty(&cur_req->list)){
		cur_req = NULL;

		list_add_head(&req_freelist, &tmp->list);
		return 0; // nothing to do
	}

	/* More requests are queued behind this one: promote the next. */
	cur_req = list_entry(cur_req->list.next, list, struct request);

	list_del1(&tmp->list);
	list_add_head(&req_freelist, &tmp->list);

	return 1; // more work to do
}

static void ll_bio(struct buf_head *bh);
typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);

/*
 * Allocate and initialize a scatterlist of @nents entries for @table.
 * Allocation failure is fatal.  The caller owns table->sgl and must
 * kfree() it when the command is done.
 */
static void alloc_sg_table(struct sg_table *table, unsigned int nents)
{
	struct scatterlist *sgl;

	sgl = (struct scatterlist *)kmalloc(sizeof(*sgl) * nents, 0);
	if (!sgl) {
		printk("kmalloc failed. %s %s\n",__func__,__FILE__);
		BUG();
	}

	table->sgl = sgl;
	table->nents = nents;
	sg_init_table(sgl, nents);
}

/*
 * Attach the buffer's data area to @scmd as a single-entry scatterlist
 * and record the fixed transfer length (SECTOR_UNIT * SECTOR_SIZE
 * bytes).  The scatterlist is allocated here; the owner of @scmd is
 * responsible for freeing scmd->sdb.table.sgl.
 */
static void scsi_setup_fs_cmnd(struct buf_head *bh, struct scsi_cmnd *scmd)
{
	struct page *page;
	unsigned int offset;
	unsigned long nbytes;

	alloc_sg_table(&scmd->sdb.table,1);

	/* Translate the buffer's kernel virtual address into a page +
	 * in-page offset pair for DMA mapping. */
	page = virt_to_page(bh->b_data);
	offset = offset_in_page(bh->b_data);
	nbytes = SECTOR_UNIT * SECTOR_SIZE;

	sg_set_page(scmd->sdb.table.sgl,page,nbytes,offset);
	scmd->sdb.length = nbytes;
}

/*
 * Build a SCSI READ/WRITE CDB for buffer @bh in @scmd.
 *
 * Uses WRITE_6/READ_6 as the opcode base and then converts it to the
 * 10-byte form (LBA fits in 32 bits) or the 16-byte form (LBA above
 * 0xffffffff) by adding the opcode delta.  LBA and transfer length are
 * stored big-endian per the SCSI Block Commands spec.
 */
static void sd_prep_scsi_cmd(struct buf_head *bh,struct scsi_cmnd *scmd)
{
	sector_t block = bh->b_blocknr;
	unsigned long count = SECTOR_UNIT; //transfer unit is fixed to 1024 bytes 
	unsigned char protect;
	bool fua;
	bool rw;

	/*
	 *Do this for compatibility
	 */
	if(bh->b_flags & B_READ)
		rw = READ;
	else 
		rw = WRITE;

	scsi_setup_fs_cmnd(bh,scmd);

	/* Direction decides both the opcode base and the DMA direction. */
  if (rw == WRITE) {
    scmd->cmnd[0] = WRITE_6;
    scmd->sd_data_direction = DMA_TO_DEVICE;
  } else {
    scmd->cmnd[0] = READ_6;
    scmd->sd_data_direction = DMA_FROM_DEVICE;
  }	

	/*
 	 * Disk protection is yet to be supported.
	 */
	protect = 0;
	/*
	 *force unit access is off.
	 */
	fua = false;

	/* LBA does not fit in 32 bits: use the 16-byte CDB form.
	 * The sizeof(block) > 4 guards keep the >>32..>>56 shifts legal
	 * even if sector_t is only 32 bits wide. */
  if (unlikely(block > 0xffffffff)) {
    scmd->cmnd[0] += READ_16 - READ_6;
    scmd->cmnd[1] = protect | (fua ? 0x8 : 0);
    scmd->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
    scmd->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
    scmd->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
    scmd->cmnd[5] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0;
    scmd->cmnd[6] = (unsigned char) (block >> 24) & 0xff;
    scmd->cmnd[7] = (unsigned char) (block >> 16) & 0xff;
    scmd->cmnd[8] = (unsigned char) (block >> 8) & 0xff;
    scmd->cmnd[9] = (unsigned char) block & 0xff;
    scmd->cmnd[10] = (unsigned char) (count >> 24) & 0xff;
    scmd->cmnd[11] = (unsigned char) (count >> 16) & 0xff;
    scmd->cmnd[12] = (unsigned char) (count >> 8) & 0xff;
    scmd->cmnd[13] = (unsigned char) count & 0xff;
    scmd->cmnd[14] = scmd->cmnd[15] = 0;
  } else { 
	/* 32-bit LBA: use the 10-byte CDB form. */
    scmd->cmnd[0] += READ_10 - READ_6;
    scmd->cmnd[1] = protect | (fua ? 0x8 : 0);
    scmd->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
    scmd->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
    scmd->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
    scmd->cmnd[5] = (unsigned char) block & 0xff;
    scmd->cmnd[6] = scmd->cmnd[9] = 0;
    scmd->cmnd[7] = (unsigned char) (count >> 8) & 0xff;
    scmd->cmnd[8] = (unsigned char) count & 0xff;
  }
}

/**
 *  scsi_6_lba_len - Get LBA and transfer length
 *  @cdb: SCSI command to translate
 *  @plba: out, the LBA
 *  @plen: out, the transfer length
 *
 *  Decode LBA and transfer length from a 6-byte CDB.  The LBA is the
 *  low 5 bits of byte 1 followed by bytes 2-3 (big-endian); the length
 *  is byte 4 (a value of 0 means 256 blocks -- handled by the caller).
 */
static void scsi_6_lba_len(const u8_t *cdb, u64_t *plba, u32_t *plen)
{
  *plba = (((u64_t)(cdb[1] & 0x1f)) << 16)
        | (((u64_t)cdb[2]) << 8)
        |  ((u64_t)cdb[3]);

  *plen = cdb[4];
}

/**
 *  scsi_10_lba_len - Get LBA and transfer length
 *  @cdb: SCSI command to translate
 *  @plba: out, the LBA
 *  @plen: out, the transfer length
 *
 *  Decode LBA and transfer length from a 10-byte CDB: bytes 2-5 hold a
 *  big-endian 32-bit LBA, bytes 7-8 a big-endian 16-bit length.
 */
static void scsi_10_lba_len(const u8_t *cdb, u64_t *plba, u32_t *plen)
{
  *plba = ((u64_t)cdb[2] << 24) | ((u64_t)cdb[3] << 16) |
          ((u64_t)cdb[4] << 8)  |  (u64_t)cdb[5];

  *plen = ((u32_t)cdb[7] << 8) | (u32_t)cdb[8];
}

/**
 *  scsi_16_lba_len - Get LBA and transfer length
 *  @cdb: SCSI command to translate
 *  @plba: out, the LBA
 *  @plen: out, the transfer length
 *
 *  Decode LBA and transfer length from a 16-byte CDB: bytes 2-9 hold a
 *  big-endian 64-bit LBA, bytes 10-13 a big-endian 32-bit length.
 */
static void scsi_16_lba_len(const u8_t *cdb, u64_t *plba, u32_t *plen)
{
  u64_t lba = 0;
  u32_t len = 0;
  int i;

  for (i = 2; i <= 9; i++)
    lba = (lba << 8) | cdb[i];

  for (i = 10; i <= 13; i++)
    len = (len << 8) | cdb[i];

  *plba = lba;
  *plen = len;
}

/*
 * ATA opcode table for ata_rwcmd_protocol().  Indexed by
 * base + fua*4 + lba48*2 + write, where base is 0 for multi-sector
 * PIO, 8 for plain PIO and 16 for DMA.  A zero entry marks an
 * unsupported combination (e.g. FUA without LBA48).
 */
static const u8_t ata_rw_cmds[] = {
  /* pio multi */
  ATA_CMD_READ_MULTI,
  ATA_CMD_WRITE_MULTI,
  ATA_CMD_READ_MULTI_EXT,
  ATA_CMD_WRITE_MULTI_EXT,
  0,
  0,
  0,
  ATA_CMD_WRITE_MULTI_FUA_EXT,
  /* pio */
  ATA_CMD_PIO_READ,
  ATA_CMD_PIO_WRITE,
  ATA_CMD_PIO_READ_EXT,
  ATA_CMD_PIO_WRITE_EXT,
  0,
  0,
  0,
  0,
  /* dma */
  ATA_CMD_READ,
  ATA_CMD_WRITE,
  ATA_CMD_READ_EXT,
  ATA_CMD_WRITE_EXT,
  0,
  0,
  0,
  ATA_CMD_WRITE_FUA_EXT
};

/**
 *  ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *  @tf: command to examine and configure
 *  @dev: device tf belongs to
 *
 *  Examine the device configuration and tf->flags to calculate
 *  the proper read/write commands and protocol to use.
 *
 *  RETURNS:
 *  0 on success, -1 when the FUA/LBA48/write combination has no
 *  matching opcode in ata_rw_cmds[].
 *
 *  LOCKING:
 *  caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
  u8_t cmd;

  int index, fua, lba48, write;

  /* Encode the flag bits as table-index offsets (see ata_rw_cmds[]). */
  fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
  lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
  write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

  if (dev->flags & ATA_DFLAG_PIO) {
    tf->protocol = ATA_PROT_PIO;
    index = dev->multi_count ? 0 : 8;
  } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
    /* Unable to use DMA due to host limitation */
    tf->protocol = ATA_PROT_PIO;
    index = dev->multi_count ? 0 : 8;
  } else {
    tf->protocol = ATA_PROT_DMA;
    index = 16;
  }

  cmd = ata_rw_cmds[index + fua + lba48 + write];
  if (cmd) {
    tf->command = cmd;
    return 0;
  }
  return -1;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	Unlike the Linux original, out-of-range or invalid requests do
 *	not return an error code here -- they BUG() (the -ERANGE/-EINVAL
 *	returns are commented out below).
 */
void ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64_t block, u32_t n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block)){
			printk("I: range test failed. %s %s\n",__func__,__FILE__);
			BUG();
			//return -ERANGE;
		}

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* In NCQ the tag rides in the sector-count register and the
		 * block count in the feature registers. */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48)){
				printk("II range test failed. %s %s\n",
					__func__,__FILE__);
				BUG();
				//return -ERANGE;
			}

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else{
			/* request too large even for LBA48 */
			printk("III range test failed. %s %s\n",
				__func__,__FILE__);
			BUG();
			//return -ERANGE;
		}

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)){
			printk("Invalid ata r/w command. %s %s\n",__func__,__FILE__);
			BUG();
			//return -EINVAL;
		}

		/* Low 24 LBA bits and low 8 count bits are common to
		 * LBA28 and LBA48. */
		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		printk("CHS mode is not supported anymore. %s %s\n",
			__func__,__FILE__);
		BUG();
	}
}

/**
 *	ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
 *	@qc: Storage for translated ATA taskfile
 *
 *	Converts any of six SCSI read/write commands into the
 *	ATA counterpart, including starting sector (LBA),
 *	sector count, and taking into account the device's LBA48
 *	support.
 *
 *	Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
 *	%WRITE_16 are currently supported.  Any other opcode, or a
 *	zero-length transfer, is fatal (BUG()).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8_t *cdb = scmd->cmnd;
	unsigned int tf_flags = 0;
	u64_t block;
	u32_t n_block;

	if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
		tf_flags |= ATA_TFLAG_WRITE;

	/* Calculate the SCSI LBA, transfer length and FUA. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
		scsi_10_lba_len(cdb, &block, &n_block);
		/* Bit 3 of byte 1 is the FUA bit in 10/16-byte CDBs. */
		if (unlikely(cdb[1] & (1 << 3)))
			tf_flags |= ATA_TFLAG_FUA;
		break;
	case READ_6:
	case WRITE_6:
		scsi_6_lba_len(cdb, &block, &n_block);

		/* for 6-byte r/w commands, transfer length 0
		 * means 256 blocks of data, not 0 block.
		 */
		if (!n_block)
			n_block = 256;
		break;
	case READ_16:
	case WRITE_16:
		scsi_16_lba_len(cdb, &block, &n_block);
		if (unlikely(cdb[1] & (1 << 3)))
			tf_flags |= ATA_TFLAG_FUA;
		break;
	default:
		printk("no-byte command %s %s\n",__func__,__FILE__);
		BUG();
	}

	/* Check and compose ATA command */
	if (!n_block){
		/* For 10-byte and 16-byte SCSI R/W commands, transfer
		 * length 0 means transfer 0 block of data.
		 * However, for ATA R/W commands, sector count 0 means
		 * 256 or 65536 sectors, not 0 sectors as in SCSI.
		 *
		 * WARNING: one or two older ATA drives treat 0 as 0...
		 */
		printk("nothing to do ... %s %s\n",__func__,__FILE__);
		BUG();
	}

	qc->flags |= ATA_QCFLAG_IO;
	qc->nbytes = n_block * ATA_SECT_SIZE;

	ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
			     qc->tag);
	return 0;
}

/*
 * Map a SCSI opcode to its ATA translation routine.  Only the six
 * plain read/write opcodes are translated; anything else yields NULL
 * so the caller can reject the command.  @dev is currently unused.
 */
static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8_t cmd)
{
  switch (cmd) {
  case READ_6:
  case WRITE_6:
  case READ_10:
  case WRITE_10:
  case READ_16:
  case WRITE_16:
    return ata_scsi_rw_xlat;
  default:
    return NULL;
  }
}

extern void ata_qc_free(struct ata_queued_cmd *qc);
/*
 * Completion callback for a queued ATA command (installed as
 * qc->complete_fn by ata_scsi_translate()).  Marks the buffer done,
 * wakes anyone sleeping on it, retires the current request and, if
 * more requests are pending, issues the next one via ll_bio().
 */
void ata_scsi_qc_complete(void *data)
{
	struct ata_queued_cmd *qc = (struct ata_queued_cmd *)data;
	struct buf_head *bh = qc->bh;
  int need_sense = (qc->err_mask != 0);
	unsigned long flags=0;

	/* Any device error would require a REQUEST SENSE round-trip,
	 * which is not implemented. */
  if (need_sense) {
		printk("scsi sense not supported for now. %s %s\n",
			__func__,__FILE__);
		BUG();
  }   

	if(!cur_req){
		printk("interrupt without request.\n");
		BUG();
	}

  ata_qc_free(qc);

	/* Publish completion under the IDE lock before waking waiters. */
	lockb_ide(flags);
	bh->b_flags |= B_DONE;
	wakeup(&bh->b_wait);
	unlockb_ide(flags);

	if(!free_cur_req())
		return;

	/* free_cur_req() advanced cur_req to the next pending request. */
	ll_bio(cur_req->buf);
}

/**
 *  ata_qc_new - Request an available ATA command, for queueing
 *  @ap: target port
 *
 *  Claims the first free tag in ap->qc_allocated (atomically, via
 *  test_and_set_bit) and returns the matching queued-command slot.
 *
 *  RETURNS:
 *  The allocated command with qc->tag set, or NULL if the port is
 *  frozen or every usable tag is taken.
 *
 *  LOCKING:
 *  None.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
  struct ata_queued_cmd *qc = NULL;
  unsigned int i;

  /* no command while frozen */
  if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
    return NULL;

  /* the last tag is reserved for internal command. */
  for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
    if (!test_and_set_bit(i, &ap->qc_allocated)) {
      qc = __ata_qc_from_tag(ap, i);
      break;
    }

  if (qc)
    qc->tag = i;

  return qc;
}   

/**
 *  ata_qc_new_init - Request an available ATA command, and initialize it
 *  @dev: Device from whom we request an available command structure
 *
 *  RETURNS:
 *  The initialized command, or NULL when no tag is available.
 *
 *  LOCKING:
 *  None.
 */ 

struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{   
  struct ata_port *port = dev->link->ap;
  struct ata_queued_cmd *qc = ata_qc_new(port);

  if (!qc)
    return NULL;

  qc->scsicmd = NULL;
  qc->ap = port;
  qc->dev = dev;
  ata_qc_reinit(qc);

  return qc;
} 

/**
 *  ata_scsi_qc_new - acquire new ata_queued_cmd reference
 *  @dev: ATA device to which the new command is attached
 *  @cmd: SCSI command that originated this ATA command
 *  @done: SCSI command completion function
 *
 *  Obtain a reference to an unused ata_queued_cmd structure,
 *  which is the basic libata structure representing a single
 *  ATA command sent to the hardware.
 *
 *  If a command was available, fill in the SCSI-specific
 *  portions of the structure with information on the
 *  current command.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 *
 *  RETURNS:
 *  Command allocated, or %NULL if none available.  Note that the
 *  none-available case BUG()s below, so NULL is not returned in
 *  practice.
 */
static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
                struct scsi_cmnd *cmd,
                void (*done)(struct scsi_cmnd *))
{
  struct ata_queued_cmd *qc=NULL;

  qc = ata_qc_new_init(dev);

  if (qc) {
    qc->scsicmd = cmd;
    qc->scsidone = done;

    /* Point the qc at the scatterlist built in scsi_setup_fs_cmnd(). */
    qc->sg = cmd->sdb.table.sgl;
    qc->n_elem = cmd->sdb.table.nents;
  } else {
		printk("ata queue full. %s %s\n",__func__,__FILE__);
		BUG();
  }

  return qc;
}

/**
 *	ata_scsi_translate - Translate then issue SCSI command to ATA device
 *	@bh: buffer this command transfers
 *	@dev: ATA device to which the command is addressed
 *	@cmd: SCSI command to execute
 *	@done: SCSI command completion function
 *	@xlat_func: Actor which translates @cmd to an ATA taskfile
 *
 *	Our ->queuecommand() function has decided that the SCSI
 *	command issued can be directly translated into an ATA
 *	command, rather than handled internally.
 *
 *	This function sets up an ata_queued_cmd structure for the
 *	SCSI command, and sends that ata_queued_cmd to the hardware.
 *
 *	The xlat_func argument (actor) returns 0 if ready to execute
 *	the ATA command; a non-zero return is treated as a fatal
 *	translation error here (BUG()).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	Unlike the Linux original this function returns nothing; every
 *	failure path BUG()s instead of deferring the command.
 */
static void ata_scsi_translate(struct buf_head *bh,
						struct ata_device *dev, struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *),
			      ata_xlat_func_t xlat_func)
{
	struct ata_queued_cmd *qc=NULL;

	qc = ata_scsi_qc_new(dev, cmd, done);
	if (!qc){
		printk("Not enough memory for new ata commands. %s %s\n",
			__func__,__FILE__);
		BUG();
	}

	/* data is present; dma-map it */
	if (cmd->sd_data_direction == DMA_FROM_DEVICE ||
	    cmd->sd_data_direction == DMA_TO_DEVICE) {
		if (unlikely(scsi_bufflen(cmd) < 1)) {
			printk("zero len r/w req %s %s\n",__func__,__FILE__);
			BUG();
		}

		ata_sg_init(qc, cmd->sdb.table.sgl, cmd->sdb.table.nents);

		qc->dma_dir = cmd->sd_data_direction;
	}

	/* Completion is reported asynchronously via ata_scsi_qc_complete(). */
	qc->complete_fn = ata_scsi_qc_complete;
	qc->bh = bh;

	if (xlat_func(qc)){
		printk("SCSI command translation error. %s %s\n",
			__func__,__FILE__);
		BUG();
	}

	/* select device, send command to hardware */
	ata_qc_issue(qc);

	if(qc->tf.flags & ATA_TFLAG_POLLING){
		printk("Polling mode is not supported yet. %s %s\n",__func__,__FILE__);
		BUG();
	}

#if 0
	if(! (qc->tf.flags & ATA_TFLAG_POLLING)){
		iowait(bh);
	}else{
		printk("%s %s\n",__func__,__FILE__);
		BUG();
	}
#endif
}

/*
 * Dispatch a prepared SCSI command for buffer @bh to ATA device @dev:
 * pick the translation routine for the opcode and hand everything to
 * ata_scsi_translate().  Non-disk device classes and untranslatable
 * opcodes are fatal.
 */
static inline void ata_scsi_queuecmd(struct buf_head *bh,
							struct scsi_cmnd *scmd,
              void (*done)(struct scsi_cmnd *),
              struct ata_device *dev)
{
	u8_t scsi_op = scmd->cmnd[0];
	ata_xlat_func_t xlat_func;

	if (dev->class == ATA_DEV_ATA) {
		xlat_func = ata_get_xlat_func(dev, scsi_op);
	} else {
		printk("device class not supported. %s %s\n",
				__func__,__FILE__);
		BUG();
	}
	if(!xlat_func){
		printk("unknown scsi command. %s %s\n",
				__func__,__FILE__);
		/* Fix: this path used `while(1);`, silently spinning with no
		 * diagnostics; use BUG() like every other fatal path here. */
		BUG();
	}
	ata_scsi_translate(bh,dev, scmd, done, xlat_func);
}

/*
 * Debug helper: print the active command tag of port 0 / device 0.
 * Fix: declared with (void) -- an empty () parameter list is an
 * old-style unprototyped declaration in C and disables argument
 * checking at call sites.
 */
void print_link(void)
{
	struct ata_device *dev = &(ahost->ports[0]->link.device[0]);

	printk("%x\n",dev->link->active_tag);
}

static void ll_bio(struct buf_head *bh)
{
	struct scsi_cmnd *scmd;
	scmd = (struct scsi_cmnd*)kmalloc(sizeof(struct scsi_cmnd),0);
	if(!scmd){
		printk("kmalloc failed. %s %s\n",__func__,__FILE__);
		BUG();
	}
	memset(scmd,0,sizeof(*scmd));

	sd_prep_scsi_cmd(bh,scmd);
	ata_scsi_queuecmd(bh,scmd,NULL,&(ahost->ports[0]->link.device[0]));

	//iowait(bh);

	kfree(scmd);
}

/*
 * Queue buffer @bh for disk I/O.  If the drive is idle the request is
 * issued immediately; otherwise it is inserted into the pending queue
 * sorted by block number (a simple elevator), to be issued later from
 * ata_scsi_qc_complete().
 */
void submit_bio(struct buf_head *bh)
{
	struct request * req;
	unsigned long flags = 0;

	req = get_req();
	req->buf = bh;

	lockb_ide(flags);

	if(cur_req){
		if(list_empty(&cur_req->list))
			list_add_head(&cur_req->list, &req->list);
		else{
			list_t * tmp, * pos;
			struct request * prev, * next;

			prev = cur_req;

			/* Find the insertion point keeping the queue sorted by
			 * ascending block number.
			 * NOTE(review): foreach()/endeach() are assumed to be
			 * block-opening/closing macros so that all four
			 * statements below form the loop body despite the
			 * indentation -- confirm the macro definitions. */
			foreach(tmp, pos, &cur_req->list)
				prev = list_entry(tmp, list, struct request);
				next = list_entry(tmp->next,
					list, struct request);

				if((prev->buf->b_blocknr < req->buf->b_blocknr)
					&& (next->buf->b_blocknr >= req->buf->b_blocknr))
					break;
			endeach(&cur_req->list);

			/* req after prev */
			list_add_head(&prev->list, &req->list);
		}

		unlockb_ide(flags);

		return;
	}

	/* Drive idle: this request becomes the head and is issued now. */
	cur_req = req;
	list_head_init(&cur_req->list);

	unlockb_ide(flags);

	ll_bio(cur_req->buf);
}

/*
 * Initialize the disk driver: put every request slot on the free list
 * and read the on-disk partition table.
 */
void sd_init()
{
	int i;
	list_head_init(&req_freelist);

	/* Fix: the loop previously stopped at REQQ_SIZE - 1, so the last
	 * reqq[] entry was never added to the free list.  Unlike
	 * ata_qc_new()'s reserved tag, nothing here documents a reserved
	 * slot, so this looks like an off-by-one. */
	for(i = 0; i < REQQ_SIZE; i++)
		list_add_head(&req_freelist, &reqq[i].list);

	parse_partition(&sd_partition_table);
}

/*
 * Device-open entry point.  Stub: logs the call and reports success.
 */
int sd_open(dev_t dev, int flags)
{
	printk("sd_open called but not implemented.\n");
	DBG1(1);

	return 0;
}

/*
 * Device-close entry point.  Stub: logs the call and reports success.
 */
int sd_close(dev_t dev,int flags)
{
	printk("sd_close called but not implemented.\n");
	DBG1(1);

	return 0;
}

/*
 * Raw-device read entry point.  Stub: logs the call and transfers
 * nothing (returns 0 bytes).
 */
ssize_t sd_read(dev_t dev, char * buffer, size_t count, off_t off,
		int * error, int openflags)
{
	printk("sd_read called but not yet implemented.\n");
	DBG1(1);

	return 0;
}

/*
 * Raw-device write entry point.  Stub: logs the call and transfers
 * nothing (returns 0 bytes).
 */
ssize_t sd_write(dev_t dev, const char * buffer, size_t count, off_t off,
		int * error, int openflags)
{
	printk("sd_write called but not implemented.\n");
	DBG1(1);

	return 0;
}

/*
 * Device ioctl entry point.  Stub: logs the call and rejects every
 * request with -ENOTSUP.
 */
int sd_ioctl(dev_t dev, int request, void * data, int openflags)
{
	printk("sd_ioctl called but not implemented yet.\n");
	DBG1(1);

	return -ENOTSUP;
}
