/*
 * This file implements a driver for IDE drives, or SATA drives operating in
 * a compatibility mode.
 * Most hardware-specific lines of code are adapted from the Linux source.
 * The design is not yet complete, so it may work on some machines but fail
 * on others.
 * You are encouraged to improve this driver or rewrite it from scratch based
 * on the hardware specifications.
 * Please respect the GPL if you are thinking about using this code.
 */
#include <cnix/types.h>
#include <cnix/kernel.h>
#include <cnix/pci.h>
#include <cnix/ata.h>
#include <cnix/libata.h>
#include <cnix/string.h>
#include <cnix/mm.h>
#include <asm/io.h>
#include <cnix/errno.h>
#include <cnix/page.h>
#include <asm/bitops.h>
#include <cnix/scatterlist.h>
#include <cnix/log2.h>
#include <cnix/bottom.h>
#include <cnix/time.h>
#include <cnix/buffer.h>
#include <cnix/partition.h>

#define min(a,b)	((a)<(b)?(a):(b))

//int gplong=0;
struct ata_host *ahost = NULL;

extern void ndelay(unsigned long nsecs);
extern void udelay(unsigned long usecs);
extern void msleep(unsigned long ms);

int sata_scr_read(struct ata_link *link, int reg, u32_t *val);
int sata_scr_write(struct ata_link *link, int reg, u32_t val);
bool ata_phys_link_offline(struct ata_link *link);
int sata_scr_valid(struct ata_link *link);
int sata_pmp_scr_write(struct ata_link *link, int reg, u32_t val);
int sata_link_resume(struct ata_link *link, const unsigned long *params,
         unsigned long deadline);
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
       int (*check_ready)(struct ata_link *link));
static int ata_sff_check_ready(struct ata_link *link);
unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
          u8_t *r_err);
static inline int ata_id_is_cfa(const u16_t *id);
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8_t status, int in_wq);
static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
                 unsigned int tag);

/* debounce timing parameters in msecs { interval, duration, timeout },
 * consumed by sata_link_resume()/sata_link_hardreset() via their
 * `timing`/`params` argument */
const unsigned long sata_deb_timing_normal[]    = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]   = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]    = { 100, 2000, 5000 };

/*
 * print_res - dump the PCI resource (BAR) table of @pdev to the console.
 * Prints one line per resource slot: name, start, end and flags.
 */
void print_res(struct pci_device *pdev)
{
	int idx;

	printk("resource list for dev:%4x:%2x:%2x:%2x %d\n", ATA_BUS_DOMAIN,
			ATA_BUS_NR, (ATA_BUS_DEVFN >> 3) & 0x1f, (ATA_BUS_DEVFN & 0x7),
			DEVICE_COUNT_RESOURCE);

	for (idx = 0; idx < DEVICE_COUNT_RESOURCE; idx++)
		printk("name: %s start: %x end: %x flags: %x\n",
			pdev->resource[idx].name,
			pdev->resource[idx].start,
			pdev->resource[idx].end,
			pdev->resource[idx].flags);
}

/* PIIX/ICH register offsets, feature flags and mapping-table constants. */
enum {
  PIIX_IOCFG    = 0x54, /* IDE I/O configuration register */
  ICH5_PMR    = 0x90, /* port mapping register */
  ICH5_PCS    = 0x92, /* port control and status */
  PIIX_SIDPR_BAR    = 5, /* BAR holding the SATA index/data pair */
  PIIX_SIDPR_LEN    = 16, /* length of the SIDPR region */
  PIIX_SIDPR_IDX    = 0, /* index register offset within SIDPR */
  PIIX_SIDPR_DATA   = 4, /* data register offset within SIDPR */

  PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
  PIIX_FLAG_SIDPR   = (1 << 29), /* SATA idx/data pair regs */

  PIIX_PATA_FLAGS   = ATA_FLAG_SLAVE_POSS,
  PIIX_SATA_FLAGS   = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,

  PIIX_80C_PRI    = (1 << 5) | (1 << 4), /* 80-wire cable bits, primary */
  PIIX_80C_SEC    = (1 << 7) | (1 << 6), /* 80-wire cable bits, secondary */

  /* constants for mapping table */
  P0      = 0,  /* port 0 */
  P1      = 1,  /* port 1 */
  P2      = 2,  /* port 2 */
  P3      = 3,  /* port 3 */
  IDE     = -1, /* IDE */
  NA      = -2, /* not available */
  RV      = -3, /* reserved */

  PIIX_AHCI_DEVICE  = 6,

  /* host->flags bits */
  PIIX_HOST_BROKEN_SUSPEND = (1 << 24),
};

/* Controller flavors; used as driver_data indices in piix_pci_tbl below. */
enum piix_controller_ids {
  /* controller IDs */
  piix_pata_mwdma,  /* PIIX3 MWDMA only */
  piix_pata_33,   /* PIIX4 at 33MHz */
  ich_pata_33,    /* ICH up to UDMA 33 only */
  ich_pata_66,    /* ICH up to 66 MHz */
  ich_pata_100,   /* ICH up to UDMA 100 */
  ich_pata_100_nomwdma1,  /* ICH up to UDMA 100 but with no MWDMA1 */
  ich5_sata,
  ich6_sata,
  ich6m_sata,
  ich8_sata,
  ich8_2port_sata,
  ich8m_apple_sata, /* locks up on second port enable */
  tolapai_sata,
  piix_pata_vmw,      /* PIIX4 for VMware, spurious DMA_ERR */
};

/*
 * PCI IDs handled by this driver.  Each entry is
 * { vendor, device, subvendor, subdevice, class, class_mask, driver_data }
 * where driver_data is an enum piix_controller_ids value.  PATA entries
 * come first, SATA (operating in IDE compatibility mode) after.
 */
static const struct pci_device_id piix_pci_tbl[] = {
	/* Intel PIIX3 for the 430HX etc */
	{ 0x8086, 0x7010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_mwdma },
	/* VMware ICH4 */
	{ 0x8086, 0x7111, 0x15ad, 0x1976, 0, 0, piix_pata_vmw },
	/* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
	/* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
	{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
	/* Intel PIIX4 */
	{ 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
	/* Intel PIIX4 */
	{ 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
	/* Intel PIIX */
	{ 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
	/* Intel ICH (i810, i815, i840) UDMA 66*/
	{ 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 },
	/* Intel ICH0 : UDMA 33*/
	{ 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 },
	/* Intel ICH2M */
	{ 0x8086, 0x244A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */
	{ 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/*  Intel ICH3M */
	{ 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* Intel ICH3 (E7500/1) UDMA 100 */
	{ 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */
	{ 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	{ 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* Intel ICH5 */
	{ 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* C-ICH (i810E2) */
	{ 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* ESB (855GME/875P + 6300ESB) UDMA 100  */
	{ 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* ICH6 (and 6) (i915) UDMA 100 */
	{ 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
	/* ICH7/7-R (i945, i975) UDMA 100*/
	{ 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 },
	{ 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 },
	/* ICH8 Mobile PATA Controller */
	{ 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },

	/* SATA ports */
	
	/* 82801EB (ICH5) */
	{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
	/* 82801EB (ICH5) */
	{ 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
	/* 6300ESB (ICH5 variant with broken PCS present bits) */
	{ 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
	/* 6300ESB pretending RAID */
	{ 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
	/* 82801FB/FW (ICH6/ICH6W) */
	{ 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
	/* 82801FR/FRW (ICH6R/ICH6RW) */
	{ 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
	/* 82801FBM ICH6M (ICH6R with only port 0 and 2 implemented).
	 * Attach iff the controller is in IDE mode. */
	{ 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
	/* 82801GB/GR/GH (ICH7, identical to ICH6) */
	{ 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
	/* 82801GBM/GHM (ICH7M, identical to ICH6M) */
	{ 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
	/* Enterprise Southbridge 2 (631xESB/632xESB) */
	{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
	/* SATA Controller 1 IDE (ICH8) */
	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller 2 IDE (ICH8) */
	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* Mobile SATA Controller IDE (ICH8M), Apple */
	{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata },
	{ 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata },
	{ 0x8086, 0x2828, 0x106b, 0x00a3, 0, 0, ich8m_apple_sata },
	/* Mobile SATA Controller IDE (ICH8M) */
	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (ICH9) */
	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (ICH9) */
	{ 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (ICH9) */
	{ 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (ICH9M) */
	{ 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (ICH9M) */
	{ 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (ICH9M) */
	{ 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (Tolapai) */
	{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata },
	/* SATA Controller IDE (ICH10) */
	{ 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (ICH10) */
	{ 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (ICH10) */
	{ 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (ICH10) */
	{ 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
	/* SATA Controller IDE (PCH) */
	{ 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
	{ }	/* terminate list */
};

/*
 *FIXME! We may need more generic code in the future.
 */
/*
 * ata_port_start - set up the PRD (physical region descriptor) table
 * for @ap.  The table lives at the fixed physical address DMA_BASE;
 * ap->prd is the kernel-virtual alias of that same memory.
 * Always returns 0.
 * FIXME: a single fixed buffer cannot serve more than one port.
 */
int ata_port_start(struct ata_port *ap)
{
	ap->prd_dma = (u32_t)DMA_BASE;
	ap->prd = (struct ata_prd *)KERN_VA(ap->prd_dma);

	return 0;
}

/**
 *  ata_sff_port_start32 - Set port up for dma.
 *  @ap: Port to initialize
 *
 *  Called just after data structures for each port are
 *  initialized.  Allocates space for PRD table if the device
 *  is DMA capable SFF.
 *
 *  May be used as the port_start() entry in ata_port_operations for
 *  devices that are capable of 32bit PIO.
 *
 *  LOCKING:
 *  Inherited from caller.
 */
/*
 * Enable 32-bit PIO on @ap and delegate PRD setup to ata_port_start().
 * This driver requires bus-master DMA; a port without a BMDMA base
 * address halts the machine loudly so the problem is visible at boot.
 *
 * Rewritten as a guard clause: the original `else { ...; while(1); }`
 * left a code path where control could appear to fall off the end of a
 * non-void function (compiler diagnostic); now every path returns.
 */
int ata_sff_port_start32(struct ata_port *ap)
{
	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;

	if (!ap->ioaddr.bmdma_addr) {
		printk("ata port should support DMA. %s %s\n",
				__func__,__FILE__);
		while(1);
	}

	return ata_port_start(ap);
}

/**
 *  ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
 *  @ap: Port associated with this ATA transaction.
 *
 *  Clear interrupt and error flags in DMA status register.
 *
 *  May be used as the irq_clear() entry in ata_port_operations.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
/*
 * Acknowledge a BMDMA interrupt: write the latched status bits back
 * to the DMA status register, which clears them (write-1-to-clear).
 * A port without a BMDMA region is a configuration error: halt loudly.
 */
void ata_sff_irq_clear(struct ata_port *ap)
{
	void *bmdma = ap->ioaddr.bmdma_addr;

	if (!bmdma) {
		printk("No DMA in %s of %s\n",__func__,__FILE__);
		while(1);
	}

	outb(inb(bmdma + ATA_DMA_STATUS), bmdma + ATA_DMA_STATUS);
}

/**
 *  ata_sff_check_status - Read device status reg & clear interrupt
 *  @ap: port where the device is
 *
 *  Reads ATA taskfile status register for currently-selected device
 *  and return its value. This also clears pending interrupts
 *      from this device
 *
 *  LOCKING:
 *  Inherited from caller.
 */
/*
 * Read the Status register of the currently selected device.  Reading
 * Status (as opposed to AltStatus) also acknowledges a pending IRQ.
 */
u8_t ata_sff_check_status(struct ata_port *ap)
{
	u8_t status = inb(ap->ioaddr.status_addr);

	return status;
}

/**
 *  ata_sff_freeze - Freeze SFF controller port
 *  @ap: port to freeze
 *
 *  Freeze BMDMA controller port.
 *
 *  LOCKING:
 *  Inherited from caller.
 */
/*
 * Freeze the port: set nIEN in the device-control register so the
 * device stops raising interrupts, then read Status and clear the
 * BMDMA status to discard anything already pending.  A port with no
 * control register cannot be frozen this way — halt loudly.
 */
void ata_sff_freeze(struct ata_port *ap)
{
	struct ata_ioports *io = &ap->ioaddr;

	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (!io->ctl_addr) {
		printk("ctl addr not exist in %s of %s\n",
			__func__,__FILE__);
		while(1);
	}
	outb(ap->ctl, io->ctl_addr);

	/* Some controllers raise an IRQ on the nIEN write itself, and
	 * many fail to mask an already-pending IRQ; ack it explicitly. */
	ap->ops->sff_check_status(ap);
	ap->ops->sff_irq_clear(ap);
}

/**
 *  ata_fill_sg - Fill PCI IDE PRD table
 *  @qc: Metadata associated with taskfile to be transferred
 *
 *  Fill PCI IDE PRD (scatter-gather) table with segments
 *  associated with the current disk command.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 *    
 */     
/*
 * ata_fill_sg - fill the PCI IDE PRD (scatter-gather) table for @qc.
 * Each scatterlist segment is split so that no PRD entry crosses a
 * 64KiB boundary (a bus-master IDE hardware requirement), and the
 * final entry is tagged with ATA_PRD_EOT.
 *
 * Fix: the original unconditionally wrote ap->prd[pi - 1]; with an
 * empty sg list (pi == 0) that is an out-of-bounds write to prd[-1].
 * Guarded now; behavior is unchanged for non-empty lists.
 *
 * LOCKING: spin_lock_irqsave(host lock)
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32_t addr, offset;
		u32_t sg_len, len;

		addr = sg->dma_address;
		sg_len = sg->dma_length;

		while (sg_len) {
			/* clamp so the entry stays inside one 64K page */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[pi].addr = (addr);
			/* len == 0x10000 encodes as 0 per BMDMA spec */
			ap->prd[pi].flags_len = (len & 0xffff);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	if (pi)
		ap->prd[pi - 1].flags_len |= (ATA_PRD_EOT);
}

/**
 *  ata_sff_qc_prep - Prepare taskfile for submission
 *  @qc: Metadata associated with taskfile to be prepared
 *
 *  Prepare ATA taskfile for submission.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
/*
 * Prepare a taskfile for submission: build the PRD table, but only
 * for commands that actually carry a DMA mapping.
 */
void ata_sff_qc_prep(struct ata_queued_cmd *qc)
{
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		ata_fill_sg(qc);
}

/**
 *  ata_sff_busy_wait - Wait for a port status register
 *  @ap: Port to wait for.
 *  @bits: bits that must be clear
 *  @max: number of 10uS waits to perform
 *
 *  Waits up to max*10 microseconds for the selected bits in the port's
 *  status register to be cleared.
 *  Returns final value of status register.
 *
 *  LOCKING:
 *  Inherited from caller.
 */
/*
 * Poll the port status register until all of @bits clear, the bus
 * reads 0xff (no device), or @max 10us polls have elapsed.
 * Returns the final status value.
 */
static inline u8_t ata_sff_busy_wait(struct ata_port *ap, unsigned int bits,
           unsigned int max)
{
	u8_t st;

	for (;;) {
		udelay(10);
		st = ap->ops->sff_check_status(ap);
		if (st == 0xff)		/* floating bus: give up */
			break;
		if (!(st & bits))	/* requested bits cleared */
			break;
		if (--max == 0)		/* poll budget exhausted */
			break;
	}

	return st;
}

/** 
 *  ata_wait_idle - Wait for a port to be idle.
 *  @ap: Port to wait for.
 *
 *  Waits up to 10ms for port's BUSY and DRQ signals to clear.
 *  Returns final value of status register.
 *
 *  LOCKING:
 *  Inherited from caller.
 */
/*
 * Wait up to 10ms (1000 polls of 10us) for BSY and DRQ to drop;
 * returns the final status register value.
 */
static inline u8_t ata_wait_idle(struct ata_port *ap)
{
	return ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
}

/**
 *  ata_dev_select - Select device 0/1 on ATA bus
 *  @ap: ATA channel to manipulate
 *  @device: ATA device (numbered from zero) to select
 *  @wait: non-zero to wait for Status register BSY bit to clear
 *  @can_sleep: non-zero if context allows sleeping
 *
 *  Use the method defined in the ATA specification to
 *  make either device 0, or device 1, active on the
 *  ATA channel.
 *
 *  This is a high-level version of ata_sff_dev_select(), which
 *  additionally provides the services of inserting the proper
 *  pauses and status polling, where needed.
 *
 *  LOCKING:
 *  caller.
 */
/*
 * High-level device select: optionally wait for the port to go idle
 * both before and after switching devices 0/1 via sff_dev_select().
 * Sleeping contexts are not supported by this kernel — halt loudly.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
         unsigned int wait, unsigned int can_sleep)
{
	if (unlikely(!!can_sleep)) {
		printk("can not sleep here. %s in %s\n",__func__,__FILE__);
		while(1);
	}

	if (wait)
		ata_wait_idle(ap);	/* settle before the switch */

	ap->ops->sff_dev_select(ap, device);

	if (wait)
		ata_wait_idle(ap);	/* ... and after it */
}

/**
 *  ata_tf_to_host - issue ATA taskfile to host controller
 *  @ap: port to which command is being issued
 *  @tf: ATA taskfile register set
 *
 *  Issues ATA taskfile register set to ATA host controller,
 *  with proper synchronization with interrupt handler and
 *  other threads.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
          const struct ata_taskfile *tf)
{
  /* program the taskfile registers first, then write the command
   * register — the device starts executing on the command write */
  ap->ops->sff_tf_load(ap, tf);
  ap->ops->sff_exec_command(ap, tf);
}

/*
 * ata_pio_queue_task - would defer PIO state-machine work (@data, after
 * @delay) to task context.  Deferred work is not implemented in this
 * kernel yet, so this is a logging stub; PIO completion therefore has
 * to come through the interrupt path instead.
 */
void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
	printk("Queued work not supported now.\n");
//	while(1);
}   

/**
 *  ata_sff_qc_issue - issue taskfile to device in proto-dependent manner
 *  @qc: command to issue to device
 *
 *  Using various libata functions and hooks, this function
 *  starts an ATA command.  ATA commands are grouped into
 *  classes called "protocols", and issuing each type of protocol
 *  is slightly different.
 *
 *  May be used as the qc_issue() entry in ata_port_operations.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 *
 *  RETURNS:
 *  Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATAPI_PROT_PIO:
		case ATAPI_PROT_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATAPI_PROT_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR){
				/* see ata_dma_blacklisted() */
				printk("error %s in %s\n",__func__,__FILE__);
				while(1);
			}
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command; each protocol programs the taskfile and sets
	 * the HSM state the interrupt/poll path will continue from */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_pio_queue_task(ap, qc, 0);

		break;

	case ATA_PROT_DMA:
		/* DMA completion arrives via interrupt; nothing to poll */
		ap->hsm_task_state = HSM_ST_LAST;
		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_pio_queue_task(ap, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_pio_queue_task(ap, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_pio_queue_task(ap, qc, 0);
		break;

	case ATAPI_PROT_DMA:
		/* bmdma_start is deferred until after the CDB is sent */
		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_pio_queue_task(ap, qc, 0);
		break;

	default:
		/* unknown protocol: reject the command */
		return AC_ERR_SYSTEM;
	}

	return 0;
}

/**
 *  ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 *  @qc: qc to fill result TF for
 *
 *  @qc is finished and result TF needs to be filled.  Fill it
 *  using ->sff_tf_read.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 *
 *  RETURNS:
 *  true indicating that result TF is successfully filled.
 */
bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
  qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
  return true;
}

/*
 * ata_sff_error_handler - error-handling entry point; not implemented.
 * Halts deliberately so a failure is visible during bring-up rather
 * than silently retried.
 */
void ata_sff_error_handler(struct ata_port *ap)
{
	printk("ata_sff_error_handler. \n");
	while(1);
}

/**
 *  ata_sff_sync - Flush writes
 *  @ap: Port to wait for.
 *
 *  CAUTION:
 *  If we have an mmio device with no ctl and no altstatus
 *  method this will fail. No such devices are known to exist.
 *
 *  LOCKING:
 *  Inherited from caller.
 */

/*
 * Flush posted register writes by reading AltStatus — via the
 * controller-specific hook when one exists, otherwise directly.
 * Silently does nothing if neither is available.
 */
static void ata_sff_sync(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus) {
		ap->ops->sff_check_altstatus(ap);
		return;
	}

	if (ap->ioaddr.altstatus_addr)
		inb(ap->ioaddr.altstatus_addr);
}

/**
 *  ata_sff_pause   - Flush writes and wait 400nS
 *  @ap: Port to pause for.
 *
 *  CAUTION:
 *  If we have an mmio device with no ctl and no altstatus
 *  method this will fail. No such devices are known to exist.
 *
 *  LOCKING:
 *  Inherited from caller.
 */

/*
 * Flush posted writes, then honor the ATA-mandated 400ns delay before
 * the next register access.
 */
void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}

/**
 *  ata_sff_dev_select - Select device 0/1 on ATA bus
 *  @ap: ATA channel to manipulate
 *  @device: ATA device (numbered from zero) to select
 *
 *  Use the method defined in the ATA specification to
 *  make either device 0, or device 1, active on the
 *  ATA channel.  Works with both PIO and MMIO.
 *
 *  May be used as the dev_select() entry in ata_port_operations.
 *
 *  LOCKING:
 *  caller.
 */
/*
 * Select device 0 or 1 on the channel by writing the Device register;
 * nonzero @device selects the slave (DEV1 bit set).  Followed by the
 * mandatory flush + 400ns pause.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8_t sel = device ? (ATA_DEVICE_OBS | ATA_DEV1) : ATA_DEVICE_OBS;

	outb(sel, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 *  ata_sff_tf_load - send taskfile registers to host controller
 *  @ap: Port to which output is sent
 *  @tf: ATA taskfile register set
 *
 *  Outputs ATA taskfile to standard ATA host controller.
 *
 *  LOCKING:
 *  Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
  struct ata_ioports *ioaddr = &ap->ioaddr;
  unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

  /* update the device-control register only when it actually changed */
  if (tf->ctl != ap->last_ctl) {
    if (ioaddr->ctl_addr)
      outb(tf->ctl, ioaddr->ctl_addr);
    ap->last_ctl = tf->ctl;
    ata_wait_idle(ap);
  }

  /* LBA48: the high-order ("hob") bytes are written first, into the
   * same registers the low-order bytes go to next (two-deep FIFO) */
  if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
    outb(tf->hob_feature, ioaddr->feature_addr);
    outb(tf->hob_nsect, ioaddr->nsect_addr);
    outb(tf->hob_lbal, ioaddr->lbal_addr);
    outb(tf->hob_lbam, ioaddr->lbam_addr);
    outb(tf->hob_lbah, ioaddr->lbah_addr);
  }

  /* low-order taskfile bytes */
  if (is_addr) {
    outb(tf->feature, ioaddr->feature_addr);
    outb(tf->nsect, ioaddr->nsect_addr);
    outb(tf->lbal, ioaddr->lbal_addr);
    outb(tf->lbam, ioaddr->lbam_addr);
    outb(tf->lbah, ioaddr->lbah_addr);
  }

  if (tf->flags & ATA_TFLAG_DEVICE) {
    outb(tf->device, ioaddr->device_addr);
  }

  /* let the device settle before the command register is written */
  ata_wait_idle(ap);
}

/**
 *  ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *  @qc: Info associated with this ATA transaction.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{ 
  struct ata_port *ap = qc->ap;
  unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
  u8_t dmactl;
  
  /* load PRD table addr. */
  mb(); /* make sure PRD table writes are visible to controller */
  outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

  /* specify data direction, triple-check start bit is clear.
   * Note: ATA_DMA_WR means "write to memory", i.e. a device READ,
   * hence it is set when the taskfile is NOT a write. */
  dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
  dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
  if (!rw)
    dmactl |= ATA_DMA_WR;
  outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
  
  /* issue r/w command; the transfer itself starts in bmdma_start() */
  ap->ops->sff_exec_command(ap, &qc->tf);
}   

/**
 *  ata_sff_altstatus - Read device alternate status reg
 *  @ap: port where the device is
 *
 *  Reads ATA taskfile alternate status register for
 *  currently-selected device and return its value.
 *
 *  Note: may NOT be used as the check_altstatus() entry in
 *  ata_port_operations.
 *
 *  LOCKING:
 *  Inherited from caller.
 */
/*
 * Read the AltStatus register (does not ack the IRQ), preferring the
 * controller-specific hook when one is installed.
 */
static u8_t ata_sff_altstatus(struct ata_port *ap)
{
	if (!ap->ops->sff_check_altstatus)
		return inb(ap->ioaddr.altstatus_addr);

	return ap->ops->sff_check_altstatus(ap);
}

/**
 *  ata_sff_dma_pause - Pause before commencing DMA
 *  @ap: Port to pause for.
 *
 *  Perform I/O fencing and ensure sufficient cycle delays occur
 *  for the HDMA1:0 transition
 */

/*
 * Provide the delay required before commencing DMA (HDMA1:0 transition)
 * via an AltStatus read, which delays without disturbing the IRQ
 * status.  A controller with neither an altstatus hook nor register
 * cannot delay safely — halt rather than risk data corruption.
 */
void ata_sff_dma_pause(struct ata_port *ap)
{
	if (!ap->ops->sff_check_altstatus && !ap->ioaddr.altstatus_addr) {
		printk("DMA error. %s in %s\n",__func__,__FILE__);
		while(1);
	}

	ata_sff_altstatus(ap);
}

/**
 *  ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
 *  @qc: internal command to clean up
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 */
/*
 * Clean up after an internal command: reset the HSM state machine and
 * stop any bus-master transfer still in flight.
 */
void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *port = qc->ap;

	port->hsm_task_state = HSM_ST_IDLE;

	if (port->ioaddr.bmdma_addr)
		port->ops->bmdma_stop(qc);
}

/**
 *  ata_sff_exec_command - issue ATA command to host controller
 *  @ap: port to which command is being issued
 *  @tf: ATA taskfile register set
 *
 *  Issues ATA command, with proper synchronization with interrupt
 *  handler / other threads.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
/*
 * Start a command by writing the Command register, then flush and
 * honor the 400ns settle time.
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	outb(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}

/**
 *  ata_sff_irq_on - Enable interrupts on a port.
 *  @ap: Port on which interrupts are enabled.
 *
 *  Enable interrupts on a legacy IDE device using MMIO or PIO,
 *  wait for idle, clear any pending interrupts.
 *
 *  LOCKING:
 *  Inherited from caller.
 */
/*
 * Re-enable interrupts on the port: clear nIEN, wait for the port to
 * go idle, and discard any interrupt already latched.  Returns the
 * final status register value.
 */
u8_t ata_sff_irq_on(struct ata_port *ap)
{
	struct ata_ioports *io = &ap->ioaddr;
	u8_t status;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (io->ctl_addr)
		outb(ap->ctl, io->ctl_addr);

	status = ata_wait_idle(ap);

	ap->ops->sff_irq_clear(ap);

	return status;
}

/**
 *  ata_sff_thaw - Thaw SFF controller port
 *  @ap: port to thaw
 *
 *  Thaw SFF controller port.
 *
 *  LOCKING:
 *  Inherited from caller.
 */
void ata_sff_thaw(struct ata_port *ap)
{
  /* clear & re-enable interrupts: ack via a Status read, clear the
   * BMDMA status, then drop nIEN — in that order, so nothing stale
   * fires the moment interrupts are unmasked */
  ap->ops->sff_check_status(ap);
  ap->ops->sff_irq_clear(ap);
  ap->ops->sff_irq_on(ap);
}

/*
 * True when the SStatus DET field (bits 3:0) reads 3: device present
 * and phy communication established.
 */
static bool ata_sstatus_online(u32_t sstatus)
{
	u32_t det = sstatus & 0xf;

	return det == 0x3;
}

/**
 *  ata_phys_link_online - test whether the given link is online
 *  @link: ATA link to test
 *
 *  Test whether @link is online.  Note that this function returns
 *  0 if online status of @link cannot be obtained, so
 *  ata_link_online(link) != !ata_link_offline(link).
 *
 *  LOCKING:
 *  None.
 *
 *  RETURNS:
 *  True if the port online status is available and online.
 */
/*
 * Test whether @link is online.  Returns false when SStatus cannot be
 * read, so link_online(l) is NOT simply !link_offline(l).
 */
bool ata_phys_link_online(struct ata_link *link)
{
	u32_t sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0)
		return false;	/* status unobtainable: report offline */

	return ata_sstatus_online(sstatus);
}

/**
 *  sata_print_link_status - Print SATA link status
 *  @link: SATA link to printk link status about
 *
 *  This function prints link speed and status of a SATA link.
 *
 *  LOCKING:
 *  None.
 */
/*
 * sata_print_link_status - log link speed/status of a SATA @link.
 * Silently returns when SStatus is unreadable (e.g. PATA port).
 *
 * Fix: removed the dead store `tmp = (sstatus >> 4) & 0xf` — the Linux
 * original passed it to sata_spd_string(), which this driver dropped,
 * leaving an unused-variable warning.
 */
static void sata_print_link_status(struct ata_link *link)
{
	u32_t sstatus, scontrol;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_phys_link_online(link)) {
		printk(
		    "SATA link up (SStatus %X SControl %X)\n",
		    sstatus, scontrol);
	} else {
		printk(
		    "SATA link down (SStatus %X SControl %X)\n",
		    sstatus, scontrol);
	}
}

/**
 *  ata_std_postreset - standard postreset callback
 *  @link: the target ata_link
 *  @classes: classes of attached devices
 *
 *  This function is invoked after a successful reset.  Note that
 *  the device might have been reset more than once using
 *  different reset methods before postreset is invoked.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 */
/*
 * Standard post-reset callback: clear SError (write-1-to-clear, so
 * write back whatever is latched) and log the link status.
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	u32_t serror;

	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	sata_print_link_status(link);
}

/**
 *  ata_sff_postreset - SFF postreset callback
 *  @link: the target SFF ata_link
 *  @classes: classes of attached devices
 *
 *  This function is invoked after a successful reset.  It first
 *  calls ata_std_postreset() and performs SFF specific postreset
 *  processing.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 */
/*
 * SFF post-reset: run the standard postreset, re-select each present
 * device, halt if the reset found no device at all, and restore the
 * device-control register.
 */
void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	int dev0 = (classes[0] != ATA_DEV_NONE);
	int dev1 = (classes[1] != ATA_DEV_NONE);

	ata_std_postreset(link, classes);

	/* is double-select really necessary? */
	if (dev0)
		ap->ops->sff_dev_select(ap, 1);
	if (dev1)
		ap->ops->sff_dev_select(ap, 0);

	/* bail out if no device is present */
	if (!dev0 && !dev1) {
		printk("EXIT, no device %s %s\n",__func__,__FILE__);
		while(1);
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr) {
		outb(ap->ctl, ap->ioaddr.ctl_addr);
		ap->last_ctl = ap->ctl;
	}
}

/*
 * Compute the SPD field that SControl should carry for @link and patch
 * it into *scontrol (bits 7:4).  Returns nonzero when the current SPD
 * differs from the target, i.e. a reconfiguration is needed.
 */
static int __sata_set_spd_needed(struct ata_link *link, u32_t *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32_t limit, target, cur_spd;

	limit = link->sata_spd_limit;

	/* Don't configure a downstream link faster than its upstream
	 * link — it gains nothing and some PMPs choke on it. */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	/* UINT_MAX means "no limit" → target 0 (hardware default) */
	target = (limit == UINT_MAX) ? 0 : fls(limit);

	cur_spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return cur_spd != target;
}

/**
 *  sata_set_spd_needed - is SATA spd configuration needed
 *  @link: Link in question
 *
 *  Test whether the spd limit in SControl matches
 *  @link->sata_spd_limit.  This function is used to determine
 *  whether hardreset is necessary to apply SATA spd
 *  configuration.
 *
 *  LOCKING:
 *  Inherited from caller.
 *
 *  RETURNS:
 *  1 if SATA spd configuration is needed, 0 otherwise.
 */
/*
 * Does the SPD limit in SControl match link->sata_spd_limit?  Used to
 * decide whether a hardreset is needed to apply SPD configuration.
 * Returns 1 (needed) when SControl cannot even be read.
 */
static int sata_set_spd_needed(struct ata_link *link)
{
	u32_t scontrol;

	if (sata_scr_read(link, SCR_CONTROL, &scontrol) != 0)
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}

/**
 *  sata_set_spd - set SATA spd according to spd limit
 *  @link: Link to set SATA spd for
 *
 *  Set SATA spd of @link according to sata_spd_limit.
 *
 *  LOCKING:
 *  Inherited from caller.
 *
 *  RETURNS:
 *  0 if spd doesn't need to be changed, 1 if spd has been
 *  changed.  Negative errno if SCR registers are inaccessible.
 */
/*
 * Apply the SPD limit of @link to SControl.  Returns 0 when no change
 * was needed, 1 when SControl was rewritten, or a negative errno when
 * the SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32_t scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;	/* already at the requested speed */

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	return 1;	/* spd was changed */
}

/**
 *  sata_scr_write_flush - write SCR register of the specified port and flush
 *  @link: ATA link to write SCR for
 *  @reg: SCR to write
 *  @val: value to write
 *
 *  This function is identical to sata_scr_write() except that this
 *  function performs flush after writing to the register.
 *
 *  LOCKING:
 *  None if @link is ap->link.  Kernel thread context otherwise.
 *
 *  RETURNS:
 *  0 on success, negative errno on failure.
 */
/*
 * Like sata_scr_write(), but follows the write with a read-back of the
 * same register to flush the posted write.  Links behind a port
 * multiplier are routed through the PMP path instead.  Returns 0 on
 * success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32_t val)
{
	int rc;

	if (!ata_is_host_link(link))
		return sata_pmp_scr_write(link, reg, val);

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	rc = link->ap->ops->scr_write(link, reg, val);
	if (rc == 0)
		rc = link->ap->ops->scr_read(link, reg, &val);

	return rc;
}

/**
 *  sata_link_hardreset - reset link via SATA phy reset
 *  @link: link to reset
 *  @timing: timing parameters { interval, duratinon, timeout } in msec
 *  @deadline: deadline jiffies for the operation
 *  @online: optional out parameter indicating link onlineness
 *  @check_ready: optional callback to check link readiness
 *
 *  SATA phy-reset @link using DET bits of SControl register.
 *  After hardreset, link readiness is waited upon using
 *  ata_wait_ready() if @check_ready is specified.  LLDs are
 *  allowed to not specify @check_ready and wait itself after this
 *  function returns.  Device classification is LLD's
 *  responsibility.
 *
 *  *@online is set to one iff reset succeeded and @link is online
 *  after reset.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
      unsigned long deadline,
      bool *online, int (*check_ready)(struct ata_link *))
{
  u32_t scontrol;
  int rc;

  /* pessimistic default; flipped to true only once the link proves online */
  if (online)
    *online = false;

  if (sata_set_spd_needed(link)) {
    /* SATA spec says nothing about how to reconfigure
     * spd.  To be on the safe side, turn off phy during
     * reconfiguration.  This works for at least ICH7 AHCI
     * and Sil3124.
     */
    if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
      goto out;

    /* keep SPD nibble (bits 7:4); DET=4 disables the phy, IPM=3 */
    scontrol = (scontrol & 0x0f0) | 0x304;

    if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
      goto out;

    sata_set_spd(link);
  }

  /* issue phy wake/reset */
  if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
    goto out;

  /* DET=1 requests COMRESET; the flush write makes sure it reaches the phy */
  scontrol = (scontrol & 0x0f0) | 0x301;

  if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
    goto out;

  /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
   * 10.4.2 says at least 1 ms.
   */
  msleep(1);

  /* bring link back */
  rc = sata_link_resume(link, timing, deadline);
  if (rc)
    goto out;
  /* if link is offline nothing more to do */
  if (ata_phys_link_offline(link))
    goto out;

  /* Link is online.  From this point, -ENODEV too is an error. */
  if (online)
    *online = true;

  if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
    /* If PMP is supported, we have to do follow-up SRST.
     * Some PMPs don't send D2H Reg FIS after hardreset if
     * the first port is empty.  Wait only for
     * ATA_TMOUT_PMP_SRST_WAIT.
     */
    if (check_ready) {
      unsigned long pmp_deadline;

      /* cap the PMP wait by both its own timeout and @deadline */
      pmp_deadline = ata_deadline(nowticks,
                ATA_TMOUT_PMP_SRST_WAIT);
      if (time_after(pmp_deadline, deadline))
        pmp_deadline = deadline;
      ata_wait_ready(link, pmp_deadline, check_ready);
    }
    /* -EAGAIN tells the caller a follow-up softreset is required */
    rc = -EAGAIN;
    goto out;
  }

  rc = 0;
  if (check_ready)
    rc = ata_wait_ready(link, deadline, check_ready);
 out:
  if (rc && rc != -EAGAIN) {
    /* online is set iff link is online && reset succeeded */
    if (online)
      *online = false;
    printk("COMRESET failed (errno=%d)\n", rc);
  }
  return rc;
}

/**
 *  sata_sff_hardreset - reset host port via SATA phy reset
 *  @link: link to reset
 *  @class: resulting class of attached device
 *  @deadline: deadline jiffies for the operation
 *
 *  SATA phy-reset host port using DET bits of SControl register,
 *  wait for !BSY and classify the attached device.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 on success, -errno otherwise.
 */
int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
           unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool online;
	int rc;

	/* COMRESET the link, then classify the device if it came up. */
	rc = sata_link_hardreset(link, sata_ehc_deb_timing(ehc), deadline,
				 &online, ata_sff_check_ready);
	if (online)
		*class = ata_sff_dev_classify(link->device, 1, NULL);

	return rc;
}

/**
 *  sata_link_debounce - debounce SATA phy status
 *  @link: ATA link to debounce SATA phy status for
 *  @params: timing parameters { interval, duration, timeout } in msec
 *  @deadline: deadline jiffies for the operation
 *
 *  Make sure SStatus of @link reaches stable state, determined by
 *  holding the same value where DET is not 1 for @duration polled
 *  every @interval, before @timeout.  Timeout constraints the
 *  beginning of the stable state.  Because DET gets stuck at 1 on
 *  some controllers after hot unplugging, this functions waits
 *  until timeout then returns 0 if DET is stable at 1.
 *
 *  @timeout is further limited by @deadline.  The sooner of the
 *  two is used.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
           unsigned long deadline)
{
  unsigned long interval = params[0]; /* polling interval, msec */
  unsigned long duration = params[1]; /* required stable period, msec */
  unsigned long last_jiffies, t;
  u32_t last, cur;
  int rc;

  /* overall timeout (params[2]) is further capped by @deadline */
  t = ata_deadline(nowticks, params[2]);
  if (time_before(t, deadline))
    deadline = t;

  if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
    return rc;
  cur &= 0xf; /* keep only the DET field of SStatus */

  last = cur;
  last_jiffies = nowticks;

  while (1) {
    msleep(interval);
    if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
      return rc;
    cur &= 0xf;

    /* DET stable? */
    if (cur == last) {
      /* DET stuck at 1 (presence, no phy comm): some controllers
       * do this after hot unplug, so keep waiting until timeout */
      if (cur == 1 && time_before(nowticks, deadline))
        continue;
      /* stable (and not 1) for @duration -> success */
      if (time_after(nowticks,
               ata_deadline(last_jiffies, duration)))
        return 0;
      continue;
    }

    /* unstable, start over */
    last = cur;
    last_jiffies = nowticks;

    /* Check deadline.  If debouncing failed, return
     * -EPIPE to tell upper layer to lower link speed.
     */
    if (time_after(nowticks, deadline))
      return -EPIPE;
  }
}

/**
 *  sata_link_resume - resume SATA link
 *  @link: ATA link to resume SATA
 *  @params: timing parameters { interval, duration, timeout } in msec
 *  @deadline: deadline jiffies for the operation
 *
 *  Resume SATA phy @link and debounce it.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
         unsigned long deadline)
{
  int tries = ATA_LINK_RESUME_TRIES;
  u32_t scontrol, serror;
  int rc;

  if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
    return rc;

  /*
   * Writes to SControl sometimes get ignored under certain
   * controllers (ata_piix SIDPR).  Make sure DET actually is
   * cleared.
   */
  do {
    /* keep SPD nibble (bits 7:4); DET=0 (no action), IPM=3 */
    scontrol = (scontrol & 0x0f0) | 0x300;
    if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
      return rc;
    /*
     * Some PHYs react badly if SStatus is pounded
     * immediately after resuming.  Delay 200ms before
     * debouncing.
     */
    msleep(200);

    /* is SControl restored correctly? */
    if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
      return rc;
  } while ((scontrol & 0xf0f) != 0x300 && --tries);

  if ((scontrol & 0xf0f) != 0x300) {
    printk("failed to resume link (SControl %X)\n",
        scontrol);
    /* NOTE(review): deliberately returns 0 (success) after whining,
     * matching upstream libata -- callers proceed best-effort. */
    return 0;
  }

  if (tries < ATA_LINK_RESUME_TRIES)
    printk("link resume succeeded after %d retries\n",
        ATA_LINK_RESUME_TRIES - tries);

  /* wait for the phy status to settle before declaring the link up */
  if ((rc = sata_link_debounce(link, params, deadline)))
    return rc;

  /* clear SError, some PHYs require this even for SRST to work */
  if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
    rc = sata_scr_write(link, SCR_ERROR, serror);

  /* -EINVAL here just means SCR access isn't supported: not fatal */
  return rc != -EINVAL ? rc : 0;
}

/**
 *  ata_std_prereset - prepare for reset
 *  @link: ATA link to be reset
 *  @deadline: deadline jiffies for the operation
 *
 *  @link is about to be reset.  Initialize it.  Failure from
 *  prereset makes libata abort whole reset sequence and give up
 *  that port, so prereset should be best-effort.  It does its
 *  best to prepare for reset sequence but if things go wrong, it
 *  should just whine, not fail.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	/* Hardreset path handles the phy itself; nothing to prepare. */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* SATA ports need their phy resumed before a softreset. */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, sata_ehc_deb_timing(ehc), deadline);
		/* best-effort: complain but keep going */
		if (rc && rc != -EOPNOTSUPP)
			printk("failed to resume "
			    "link for reset (errno=%d)\n", rc);
	}

	/* An offline link cannot answer SRST; drop that action. */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}

/**
 *  ata_phys_link_offline - test whether the given link is offline
 *  @link: ATA link to test
 *
 *  Test whether @link is offline.  Note that this function
 *  returns 0 if offline status of @link cannot be obtained, so
 *  ata_link_online(link) != !ata_link_offline(link).
 *
 *  LOCKING:
 *  None.
 *
 *  RETURNS:
 *  True if the port offline status is available and offline.
 */
bool ata_phys_link_offline(struct ata_link *link)
{
	u32_t sstatus;
	int rc;

	/* Offline only when SStatus is readable AND reports not-online;
	 * an unreadable SStatus yields false (status unknown). */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	return rc == 0 && !ata_sstatus_online(sstatus);
}

/**
 *  ata_link_offline - test whether the given link is offline
 *  @link: ATA link to test
 *
 *  Test whether @link is offline.  This is identical to
 *  ata_phys_link_offline() when there's no slave link.  When
 *  there's a slave link, this function should only be called on
 *  the master link and will return true if both M/S links are
 *  offline.
 *
 *  LOCKING:
 *  None.
 *
 *  RETURNS:
 *  True if the port offline status is available and offline.
 */
bool ata_link_offline(struct ata_link *link)
{
  struct ata_link *slave = link->ap->slave_link;

  if(link == slave){
		printk("%s %s\n",__func__,__FILE__);
		while(1);
	}

  return ata_phys_link_offline(link) &&
    (!slave || ata_phys_link_offline(slave));
}

static int ata_sff_check_ready(struct ata_link *link)
{
  u8_t status = link->ap->ops->sff_check_status(link->ap);

  return ata_check_ready(status);
}

/**
 *  ata_link_online - test whether the given link is online
 *  @link: ATA link to test
 *
 *  Test whether @link is online.  This is identical to
 *  ata_phys_link_online() when there's no slave link.  When
 *  there's a slave link, this function should only be called on
 *  the master link and will return true if any of M/S links is
 *  online.
 *
 *  LOCKING:
 *  None.
 *
 *  RETURNS:
 *  True if the port online status is available and online.
 */
bool ata_link_online(struct ata_link *link)
{
  struct ata_link *slave = link->ap->slave_link;

  if(link == slave){
		printk("%s %s\n",__func__,__FILE__); /* shouldn't be called on slave link */
		while(1);
	}

  return ata_phys_link_online(link) ||
    (slave && ata_phys_link_online(slave));
}

/**
 *  ata_wait_ready - wait for link to become ready
 *  @link: link to be waited on
 *  @deadline: deadline jiffies for the operation
 *  @check_ready: callback to check link readiness
 *
 *  Wait for @link to become ready.  @check_ready should return
 *  positive number if @link is ready, 0 if it isn't, -ENODEV if
 *  link doesn't seem to be occupied, other errno for other error
 *  conditions.
 *
 *  Transient -ENODEV conditions are allowed for
 *  ATA_TMOUT_FF_WAIT.
 *
 *  LOCKING:
 *  EH context.
 *
 *  RETURNS:
 *  0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
       int (*check_ready)(struct ata_link *link))
{
  unsigned long start = nowticks;
  /* transient -ENODEV is tolerated until this earlier deadline */
  unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
  int warned = 0;

  /* Slave readiness can't be tested separately from master.  On
   * M/S emulation configuration, this function should be called
   * only on the master and it will handle both master and slave.
   */
  if(link == link->ap->slave_link){
		printk("%s %s\n",__func__,__FILE__);
		while(1);
	}

  if (time_after(nodev_deadline, deadline))
    nodev_deadline = deadline;

  while (1) {
    unsigned long now = nowticks;
    int ready, tmp;

    /* tmp keeps the raw result for the slow-link warning below */
    ready = tmp = check_ready(link);
    if (ready > 0)
      return 0;

    /* -ENODEV could be transient.  Ignore -ENODEV if link
     * is online.  Also, some SATA devices take a long
     * time to clear 0xff after reset.  For example,
     * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
     * GoVault needs even more than that.  Wait for
     * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
     *
     * Note that some PATA controllers (pata_ali) explode
     * if status register is read more than once when
     * there's no device attached.
     */
    if (ready == -ENODEV) {
      if (ata_link_online(link))
        ready = 0;
      else if ((link->ap->flags & ATA_FLAG_SATA) &&
         !ata_link_offline(link) &&
         time_before(now, nodev_deadline))
        ready = 0;
    }

    /* any remaining non-zero value is a hard error from check_ready */
    if (ready)
      return ready;
    if (time_after(now, deadline))
      return -EBUSY;

    /* warn once if we've polled > 5s and > 3s of budget remains */
    if (!warned && time_after(now, start + 5 * HZ) &&
        (deadline - now > 3 * HZ)) {
      printk(
        "link is slow to respond, please be patient "
        "(ready=%d)\n", tmp);
      warned = 1;
    }

    msleep(50);
  }
}

/**
 *  ata_sff_wait_ready - sleep until BSY clears, or timeout
 *  @link: SFF link to wait ready status for
 *  @deadline: deadline jiffies for the operation
 *
 *  Sleep until ATA Status register bit BSY clears, or timeout
 *  occurs.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	/* Thin wrapper: poll readiness via the SFF status register. */
	int rc;

	rc = ata_wait_ready(link, deadline, ata_sff_check_ready);
	return rc;
}

/**
 *  ata_sff_prereset - prepare SFF link for reset
 *  @link: SFF link to be reset
 *  @deadline: deadline jiffies for the operation
 *
 *  SFF link @link is about to be reset.  Initialize it.  It first
 *  calls ata_std_prereset() and wait for !BSY if the port is
 *  being softreset.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 on success, -errno otherwise.
 */
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = ata_std_prereset(link, deadline);
	if (rc)
		return rc;

	/* hardreset path needs no !BSY wait */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* offline link: no device to wait on */
	if (ata_link_offline(link))
		return 0;

	/* wait for !BSY; escalate to hardreset if the device stays busy */
	rc = ata_sff_wait_ready(link, deadline);
	if (rc && rc != -ENODEV) {
		printk("device not ready "
		    "(errno=%d), forcing hardreset\n", rc);
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	return 0;
}

/**
 *  ata_devchk - PATA device presence detection
 *  @ap: ATA channel to examine
 *  @device: Device to examine (starting at zero)
 *
 *  This technique was originally described in
 *  Hale Landis's ATADRVR (www.ata-atapi.com), and
 *  later found its way into the ATA/ATAPI spec.
 *
 *  Write a pattern to the ATA shadow registers,
 *  and if a device is present, it will respond by
 *  correctly storing and echoing back the
 *  ATA shadow register contents.
 *
 *  LOCKING:
 *  caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
  struct ata_ioports *ioaddr = &ap->ioaddr;
  u8_t nsect, lbal;

  /* select the device under test first */
  ap->ops->sff_dev_select(ap, device);

  /* write alternating 0x55/0xaa patterns to two scratch registers;
   * a present device latches the last pair written */
  outb(0x55, ioaddr->nsect_addr);
  outb(0xaa, ioaddr->lbal_addr);

  outb(0xaa, ioaddr->nsect_addr);
  outb(0x55, ioaddr->lbal_addr);

  outb(0x55, ioaddr->nsect_addr);
  outb(0xaa, ioaddr->lbal_addr);

  /* read back: only a real device echoes the final pattern */
  nsect = inb(ioaddr->nsect_addr);
  lbal = inb(ioaddr->lbal_addr);

  if ((nsect == 0x55) && (lbal == 0xaa))
    return 1; /* we found a device */

  return 0;   /* nothing found */
}

/**
 *  ata_sff_wait_after_reset - wait for devices to become ready after reset
 *  @link: SFF link which is just reset
 *  @devmask: mask of present devices
 *  @deadline: deadline jiffies for the operation
 *
 *  Wait devices attached to SFF @link to become ready after
 *  reset.  It contains preceding 150ms wait to avoid accessing TF
 *  status register too early.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, -ENODEV if some or all of devices in @devmask
 *  don't seem to exist.  -errno on other errors.
 */
int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
           unsigned long deadline)
{
  struct ata_port *ap = link->ap;
  struct ata_ioports *ioaddr = &ap->ioaddr;
  unsigned int dev0 = devmask & (1 << 0);
  unsigned int dev1 = devmask & (1 << 1);
  int rc, ret = 0;

  /* don't touch the TF status register too soon after reset */
  msleep(ATA_WAIT_AFTER_RESET);

  /* always check readiness of the master device */
  rc = ata_sff_wait_ready(link, deadline);
  /* -ENODEV means the odd clown forgot the D7 pulldown resistor
   * and TF status is 0xff, bail out on it too.
   */
  if (rc)
    return rc;

  /* if device 1 was found in ata_devchk, wait for register
   * access briefly, then wait for BSY to clear.
   */
  if (dev1) {
    int i;

    ap->ops->sff_dev_select(ap, 1);

    /* Wait for register access.  Some ATAPI devices fail
     * to set nsect/lbal after reset, so don't waste too
     * much time on it.  We're gonna wait for !BSY anyway.
     */
    for (i = 0; i < 2; i++) {
      u8_t nsect, lbal;

      /* reset signature: nsect == lbal == 1 once regs are live */
      nsect = inb(ioaddr->nsect_addr);
      lbal = inb(ioaddr->lbal_addr);
      if ((nsect == 1) && (lbal == 1))
        break;
      msleep(50); /* give drive a breather */
    }

    rc = ata_sff_wait_ready(link, deadline);
    if (rc) {
      if (rc != -ENODEV)
        return rc;
      /* remember the missing device but keep going */
      ret = rc;
    }
  }

  /* is all this really necessary? */
  ap->ops->sff_dev_select(ap, 0);
  if (dev1)
    ap->ops->sff_dev_select(ap, 1);
  if (dev0)
    ap->ops->sff_dev_select(ap, 0);

  return ret;
}

/* Pulse SRST in the device control register to softreset the bus,
 * then wait for the devices in @devmask to come back.  RETURNS 0 on
 * success, -errno otherwise (via ata_sff_wait_after_reset()).
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
           unsigned long deadline)
{
  struct ata_ioports *ioaddr = &ap->ioaddr;

  printk("ata: bus reset via SRST\n");

  /* software reset.  causes dev0 to be selected */
  outb(ap->ctl, ioaddr->ctl_addr);
  udelay(20); /* FIXME: flush */
  outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
  udelay(20); /* FIXME: flush */
  outb(ap->ctl, ioaddr->ctl_addr);
  /* remember the last ctl value written so later writes can restore it */
  ap->last_ctl = ap->ctl;

  /* wait the port to become ready */
  return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
}

/**
 *  ata_dev_classify - determine device type based on ATA-spec signature
 *  @tf: ATA taskfile register set for device to be identified
 *
 *  Determine from taskfile register contents whether a device is
 *  ATA or ATAPI, as per "Signature and persistence" section
 *  of ATA/PI spec (volume 1, sect 5.14).
 *
 *  LOCKING:
 *  None.
 *
 *  RETURNS:
 *  Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *  %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
  /* Apple's open source Darwin code hints that some devices only
   * put a proper signature into the LBA mid/high registers,
   * So, we only check those.  It's sufficient for uniqueness.
   *
   * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
   * signatures for ATA and ATAPI devices attached on SerialATA,
   * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
   * spec has never mentioned about using different signatures
   * for ATA/ATAPI devices.  Then, Serial ATA II: Port
   * Multiplier specification began to use 0x69/0x96 to identify
   * port multpliers and 0x3c/0xc3 to identify SEMB device.
   * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
   * 0x69/0x96 shortly and described them as reserved for
   * SerialATA.
   *
   * We follow the current spec and consider that 0x69/0x96
   * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
   * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
   * SEMB signature.  This is worked around in
   * ata_dev_read_id().
   */
  if (tf->lbam == 0 && tf->lbah == 0)
    return ATA_DEV_ATA;          /* 0x00/0x00: plain ATA disk */
  else if (tf->lbam == 0x14 && tf->lbah == 0xeb)
    return ATA_DEV_ATAPI;        /* 0x14/0xeb: ATAPI device */
  else if (tf->lbam == 0x69 && tf->lbah == 0x96)
    return ATA_DEV_PMP;          /* 0x69/0x96: port multiplier */
  else if (tf->lbam == 0x3c && tf->lbah == 0xc3)
    return ATA_DEV_SEMB;         /* 0x3c/0xc3: SEMB (may be an ATA disk) */

  printk("unknown device\n");
  return ATA_DEV_UNKNOWN;
}

/**
 *  ata_sff_dev_classify - Parse returned ATA device signature
 *  @dev: ATA device to classify (starting at zero)
 *  @present: device seems present
 *  @r_err: Value of error register on completion
 *
 *  After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *  an ATA/ATAPI-defined set of values is placed in the ATA
 *  shadow registers, indicating the results of device detection
 *  and diagnostics.
 *
 *  Select the ATA device, and read the values from the ATA shadow
 *  registers.  Then parse according to the Error register value,
 *  and the spec-defined values examined by ata_dev_classify().
 *
 *  LOCKING:
 *  caller.
 *
 *  RETURNS:
 *  Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
          u8_t *r_err)
{
  struct ata_port *ap = dev->link->ap;
  struct ata_taskfile tf;
  unsigned int class;
  u8_t err;

  /* the post-reset signature lives in the selected device's shadow regs */
  ap->ops->sff_dev_select(ap, dev->devno);

  memset(&tf, 0, sizeof(tf));

  ap->ops->sff_tf_read(ap, &tf);
  /* on read, the feature register holds the diagnostic/error code */
  err = tf.feature;
  if (r_err)
    *r_err = err;

  /* see if device passed diags: continue and warn later
   * (err == 1 means diagnostics passed; 0x81 on dev0 means
   * dev0 passed but dev1 failed) */
  if (err == 0)
    /* diagnostic fail : do nothing _YET_ */
    dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
  else if (err == 1)
    /* do nothing */ ;
  else if ((dev->devno == 0) && (err == 0x81))
    /* do nothing */ ;
  else
    return ATA_DEV_NONE;

  /* determine if device is ATA or ATAPI */
  class = ata_dev_classify(&tf);

  if (class == ATA_DEV_UNKNOWN) {
    /* If the device failed diagnostic, it's likely to
     * have reported incorrect device signature too.
     * Assume ATA device if the device seems present but
     * device signature is invalid with diagnostic
     * failure.
     */
    if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
      class = ATA_DEV_ATA;
    else
      class = ATA_DEV_NONE;
  } else if ((class == ATA_DEV_ATA) &&
       (ap->ops->sff_check_status(ap) == 0))
    /* ATA signature but status reads 0: phantom device */
    class = ATA_DEV_NONE;

  return class;
}

/**
 *  ata_sff_softreset - reset host port via ATA SRST
 *  @link: ATA link to reset
 *  @classes: resulting classes of attached devices
 *  @deadline: deadline jiffies for the operation
 *
 *  Detect presence of device 0/1, issue an SRST bus reset and
 *  classify the attached devices from their reset signatures.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 on success, -errno otherwise.
 */
int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
          unsigned long deadline)
{
  struct ata_port *ap = link->ap;
  unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
  unsigned int devmask = 0;
  int rc;
  u8_t err;

  /* determine if device 0/1 are present */
  if (ata_devchk(ap, 0))
    devmask |= (1 << 0);
  if (slave_possible && ata_devchk(ap, 1))
    devmask |= (1 << 1);

  /* select device 0 again */
  ap->ops->sff_dev_select(ap, 0);

  /* issue bus reset */
  //printk("about to softreset, devmask=%x\n", devmask);
  rc = ata_bus_softreset(ap, devmask, deadline);
  /* if link is occupied, -ENODEV too is an error */
  if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
    printk("SRST failed (errno=%d)\n", rc);
    return rc;
  }

  /* determine by signature whether we have ATA or ATAPI devices;
   * err == 0x81 from dev0 indicates dev1 failed diagnostics, so
   * skip classifying it */
  classes[0] = ata_sff_dev_classify(&link->device[0],
            devmask & (1 << 0), &err);
  if (slave_possible && err != 0x81)
    classes[1] = ata_sff_dev_classify(&link->device[1],
              devmask & (1 << 1), &err);

  //printk("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
  return 0;
}

/**
 *  ata_sff_tf_read - input device's ATA taskfile shadow registers
 *  @ap: Port from which input is read
 *  @tf: ATA taskfile register set for storing input
 *
 *  Reads ATA taskfile registers for currently-selected device
 *  into @tf. Assumes the device has a fully SFF compliant task file
 *  layout and behaviour. If your device does not (eg has a different
 *  status method) then you will need to provide a replacement tf_read
 *
 *  LOCKING:
 *  Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
  struct ata_ioports *ioaddr = &ap->ioaddr;

  /* reading the status register also clears a pending interrupt */
  tf->command = ata_sff_check_status(ap);
  tf->feature = inb(ioaddr->error_addr);
  tf->nsect = inb(ioaddr->nsect_addr);
  tf->lbal = inb(ioaddr->lbal_addr);
  tf->lbam = inb(ioaddr->lbam_addr);
  tf->lbah = inb(ioaddr->lbah_addr);
  tf->device = inb(ioaddr->device_addr);

  if (tf->flags & ATA_TFLAG_LBA48) {
    if (likely(ioaddr->ctl_addr)) {
      /* set HOB in the control register to expose the high-order
       * bytes of the shadow registers, then restore ctl */
      outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
      tf->hob_feature = inb(ioaddr->error_addr);
      tf->hob_nsect = inb(ioaddr->nsect_addr);
      tf->hob_lbal = inb(ioaddr->lbal_addr);
      tf->hob_lbam = inb(ioaddr->lbam_addr);
      tf->hob_lbah = inb(ioaddr->lbah_addr);
      outb(tf->ctl, ioaddr->ctl_addr);
      ap->last_ctl = tf->ctl;
    } else{
      /* LBA48 without a control register is unsupported: halt loudly */
			printk("%s %s\n",__func__,__FILE__);
			while(1);
		}
  }
}

/**
 *  ata_pio_need_iordy  - check if iordy needed
 *  @adev: ATA device
 *
 *  Check if the current speed of the device requires IORDY. Used
 *  by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
  /* Don't set IORDY if we're preparing for reset.  IORDY may
   * lead to controller lock up on certain controllers if the
   * port is not occupied.  See bko#11703 for details.
   */
  if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
    return 0;
  /* Controller doesn't support IORDY.  Probably a pointless
   * check as the caller should know this.
   */
  if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
    return 0;
  /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
  if (ata_id_is_cfa(adev->id)
      && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
    return 0;
  /* PIO3 and higher it is mandatory */
  if (adev->pio_mode > XFER_PIO_2)
    return 1;
  /* We turn it on when possible */
  if (ata_id_has_iordy(adev->id))
    return 1;
  return 0;
}

/**
 *  piix_set_piomode - Initialize host controller PATA PIO timings
 *  @ap: Port whose timings we are configuring
 *  @adev: Device whose PIO timings are being configured
 *
 *  Set PIO mode for device, in host controller PCI config space.
 *
 *  LOCKING:
 *  None (inherited from caller).
 */

static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
  struct pci_device *dev = ap->host->dev;
  unsigned int pio  = adev->pio_mode - XFER_PIO_0;
  unsigned int is_slave = (adev->devno != 0);
  /* IDETIM register: 0x40 for primary channel, 0x42 for secondary */
  unsigned int master_port= ap->port_no ? 0x42 : 0x40;
  unsigned int slave_port = 0x44; /* SIDETIM: slave timing, both channels */
  u16_t master_data;
  u8_t slave_data;
  u8_t udma_enable;
  int control = 0;

  /*
   *  See Intel Document 298600-004 for the timing programing rules
   *  for ICH controllers.
   */

  static const   /* ISP  RTC */
  u8_t timings[][2] = { { 0, 0 },
          { 0, 0 },
          { 1, 0 },
          { 2, 1 },
          { 2, 3 }, };

  if (pio >= 2)
    control |= 1; /* TIME1 enable */
  if (ata_pio_need_iordy(adev))
    control |= 2; /* IE enable */

  /* Intel specifies that the PPE functionality is for disk only */
  if (adev->class == ATA_DEV_ATA)
    control |= 4; /* PPE enable */

  /* PIO configuration clears DTE unconditionally.  It will be
   * programmed in set_dmamode which is guaranteed to be called
   * after set_piomode if any DMA mode is available.
   */
  pci_read_config_word(dev->bus,dev->devfn,master_port, &master_data);
  if (is_slave) {
		//printk("Do we have a slave ata_device? %s %s\n",
		//	__func__,__FILE__);
    /* clear TIME1|IE1|PPE1|DTE1 */
    master_data &= 0xff0f;
    /* Enable SITRE (separate slave timing register) */
    master_data |= 0x4000;
    /* enable PPE1, IE1 and TIME1 as needed */
    master_data |= (control << 4);
    pci_read_config_byte(dev->bus,dev->devfn,slave_port, &slave_data);
    /* SIDETIM packs both channels: secondary in the high nibble */
    slave_data &= (ap->port_no ? 0x0f : 0xf0);
    /* Load the timing nibble for this slave */
    slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
            << (ap->port_no ? 4 : 0);
  } else {
    /* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
    master_data &= 0xccf0;
    /* Enable PPE, IE and TIME as appropriate */
    master_data |= control;
    /* load ISP and RCT */
    master_data |=
      (timings[pio][0] << 12) |
      (timings[pio][1] << 8);
  }
  pci_write_config_word(dev->bus,dev->devfn,master_port, master_data);
  if (is_slave)
    pci_write_config_byte(dev->bus,dev->devfn,slave_port, slave_data);

  /* Ensure the UDMA bit is off - it will be turned back on if
     UDMA is selected */

  if (ap->udma_mask) {
    /* 0x48 is the UDMA control register; one enable bit per drive */
    pci_read_config_byte(dev->bus,dev->devfn,0x48, &udma_enable);
    udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
    pci_write_config_byte(dev->bus,dev->devfn,0x48, udma_enable);
  }
}

/**
 *  piix_pata_prereset - prereset for PATA host controller
 *  @link: Target link
 *  @deadline: deadline jiffies for the operation
 *
 *  LOCKING:
 *  None (inherited from caller).
 */
/* Per-port channel-enable test patterns, indexed by ap->port_no:
 * { reg, width, mask, val } -- bit 7 of config registers 0x41/0x43
 * must be set for the channel to be enabled (presumably the PIIX
 * IDE decode-enable bit; confirm against the PIIX datasheet).
 */
static struct pci_bits piix_enable_bits[] = {
  { 0x41U, 1U, 0x80UL, 0x80UL },  /* port 0 */
  { 0x43U, 1U, 0x80UL, 0x80UL },  /* port 1 */
};
/* move to PCI subsystem */
/* Read the config register described by @bits and test whether the
 * masked value matches.  Returns 1 on match, 0 on mismatch, -EINVAL
 * for an unsupported access width.
 */
int pci_test_config_bits(struct pci_device *pdev, const struct pci_bits *bits)
{
	u8_t v8 = 0;
	u16_t v16 = 0;
	u32_t v32 = 0;
	unsigned long tmp;

	switch (bits->width) {
	case 1:
		pci_read_config_byte(pdev->bus, pdev->devfn, bits->reg, &v8);
		tmp = v8;
		break;
	case 2:
		pci_read_config_word(pdev->bus, pdev->devfn, bits->reg, &v16);
		tmp = v16;
		break;
	case 4:
		pci_read_config_dword(pdev->bus, pdev->devfn, bits->reg, &v32);
		tmp = v32;
		break;
	default:
		return -EINVAL;
	}

	return ((tmp & bits->mask) == bits->val) ? 1 : 0;
}
static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;

	/* Bail out if this channel is disabled in PCI config space. */
	if (!pci_test_config_bits(ap->host->dev,
				  &piix_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);
}

/**
 *  ata_sff_data_xfer - Transfer data by PIO
 *  @dev: device to target
 *  @buf: data buffer
 *  @buflen: buffer length
 *  @rw: read/write
 *
 *  Transfer data from/to the device data register by PIO.
 *
 *  LOCKING:
 *  Inherited from caller.
 *
 *  RETURNS:
 *  Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
             unsigned int buflen, int rw)
{
  struct ata_port *ap = dev->link->ap;
  void *data_addr = ap->ioaddr.data_addr;
  unsigned int words = buflen >> 1;

  /* Transfer multiple of 2 bytes */
  if (rw == READ)
    insw((unsigned long)data_addr, buf, words);
  else
    outsw((unsigned long)data_addr, buf, words);

  /* Transfer trailing byte, if any. */
  if (unlikely(buflen & 0x01)) {
    /* Zero-initialize so the pad byte pushed to the device on a
     * write is deterministic instead of indeterminate stack data
     * (reading an uninitialized byte is undefined behavior).
     */
    unsigned char pad[2] = { 0, 0 };

    /* Point buf to the tail of buffer */
    buf += buflen - 1;

    /*
     * Use io*16_rep() accessors here as well to avoid pointlessly
     * swapping bytes to and from on the big endian machines...
     */
    if (rw == READ) {
      insw((unsigned long)data_addr, pad, 1);
      *buf = pad[0];
    } else {
      pad[0] = *buf;
      outsw((unsigned long)data_addr, pad, 1);
    }
    words++;
  }

  /* bytes consumed, rounded up to the full words transferred */
  return words << 1;
}

/**
 *	ata_sff_data_xfer32 - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO using 32bit
 *	I/O operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */

unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	/* fall back to 16-bit PIO unless the port opted into 32-bit */
	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(dev, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		insl((unsigned long)data_addr, buf, words);
	else
		outsl((unsigned long)data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		/* Zero-initialize so pad bytes pushed to the device on a
		 * write are deterministic instead of indeterminate stack
		 * data (reading uninitialized bytes is undefined behavior).
		 */
		unsigned char pad[4] = { 0, 0, 0, 0 };

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				insw((unsigned long)data_addr, pad, 1);
			else
				insl((unsigned long)data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				outsw((unsigned long)data_addr, pad, 1);
			else
				outsl((unsigned long)data_addr, pad, 1);
		}
	}
	/* bytes consumed, rounded up to the next even count */
	return (buflen + 1) & ~1;
}

/**
 *  ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *  @qc: Info associated with this ATA transaction.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8_t cmd;

	/* Set the start bit in the BMDMA command register to kick off
	 * the host DMA engine. */
	cmd = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(cmd | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/*
	 * No flushing read is issued here.  Control passes to the
	 * hardware at this point and it will interrupt us when we are
	 * to resume, so we do not care when the write is posted.
	 * Further, a status read immediately after the command write
	 * upsets certain flaky hardware, so adding one would need
	 * testing across the MMIO ATA cards/boards first.
	 *
	 * FIXME: the posting of this write means I/O starts are
	 * unnecessarily delayed for MMIO.
	 */
}

/**
 *  ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *  @qc: Command we are ending DMA for
 *
 *  Clears the ATA_DMA_START flag in the dma control register
 *
 *  May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
  struct ata_port *ap = qc->ap;
  void *mmio = ap->ioaddr.bmdma_addr;

  /* clear start/stop bit */
  outb(inb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
     mmio + ATA_DMA_CMD);

  /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
  ata_sff_dma_pause(ap);
}

/**
 *  ata_bmdma_status - Read PCI IDE BMDMA status
 *  @ap: Port associated with this ATA transaction.
 *    
 *  Read and return BMDMA status register.
 *
 *  May be used as the bmdma_status() entry in ata_port_operations.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
u8_t ata_bmdma_status(struct ata_port *ap)
{
	/* Return the raw contents of the BMDMA status register. */
	void *bmdma = ap->ioaddr.bmdma_addr;

	return inb(bmdma + ATA_DMA_STATUS);
}

static u8_t piix_vmw_bmdma_status(struct ata_port *ap)
{
	/* Mask out ATA_DMA_ERR from the status — the piix_vmw naming
	 * suggests this works around VMware's PIIX emulation reporting
	 * the error bit spuriously. */
	u8_t status = ata_bmdma_status(ap);

	return status & ~ATA_DMA_ERR;
}

const struct ata_port_operations ata_sff_port_ops = {
  .qc_prep    = ata_sff_qc_prep,
  .qc_issue   = ata_sff_qc_issue,
  .qc_fill_rtf    = ata_sff_qc_fill_rtf,

  .freeze     = ata_sff_freeze,
  .thaw     = ata_sff_thaw,
  .prereset   = ata_sff_prereset,
  .softreset    = ata_sff_softreset,
  .hardreset    = sata_sff_hardreset,
  .postreset    = ata_sff_postreset,

	.error_handler    = ata_sff_error_handler,
  .post_internal_cmd  = ata_sff_post_internal_cmd,

	.sff_dev_select   = ata_sff_dev_select,

  .sff_irq_on   = ata_sff_irq_on,
  .sff_irq_clear    = ata_sff_irq_clear,
  .sff_check_status = ata_sff_check_status,
  .sff_tf_load    = ata_sff_tf_load,
  .sff_tf_read    = ata_sff_tf_read,

	.sff_exec_command = ata_sff_exec_command,
};

/* SFF operations extended with PCI bus-master DMA support. */
const struct ata_port_operations ata_bmdma_port_ops = {
	.inherits	= &ata_sff_port_ops,

	/* .mode_filter (ata_bmdma_mode_filter) intentionally not set. */
	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,
};

/* BMDMA operations using 32-bit PIO data transfers. */
const struct ata_port_operations ata_bmdma32_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.sff_data_xfer	= ata_sff_data_xfer32,
	.port_start	= ata_sff_port_start32,
};
/* PATA operations for PIIX-family controllers.
 * .cable_detect (ata_cable_40wire) and .set_dmamode (piix_set_dmamode)
 * are intentionally not wired up yet. */
static struct ata_port_operations piix_pata_ops = {
	.inherits	= &ata_bmdma32_port_ops,
	.set_piomode	= piix_set_piomode,
	.prereset	= piix_pata_prereset,
};

/* PIIX PATA operations with the VMware BMDMA status quirk applied. */
static struct ata_port_operations piix_vmw_ops = {
	.inherits	= &piix_pata_ops,
	.bmdma_status	= piix_vmw_bmdma_status,
};

/* ICH PATA operations; ich_pata_cable_detect and ich_set_dmamode
 * are not implemented yet, so only the inherited behaviour applies. */
static struct ata_port_operations ich_pata_ops = {
	.inherits	= &piix_pata_ops,
};

/* SATA (compatibility mode) operations: plain BMDMA32 behaviour. */
static struct ata_port_operations piix_sata_ops = {
	.inherits	= &ata_bmdma32_port_ops,
};

#if 0
/* SIDPR-based SCR access is not supported yet; kept for reference. */
static struct ata_port_operations piix_sidpr_sata_ops = {
  .inherits   = &piix_sata_ops,
/*
  .hardreset    = sata_std_hardreset,
  .scr_read   = piix_sidpr_scr_read,
  .scr_write    = piix_sidpr_scr_write,
*/
};
#endif

/*
 * Per-controller capability table, indexed by the controller-type
 * enum (piix_pata_mwdma, ich5_sata, ...).  Each entry fixes the port
 * flags, the allowed PIO/MWDMA/UDMA transfer-mode masks and the ops
 * vector that ata_host_alloc_pinfo() copies into each port.
 */
static struct ata_port_info piix_port_info[] = {
	[piix_pata_mwdma] = 	/* PIIX3 MWDMA only */
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
		.port_ops	= &piix_pata_ops,
	},

	[piix_pata_33] =	/* PIIX4 at 33MHz */
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &piix_pata_ops,
	},

	[ich_pata_33] = 	/* ICH0 - ICH at 33Mhz*/
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask 	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok  */
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &ich_pata_ops,
	},

	[ich_pata_66] = 	/* ICH controllers up to 66MHz */
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask 	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
		.udma_mask	= ATA_UDMA4,
		.port_ops	= &ich_pata_ops,
	},

	[ich_pata_100] =
	{
		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ich_pata_ops,
	},

	[ich_pata_100_nomwdma1] =
	{
		.flags		= PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2_ONLY,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ich_pata_ops,
	},

	[ich5_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich6_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich6m_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8_sata] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8_2port_sata] =
	{
		.flags		= PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[tolapai_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[ich8m_apple_sata] =
	{
		.flags		= PIIX_SATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &piix_sata_ops,
	},

	[piix_pata_vmw] =
	{
		.flags		= PIIX_PATA_FLAGS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
		.udma_mask	= ATA_UDMA2,
		.port_ops	= &piix_vmw_ops,
	},

};

static inline const struct pci_device_id *
pci_match_one_device(const struct pci_device_id *id, const struct pci_device *dev)
{
	/* An entry matches when every field is either a wildcard
	 * (PCI_ANY_ID / zero class_mask) or equal to the device's. */
	if (id->vendor != PCI_ANY_ID && id->vendor != dev->vendor)
		return NULL;
	if (id->device != PCI_ANY_ID && id->device != dev->device)
		return NULL;
	if (id->subvendor != PCI_ANY_ID && id->subvendor != dev->subsystem_vendor)
		return NULL;
	if (id->subdevice != PCI_ANY_ID && id->subdevice != dev->subsystem_device)
		return NULL;
	if ((id->class ^ dev->class) & id->class_mask)
		return NULL;
	return id;
}

const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
           struct pci_device *dev)
{
	const struct pci_device_id *entry;

	if (!ids)
		return NULL;

	/* The table is terminated by an entry whose vendor, subvendor
	 * and class_mask are all zero. */
	for (entry = ids;
	     entry->vendor || entry->subvendor || entry->class_mask;
	     entry++) {
		if (pci_match_one_device(entry, dev))
			return entry;
	}
	return NULL;
}

int pci_enable_resources(struct pci_device *dev, unsigned int mask)
{
	u16_t new_cmd, old_cmd;
	int i;

	pci_read_config_word(dev->bus, dev->devfn, PCI_COMMAND, &old_cmd);
	new_cmd = old_cmd;

	/* Work out which decode bits the selected resources require. */
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *r = &dev->resource[i];

		if (!(mask & (1 << i)))
			continue;
		if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		/* The expansion ROM only counts when explicitly enabled. */
		if (i == PCI_ROM_RESOURCE && !(r->flags & IORESOURCE_ROM_ENABLE))
			continue;

		if (r->flags & IORESOURCE_IO)
			new_cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			new_cmd |= PCI_COMMAND_MEMORY;
	}

	/* Only touch config space when something actually changed. */
	if (new_cmd != old_cmd) {
		printk("enabling device (%04x -> %04x)\n",
		   old_cmd, new_cmd);
		pci_write_config_word(dev->bus, dev->devfn, PCI_COMMAND, new_cmd);
	}
	return 0;
}

int pci_enable_device(struct pci_device *dev)
{
	const unsigned wanted = IORESOURCE_MEM | IORESOURCE_IO;
	unsigned bars = 0;
	int i;

	/* Collect the mask of BARs backed by I/O or memory resources. */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (dev->resource[i].flags & wanted)
			bars |= 1u << i;
	}

	return pci_enable_resources(dev, bars);
}

struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	/* No port-multiplier support here: the physical link is simply
	 * the link the device hangs off. */
	return dev->link;
}

void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);

	/* The SATA speed limit is bound to the attached device, so it
	 * is reset together with the device. */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* The high bits of dev->flags record asynchronous warm-plug
	 * requests and must survive; clear only the init-time bits. */
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;

	/* Wipe the clearable section of the device structure. */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);

	/* Start with every transfer mode enabled. */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}

void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int devno;

	/* Zero everything up to (but excluding) the device array. */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* Initialize every device slot on this link. */
	for (devno = 0; devno < ATA_MAX_DEVICES; devno++) {
		struct ata_device *dev = &link->device[devno];

		dev->link = link;
		dev->devno = devno;
		ata_dev_init(dev);
	}
}

/*
 * Allocate and initialize one ata_port bound to @host.
 * Returns the new port, or NULL on allocation failure.
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	ap = (struct ata_port*)kmalloc(sizeof(struct ata_port),0);
	if(!ap)
		return NULL;

	/* kmalloc() does not zero memory; clear the whole structure so
	 * fields that are only ever read or OR-ed into later (pflags,
	 * qc_allocated, qc_active, ioaddr, ...) start from a defined
	 * state.  Upstream Linux uses kzalloc() here. */
	memset(ap, 0, sizeof(*ap));

	ap->host = host;
	ap->flags = ATA_FLAG_DISABLED;
	ap->dev = host->dev;

	ata_link_init(ap,&ap->link,0);	

	return ap;
}

/*
 * Allocate an ata_host with @max_ports ports for PCI device @dev.
 * Halts the machine (printk + spin) on allocation failure, matching
 * this driver's error-handling convention.
 */
struct ata_host *ata_host_alloc(struct pci_device *dev, int max_ports)
{
  struct ata_host *host;
  size_t sz;
  int i;

  sz = sizeof(struct ata_host); 
  host = (struct ata_host*)kmalloc(sz, 0);
  if (!host){
		printk("Failed to allocate ata_host in %s of %s\n",
			__func__,__FILE__);
		while(1);
	}

  /* kmalloc() returns uninitialized memory; zero the host so fields
   * that are read before being assigned -- host->ops is tested with
   * "if (!host->ops)" in ata_host_alloc_pinfo()/ata_host_start() and
   * host->flags is OR-ed into -- start from a defined state.
   * Upstream Linux uses kzalloc() here. */
  memset(host, 0, sz);

	/* by this pointer, we can access the driver from the device structure. */
	dev->driver_data = (void *)host;

  host->dev = dev;
  host->n_ports = max_ports;

  /* allocate ports bound to this host */
  for (i = 0; i < max_ports; i++) {
    struct ata_port *ap;

    ap = ata_port_alloc(host);
    if (!ap){
			printk("can not allocate memory for ap in %s of %s\n",
				__func__,__FILE__);
			while(1);
		}

    ap->port_no = i;
    host->ports[i] = ap;
  }

  return host;
}

/*
 * Allocate a host and program each port from the port-info array @ppi.
 * A NULL entry in @ppi means "reuse the previous entry", mirroring the
 * upstream libata convention.
 */
struct ata_host *ata_host_alloc_pinfo(struct pci_device *dev,
              struct ata_port_info ** ppi,
              int n_ports)
{
  const struct ata_port_info *pi;
  struct ata_host *host;
  int i, j;

  host = ata_host_alloc(dev, n_ports);
  if (!host){
		printk("Failed to allocate ata_host in %s of %s\n",
			__func__,__FILE__);
		while(1);
	}

  for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
    struct ata_port *ap = host->ports[i];

    if (ppi[j])
      pi = ppi[j++];

    /* If ppi[0] itself was NULL, pi is still NULL here and the
     * assignments below would dereference it -- fail loudly instead
     * of crashing on a wild pointer. */
    if (!pi) {
      printk("no port info supplied in %s of %s\n",__func__,__FILE__);
      while(1);
    }

		ap->print_id = i;
    ap->pio_mask = pi->pio_mask;
    ap->mwdma_mask = pi->mwdma_mask;
    ap->udma_mask = pi->udma_mask;
    ap->flags |= pi->flags;
    ap->link.flags |= pi->link_flags;
    ap->ops = pi->port_ops;

    /* The first port's ops double as the host's default ops. */
    if (!host->ops)
      host->ops = pi->port_ops;
  }

  return host;
}

static int ata_resources_present(struct pci_device *pdev, int port)
{
	int bar;

	/* Each channel owns a pair of BARs; both must be populated. */
	for (bar = port * 2; bar < port * 2 + 2; bar++) {
		if (pci_resource_start(pdev, bar) == 0 ||
		    pci_resource_len(pdev, bar) == 0)
			return 0;
	}
	return 1;
}

static void setup_iomap(struct pci_device *pdev, u16_t mask, struct ata_host *host)
{
	int i;

	/* "Map" every BAR selected by @mask into host->iomap. */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		len = pci_resource_len(pdev, i);
		if (!len || i >= PCI_ROM_RESOURCE) {
			printk("Invalid PCI resource. %s %s\n",
					__func__,__FILE__);
			while(1);
		}

		/* No remapping is performed: the BAR's start address is
		 * used directly as the "mapped" address. */
		host->iomap[i] = (void *)pci_resource_start(pdev, i);
	}
}

static void ata_sff_std_ports(struct ata_ioports *ioaddr)
{
	void *base = ioaddr->cmd_addr;

	/* Taskfile registers sit at fixed offsets from the command block. */
	ioaddr->data_addr    = base + ATA_REG_DATA;
	ioaddr->error_addr   = base + ATA_REG_ERR;
	ioaddr->feature_addr = base + ATA_REG_FEATURE;
	ioaddr->nsect_addr   = base + ATA_REG_NSECT;
	ioaddr->lbal_addr    = base + ATA_REG_LBAL;
	ioaddr->lbam_addr    = base + ATA_REG_LBAM;
	ioaddr->lbah_addr    = base + ATA_REG_LBAH;
	ioaddr->device_addr  = base + ATA_REG_DEVICE;
	ioaddr->status_addr  = base + ATA_REG_STATUS;
	ioaddr->command_addr = base + ATA_REG_CMD;
}

void ata_pci_sff_init_host(struct ata_host *host)
{
	struct pci_device *pdev = host->dev;
	unsigned int mask = 0;
	int i;

	/* Map both legacy channels; each uses a BAR pair (cmd, ctl). */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		int base = i * 2;
		void **iomap;

		if (!ata_resources_present(pdev, i)) {
			printk("resource not present for PCI device.%s\n",
					__FILE__);
			while(1);
		}

		setup_iomap(pdev, 0x3 << base, host);
		iomap = host->iomap;

		ap->ioaddr.cmd_addr = iomap[base];
		/* The control block lives at offset ATA_PCI_CTL_OFS in the
		 * second BAR; altstatus shares that address. */
		ap->ioaddr.altstatus_addr =
		ap->ioaddr.ctl_addr = (void *)
			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
		ata_sff_std_ports(&ap->ioaddr);

		printk("ATA port info: cmd %x ctl %x\n",
			(unsigned long)pci_resource_start(pdev, base),
			(unsigned long)pci_resource_start(pdev, base + 1));

		mask |= 1 << i;
	}

	if (!mask) {
		printk("no available native port\n");
		while(1);
	}
}

/* Record the DMA addressing capability for @pdev.  No validation is
 * performed; the mask is simply stored on the device. */
void pci_set_dma_mask(struct pci_device *pdev,u64_t mask)
{
	pdev->dma_mask = mask;
}

void ata_pci_bmdma_init(struct ata_host *host)
{
	struct pci_device *pdev = host->dev;
	int i;

	/* BAR4 holds the bus-master DMA registers; without it, no DMA. */
	if (pci_resource_start(pdev, 4) == 0) {
		printk("No DMA available. This is impossible for hard drives\n");
		while(1);
	}

	/* TODO: If we get no DMA mask we should fall back to PIO */
	pci_set_dma_mask(pdev, ATA_DMA_MASK);

	/* "Map" the DMA register block (BAR4). */
	setup_iomap(pdev, 1 << 4, host);

	/* Each channel owns an 8-byte slice of the BMDMA block. */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		void *bmdma = host->iomap[4] + 8 * i;

		ap->ioaddr.bmdma_addr = bmdma;
		/* Bit 7 of the status register flags a simplex controller
		 * (channels cannot run DMA concurrently). */
		if (!(ap->flags & ATA_FLAG_IGN_SIMPLEX) &&
		    (inb(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;

		printk("ata port: %d bmdma %x\n", i,
			(unsigned long)pci_resource_start(pdev, 4) + 8 * i);
	}
}

void ata_pci_sff_prepare_host(struct pci_device *pdev,
					struct ata_port_info **ppi, struct ata_host **r_host)
{
	struct ata_host *host;

	/* Allocate a two-port host described by @ppi. */
	host = ata_host_alloc_pinfo(pdev, ppi, 2);
	if (!host) {
		printk("Failed to allocate ata_host in %s of %s\n",
			__func__,__FILE__);
		while(1);
	}

	/* Set up taskfile I/O first, then the bus-master DMA engine. */
	ata_pci_sff_init_host(host);
	ata_pci_bmdma_init(host);

	*r_host = host;
}

static void __pci_set_master(struct pci_device *dev, int enable)
{
	u16_t old_cmd, new_cmd;

	pci_read_config_word(dev->bus, dev->devfn, PCI_COMMAND, &old_cmd);
	new_cmd = enable ? (old_cmd | PCI_COMMAND_MASTER)
			 : (old_cmd & ~PCI_COMMAND_MASTER);

	/* Only touch config space when the bit actually changes. */
	if (new_cmd != old_cmd) {
		printk("%s bus mastering\n",
			enable ? "enabling" : "disabling");
		pci_write_config_word(dev->bus, dev->devfn, PCI_COMMAND, new_cmd);
	}
	dev->is_busmaster = enable;
}

/*
 *  If we set up a device for bus mastering, we need to check the latency
 *  timer as certain crappy BIOSes forget to set it properly.
 */
/* Upper bound used by pcibios_set_master() when clamping the timer. */
unsigned int pcibios_max_latency = 255;

void pcibios_set_master(struct pci_device *dev)
{
	u8_t lat;

	pci_read_config_byte(dev->bus, dev->devfn, PCI_LATENCY_TIMER, &lat);

	if (lat < 16) {
		/* BIOS left the timer too low; raise it to 64, capped at
		 * pcibios_max_latency. */
		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
	} else if (lat > pcibios_max_latency) {
		lat = pcibios_max_latency;
	} else {
		return;	/* already within the acceptable range */
	}

	printk("setting latency timer to %d\n", lat);
	pci_write_config_byte(dev->bus, dev->devfn, PCI_LATENCY_TIMER, lat);
}

void pci_set_master(struct pci_device *dev)
{
	/* Enable bus mastering, then fix up the latency timer. */
	__pci_set_master(dev, 1);
	pcibios_set_master(dev);
}

/**
 *  ata_sff_irq_status - Check if the device is busy
 *  @ap: port where the device is
 *
 *  Determine if the port is currently busy. Uses altstatus
 *  if available in order to avoid clearing shared IRQ status
 *  when finding an IRQ source. Non ctl capable devices don't
 *  share interrupt lines fortunately for us.
 *
 *  LOCKING:
 *  Inherited from caller.
 */
static u8_t ata_sff_irq_status(struct ata_port *ap)
{
	u8_t status;

	/* Prefer altstatus: reading it does not clear the INTRQ latch,
	 * so a busy device on a shared IRQ line is left undisturbed. */
	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		status = ata_sff_altstatus(ap);
		if (status & ATA_BUSY)
			return status;	/* not us: we are busy */
	}

	/* Read the main status register, which clears the INTRQ latch. */
	return ap->ops->sff_check_status(ap);
}

/**
 *	ata_sff_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_sff_host_intr(struct ata_port *ap,
				      struct ata_queued_cmd *qc)
{
	u8_t status, host_stat = 0;

	//printk("prot: %d stat: %x state: %d\n",(int)qc->tf.protocol,(unsigned)host_stat,(int)ap->hsm_task_state);
	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATAPI_PROT_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		/* data-transfer state: fall through and service the IRQ */
		break;
	default:
		goto idle_irq;
	}

	/* check main status, clearing INTRQ if needed */
	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->sff_irq_clear(ap);

	/* advance the host state machine (in_wq == 0: interrupt context) */
	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATAPI_PROT_DMA)){
		printk("DMA error. %s %s\n",__func__,__FILE__);
		BUG();
	}

	return 1;	/* irq handled */

idle_irq:
	return 0;	/* irq not handled */
}

void ata_sff_interrupt()
{
	struct ata_host *host = ahost;
	unsigned int port_no;
	unsigned int handled = 0;

	/* Poll every port of the global host for a pending command IRQ. */
	for (port_no = 0; port_no < host->n_ports; port_no++) {
		struct ata_port *ap = host->ports[port_no];
		struct ata_queued_cmd *qc;

		if (!ap || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		/* Only non-polled, still-active commands expect an IRQ. */
		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING) &&
		    (qc->flags & ATA_QCFLAG_ACTIVE))
			handled |= ata_sff_host_intr(ap, qc);
	}

	if (!handled) {
		printk("ata interrupt handling failed.\n");
		BUG();
	}
}

/* Bottom-half entry point for the ATA IRQ: run the SFF interrupt handler. */
void ata_bottom(void)
{
	ata_sff_interrupt();
}

/* Top-half IRQ handler: defer all work to the IDE bottom half.
 * @regs is the trap frame supplied by the IRQ dispatcher; unused here. */
void ata_piix_interrupt(struct regs_t *regs)
{
	raise_bottom(IDE_B);
}

/*
 * Resolve the ->inherits chain of a port_operations table in place.
 *
 * The table is treated as an array of pointers from its first member
 * up to (but not including) ->inherits: every slot still NULL is
 * filled from the nearest ancestor that defines it.  Slots holding
 * ERR_PTR sentinels are reset to NULL afterwards, and ->inherits is
 * cleared so finalization runs at most once per table.
 */
void ata_finalize_port_ops(struct ata_port_operations *ops)
{
  const struct ata_port_operations *cur;
  void **begin = (void **)ops;
  void **end = (void **)&ops->inherits;
  void **pp;

  if (!ops || !ops->inherits)
    return;

  /* Walk ancestors from most- to least-derived, filling empty slots. */
  for (cur = ops->inherits; cur; cur = cur->inherits) {
    void **inherit = (void **)cur;

    for (pp = begin; pp < end; pp++, inherit++)
      if (!*pp)
        *pp = *inherit;
  }

  /* Turn "explicitly not implemented" ERR_PTR markers back into NULL. */
  for (pp = begin; pp < end; pp++)
    if (IS_ERR(*pp))
      *pp = NULL;

  ops->inherits = NULL;
}

void ata_freeze_port(struct ata_port *ap)
{
	/* Give the LLD a chance to quiesce the port, then mark it frozen. */
	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;
}

void ata_host_start(struct ata_host *host)
{
	int i, rc;

	if (host->flags & ATA_HOST_STARTED) {
		printk("ata_host started prematurely.\n");
		while(1);
	}

	/* Resolve inheritance chains before any ops are invoked. */
	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		/* Adopt the first port's ops as the host ops if unset. */
		if (!host->ops)
			host->ops = ap->ops;
	}

	/* Start each port, then mark it frozen. */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				printk("Can't start ata port: %d\n", i);
				while(1);
			}
		}
		ata_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
}

/* Initialize the link's SATA speed limit from hardware. */
int sata_link_init_spd(struct ata_link *link)
{
	/*
	 * FIXME!
	 * This operation is only supported when the
	 * hard drive is in SATA mode.
	 * Let's ignore it for now and report "not supported".
	 */
	return -EOPNOTSUPP;
}

static inline int ata_id_is_cfa(const u16_t *id)
{
	/* Word 0 of traditional CompactFlash cards reads 0x848A. */
	if (id[ATA_ID_CONFIG] == 0x848A)
		return 1;

	/*
	 * Newer CF specs drop the word-0 requirement, forbid reporting
	 * an ATA version in word 80 and instead require CFA feature-set
	 * support to be flagged in word 83.  Some cards follow only one
	 * of these rules, and those that do not advertise CFA support
	 * would need a quirk list anyway, so check word 83 only.
	 */
	return (id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004;
}

/**
 *  ata_exec_internal - execute libata internal command
 *  @dev: Device to which the command is sent
 *  @tf: Taskfile registers for the command and the result
 *  @cdb: CDB for packet command
 *  @dma_dir: Data transfer direction of the command
 *  @buf: Data buffer of the command
 *  @buflen: Length of data buffer
 *  @timeout: Timeout in msecs (0 for default)
 *
 *  Wrapper around ata_exec_internal_sg() which takes simple
 *  buffer instead of sg list.
 *
 *  LOCKING:
 *  None.  Should be called with kernel context, might sleep.
 *
 *  RETURNS:
 *  Zero on success, AC_ERR_* mask on failure
 */
/**
 *  ata_sg_init - Associate command with scatter-gather table.
 *  @qc: Command to be associated
 *  @sg: Scatter-gather table.
 *  @n_elem: Number of elements in s/g table.
 *
 *  Initialize the data-related elements of queued_cmd @qc
 *  to point to a scatter-gather table @sg, containing @n_elem
 *  elements.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
     unsigned int n_elem)
{
	/* Attach the table and reset the cursor to its first element. */
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = sg;
}

#if 1
/* Stub completion callback for internal commands: only logs the event. */
static void ata_qc_complete_internal(void *qc)
{
	printk("ata_qc_complete_internal.\n");
}
#endif

/**
 *  ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *  @qc: Command with scatter-gather table to be mapped.
 *
 *  DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 *
 *  RETURNS:
 *  Zero on success, negative on error.
 *
 */
int dma_map_sg(struct scatterlist *sg, int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	/* An empty table cannot be mapped -- fail loudly. */
	if (nents == 0 || sg[0].length == 0) {
		printk("error. %s in %s\n",__func__,__FILE__);
		while(1);
	}

	/* Identity mapping: the DMA address of each element is simply
	 * its physical address; no IOMMU is involved. */
	for_each_sg(sg, s, nents, i) {
		if (!sg_page(s)) {
			printk("sg_page error. %s in %s\n",__func__,__FILE__);
			while(1);
		}

		s->dma_address = sg_phys(s);
		s->dma_length = s->length;
	}

	return nents;
}
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	unsigned int mapped;

	/* DMA-map the scatter-gather table attached to this command. */
	mapped = dma_map_sg(qc->sg, qc->n_elem, qc->dma_dir);
	if (mapped < 1)
		return -1;

	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = mapped;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}

/**
 *  ata_qc_issue - issue taskfile to device
 *  @qc: command to issue to device
 *
 *  Prepare an ATA command to submission to device.
 *  This includes mapping the data into a DMA-able
 *  area, filling in the S/G table, and finally
 *  writing the taskfile to hardware, starting the command.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
  struct ata_port *ap = qc->ap;
  struct ata_link *link = qc->dev->link;
  u8_t prot = qc->tf.protocol;

  /* Make sure only one non-NCQ command is outstanding.  The
   * check is skipped for old EH because it reuses active qc to
   * request ATAPI sense.
   */
  if(unlikely((ap->ops->error_handler && ata_tag_valid(link->active_tag)))){
    printk("can not issue the command.%s of %s\n",__func__,__FILE__);
    while(1);
  }

  if (ata_is_ncq(prot)) {
		printk("NCQ is not supported now.\n");
		while(1);
  } else {
    ap->nr_active_links++;
    link->active_tag = qc->tag;
  }

  /* Mark the command active before touching the hardware. */
  qc->flags |= ATA_QCFLAG_ACTIVE;
  ap->qc_active |= 1 << qc->tag;

  /* We guarantee to LLDs that they will have at least one
   * non-zero sg if the command is a data command.
   */
  if(unlikely(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))){
    printk("BUG in %s of %s\n",__func__,__FILE__);
    while(1);
  }

  /* DMA-map the sg table for DMA protocols, or for PIO on ports
   * that drive PIO through the DMA engine (ATA_FLAG_PIO_DMA). */
  if (ata_is_dma(prot) || (ata_is_pio(prot) &&
         (ap->flags & ATA_FLAG_PIO_DMA)))
    if (ata_sg_setup(qc)){
			printk("setuping DMA error.%s in %s\n",__func__,__FILE__);
			while(1);
		}

  /* if device is sleeping, schedule reset and abort the link */
  if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		printk("Device is sleeping. %s in %s\n",__func__,__FILE__);
		while(1);
  }

  /* Build controller-specific structures, then hand off to the LLD. */
  ap->ops->qc_prep(qc);

  qc->err_mask |= ap->ops->qc_issue(qc);
  if (unlikely(qc->err_mask)){
    printk("qc_issue error.\n");
		while(1);
	}
  return;
}

/**
 *  ata_qc_free - free unused ata_queued_cmd
 *  @qc: Command to complete
 *
 *  Designed to free unused ata_queued_cmd object
 *  in case something prevents using it.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	unsigned int tag;

	if (unlikely(qc == NULL)) {
		printk("trying to free a null command.\n");
		while(1);
	}

	ap = qc->ap;
	qc->flags = 0;

	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* Poison the tag and release it back to the port. */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}

/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* Last sector of the transfer: move the HSM to its final state. */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* Resolve the page/offset pair the sg cursor points into. */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* NOTE(review): the unconditional per-sector "data read/write"
	 * printk was removed here -- it fired once per sector, flooding
	 * the console and badly slowing PIO transfers (upstream Linux
	 * uses a compiled-out VPRINTK at this spot). */
	buf = (unsigned char*)page_address(page);
	ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* Advance to the next scatterlist entry once this one is consumed. */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}

/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (!is_multi_taskfile(&qc->tf)) {
		ata_pio_sector(qc);
	} else {
		/* READ/WRITE MULTIPLE: move up to multi_count sectors. */
		unsigned int remaining;

		if (unlikely(qc->dev->multi_count == 0)) {
			printk("%s %s\n",__func__,__FILE__);
			while(1);
		}

		remaining = min((qc->nbytes - qc->curbytes) / qc->sect_size,
				qc->dev->multi_count);
		while (remaining--)
			ata_pio_sector(qc);
	}

	ata_sff_sync(qc->ap); /* flush */
}

/**
 *  ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *  @ap: the target ata_port
 *  @qc: qc on going
 *
 *  RETURNS:
 *  1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
            struct ata_queued_cmd *qc)
{
	/* Polled commands are always driven from the workqueue. */
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state != HSM_ST_FIRST)
		return 0;

	/* PIO writes deliver the first data block from the workqueue. */
	if (qc->tf.protocol == ATA_PROT_PIO &&
	    (qc->tf.flags & ATA_TFLAG_WRITE))
		return 1;

	/* ATAPI devices without CDB interrupts take the CDB here too. */
	if (ata_is_atapi(qc->tf.protocol) &&
	    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;

	return 0;
}

/**
 *  atapi_send_cdb - Write CDB bytes to hardware
 *  @ap: Port to which ATAPI device is attached.
 *  @qc: Taskfile currently active
 *
 *  When device has indicated its readiness to accept
 *  a CDB, this function is called.  Send the CDB.
 *
 *  LOCKING:
 *  caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
#if 1
	/* ATAPI support is not implemented in this driver yet: halt
	 * loudly if this path is ever reached.  The #else branch keeps
	 * the Linux reference implementation for a future port. */
	printk("%s %s\n",__func__,__FILE__);
	while(1);
#else
  /* send SCSI cdb */
  DPRINTK("send cdb\n");
  WARN_ON_ONCE(qc->dev->cdb_len < 12);

  ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
  ata_sff_sync(ap);
  /* FIXME: If the CDB is for DMA do we need to do the transition delay
     or is bmdma_start guaranteed to do it ? */
  switch (qc->tf.protocol) {
  case ATAPI_PROT_PIO:
    ap->hsm_task_state = HSM_ST;
    break;
  case ATAPI_PROT_NODATA:
    ap->hsm_task_state = HSM_ST_LAST;
    break;
  case ATAPI_PROT_DMA:
    ap->hsm_task_state = HSM_ST_LAST;
    /* initiate bmdma */
    ap->ops->bmdma_start(qc);
    break;
  }
#endif
}

/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
#if 1
	/* ATAPI support is not implemented in this driver yet: halt
	 * loudly if this path is ever reached.  The #else branch keeps
	 * the Linux reference implementation for a future port. */
	printk("%s %s\n",__func__,__FILE__);
	while(1);
#else
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->sff_tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_sff_sync(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
#endif
}

/* Look up the active queued command for @tag.  Under new-style EH a
 * failed qc must not be touched by the regular execution path, so NULL
 * is returned for any qc that is not purely ACTIVE.  Legacy EH (no
 * error_handler callback) gets the raw lookup result. */
static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
                 unsigned int tag)
{
  struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
  unsigned int state;

  if (unlikely(qc == NULL) || ap->ops->error_handler == NULL)
    return qc;

  state = qc->flags & (ATA_QCFLAG_ACTIVE | ATA_QCFLAG_FAILED);
  return (state == ATA_QCFLAG_ACTIVE) ? qc : NULL;
}

/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	/* This port performs no IOMMU / bounce-buffer unmapping, so
	 * "cleaning" the sg list is pure bookkeeping: forget the list
	 * and drop the mapped flag. */
	qc->sg = NULL;
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
}

/*
 * __ata_qc_complete - low-level qc completion
 * @qc: command to complete (must have ATA_QCFLAG_ACTIVE set)
 *
 * Unmaps DMA bookkeeping, retires the tag from the link/port active
 * sets, clears exclusive-link status and invokes qc->complete_fn.
 * The statement order matters: the qc is marked inactive before the
 * completion callback runs.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	/* completing an inactive qc is a driver bug: trap hard */
	if(!(qc->flags & ATA_QCFLAG_ACTIVE)){
		printk("%s %s\n",__func__,__FILE__);
		while(1);
	}

	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}

/* Copy the command's taskfile flags into the result taskfile, then ask
 * the low-level driver to read the device registers into qc->result_tf. */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
  qc->result_tf.flags = qc->tf.flags;
  qc->ap->ops->qc_fill_rtf(qc);
}

/*
 * ata_qc_schedule_eh - schedule error handling for a failed qc
 * @qc: command to schedule EH for
 *
 * NOTE(port): error handling is not implemented in this port; the live
 * path is a trap stub.  The Linux implementation (compiled out below)
 * marks the qc FAILED and aborts its SCSI request.
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
#if 1
	/* trap: reaching EH scheduling means an unhandled error */
	printk("%s %s\n",__func__,__FILE__);
	while(1);
#else
  struct ata_port *ap = qc->ap;
        
  WARN_ON(!ap->ops->error_handler);
    
  qc->flags |= ATA_QCFLAG_FAILED;
  ata_eh_set_pending(ap, 1);

  /* The following will fail if timeout has already expired.
   * ata_scsi_error() takes care of such scmds on EH entry.
   * Note that ATA_QCFLAG_FAILED is unconditionally set after
   * this function completes.
   */
  blk_abort_request(qc->scsicmd->request);
#endif
}

/**
 *  ata_port_schedule_eh - schedule error handling without a qc
 *  @ap: ATA port to schedule EH for
 *
 *  Schedule error handling for @ap.  EH will kick in as soon as
 *  all commands are drained.
 *
 *  NOTE(port): EH is not implemented in this port; the live path is a
 *  trap stub, and the Linux implementation is compiled out below.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
#if 1
	/* trap: port-level EH was requested but is unsupported */
	printk("%s %s\n",__func__,__FILE__);
	while(1);
#else
  WARN_ON(!ap->ops->error_handler);

  if (ap->pflags & ATA_PFLAG_INITIALIZING)
    return;

  ata_eh_set_pending(ap, 1);
  scsi_schedule_eh(ap->scsi_host);
#endif
}

/* A command completed successfully; if it actually exercised the
 * configured transfer mode, clear the "dubious xfer" flag on the
 * device.  Commands that prove nothing (internal, no-data, or PIO on a
 * DMA-capable device) leave the flag alone. */
static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
  struct ata_device *dev = qc->dev;

  if (ata_tag_internal(qc->tag))
    return;                     /* internal commands don't count */

  if (ata_is_nodata(qc->tf.protocol))
    return;                     /* no data moved, nothing verified */

  if (ata_is_pio(qc->tf.protocol) && (dev->mwdma_mask || dev->udma_mask))
    return;                     /* PIO doesn't exercise the DMA mode */

  dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}

/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* always fill result TF for failed qc */
#if 1
			/* NOTE(port): failed-qc handling (EH) is not
			 * implemented; trap instead of running the
			 * compiled-out Linux path below. */
			printk("task file failed. %s %s\n",__func__,__FILE__);
			while(1);
#else
			fill_result_tf(qc);

			if (!ata_tag_internal(qc->tag))
				ata_qc_schedule_eh(qc);
			else
				__ata_qc_complete(qc);
#endif
			return;
		}

		/* completing a command on a frozen port is unsupported */
		if(ap->pflags & ATA_PFLAG_FROZEN){
			printk("%s %s\n",__func__,__FILE__);
			while(1);
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}

/*
 * ata_port_freeze - abort & freeze a port
 * @ap: port to freeze
 *
 * NOTE(port): freezing is not implemented in this port; the live path
 * is a trap stub.  The Linux implementation (compiled out) freezes the
 * port and returns the number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
#if 1
	/* trap: freeze requested but unsupported */
	printk("%s %s\n",__func__,__FILE__);
	while(1);
#else
  int nr_aborted;

  WARN_ON(!ap->ops->error_handler);

  __ata_port_freeze(ap);
  nr_aborted = ata_port_abort(ap);

  return nr_aborted;
#endif
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;

	if (!ap->ops->error_handler) {
		/* legacy EH: re-enable the IRQ when called from the
		 * workqueue, then complete unconditionally */
		if (in_wq)
			ap->ops->sff_irq_on(ap);
		ata_qc_complete(qc);
		return;
	}

	if (in_wq) {
		/* EH might have kicked in while host lock is
		 * released; re-validate the qc before touching it.
		 */
		qc = ata_qc_from_tag(ap, qc->tag);
		if (!qc)
			return;
		if (qc->err_mask & AC_ERR_HSM) {
			ata_port_freeze(ap);
			return;
		}
		ap->ops->sff_irq_on(ap);
		ata_qc_complete(qc);
		return;
	}

	/* called with host lock held: complete or freeze directly */
	if (qc->err_mask & AC_ERR_HSM)
		ata_port_freeze(ap);
	else
		ata_qc_complete(qc);
}

/**
 *	ata_sff_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Advance the SFF PIO/ATAPI host state machine one or more steps
 *	based on @status; error conditions funnel into HSM_ST_ERR and
 *	completion into HSM_ST_LAST/HSM_ST_IDLE.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8_t status, int in_wq)
{
	//struct ata_eh_info *ehi = &ap->link.eh_info;
	//unsigned long flags = 0;
	int poll_next;

	/* the qc must still be active while the HSM runs; trap otherwise */
	if((qc->flags & ATA_QCFLAG_ACTIVE) == 0){
		printk("No. 1:%s %s\n",__func__,__FILE__);
		while(1);
	}

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	if(in_wq != ata_hsm_ok_in_wq(ap, qc)){
		printk("No. 2: %s %s\n",__func__,__FILE__);
		while(1);
	}

fsm_start:
	//printk("ata: protocol %d task_state %d (dev_stat 0x%X)\n",
	//	qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device.  Mark hint.
					 */
					if (qc->dev->horkage &
					    ATA_HORKAGE_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrputed */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have.  Set NODEV_HINT
				 * for it.  Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		//printk("ata: dev %u command complete, drv_stat 0x%x\n",
		//	qc->dev->devno, status);

		/* device/HSM errors must have been routed to HSM_ST_ERR */
		if(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)){
			printk("No.3: %s %s\n",__func__,__FILE__);
			while(1);
		}

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		printk("No.4: %s %s\n",__func__,__FILE__);
		while(1);
	}

	return poll_next;
}

/* Poll-driven PIO engine: repeatedly wait for BSY to clear and feed the
 * current status byte into ata_sff_hsm_move() until the state machine
 * no longer asks to be polled. */
void ata_pio_task(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	u8_t status;

	for (;;) {
		/* the HSM must be mid-command whenever we poll */
		if (unlikely(ap->hsm_task_state == HSM_ST_IDLE)) {
			printk("The hardware should be doing something. %s %s\n",
				__func__,__FILE__);
			while(1);
		}

		/*
		 * This is purely heuristic.  This is a fast path.
		 * Sometimes when we enter, BSY will be cleared in
		 * a chk-status or two.  If not, the drive is probably
		 * seeking or something.  Snooze for a couple msecs,
		 * then chk-status again.
		 */
		status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
		if (status & ATA_BUSY) {
			msleep(2);
			status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
			if (status & ATA_BUSY)
				continue;	/* still busy: poll again */
		}

		/* move the HSM; stop when no further polling is wanted */
		if (!ata_sff_hsm_move(ap, qc, status, 1))
			break;
	}
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8_t *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32_t preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	unsigned int err_mask;

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the internal tag must be free; concurrent use is a bug */
	if (test_and_set_bit(tag, &ap->qc_allocated)){
		printk("BUG: %s of %s\n",__func__,__FILE__);
		while(1);
	}

	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	//qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save the link/port active-command state and clear it so the
	 * internal command runs alone; restored after completion */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = NULL;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	/* this port only supports polled internal commands; DMA-driven
	 * completion is unimplemented and traps */
	if(qc->tf.flags & ATA_TFLAG_POLLING)
		ata_pio_task(ap,qc); //busy wait
	else{
		printk("we should expect DMA here. %s %s\n",
			__func__,__FILE__);
		BUG();	
	}

	//ata_port_flush_task(ap);

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		printk("ATA_CMD Failed. %s %s\n",__func__,__FILE__);
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}
	/* finish up */

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		printk("port disabled after failure.\n");
		while(1);
	}

	if (err_mask & AC_ERR_TIMEOUT){
		printk("command time out.\n");
		while(1);
	}

	return err_mask;
}

/* Initialize @sg as a one-entry scatterlist covering @buf/@buflen. */
static void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{ 
  sg_init_table(sg, 1);
  sg_set_buf(sg, buf, buflen);
}

/* Convenience wrapper around ata_exec_internal_sg() for commands whose
 * data lives in a single contiguous buffer: builds a one-entry
 * scatterlist on the stack when a transfer is requested. */
unsigned ata_exec_internal(struct ata_device *dev,
         struct ata_taskfile *tf, const u8_t *cdb,
         int dma_dir, void *buf, unsigned int buflen,
         unsigned long timeout)
{
  struct scatterlist sg;
  struct scatterlist *psg = NULL;
  unsigned int n_elem = 0;

  if (dma_dir != DMA_NONE) {
		/* a data transfer with no buffer is a caller bug: trap */
		if(unlikely(!buf)){
			printk("Do you want to do DMA to an empty buffer?\n");
			while(1);
		}
    sg_init_one(&sg, buf, buflen);
    psg = &sg;
    n_elem = 1;
  }

  return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem, timeout);
}

/**
 *  ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *  @dev: Device to which command will be sent
 *  @enable: Whether to enable or disable the feature
 *  @feature: The sector count represents the feature to set
 *
 *  Issue SET FEATURES - SATA FEATURES command to device @dev
 *  on port @ap with sector count
 *
 *  LOCKING:
 *  PCI/etc. bus probe sem.
 *
 *  RETURNS:
 *  0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8_t enable,
          u8_t feature)
{
  struct ata_taskfile tf;

  /* Build a no-data SET FEATURES taskfile: @enable selects the
   * sub-command and @feature is carried in the sector-count register. */
  ata_tf_init(dev, &tf);
  tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
  tf.command = ATA_CMD_SET_FEATURES;
  tf.protocol = ATA_PROT_NODATA;
  tf.feature = enable;
  tf.nsect = feature;

  return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}

/**
 *  ata_dev_init_params - Issue INIT DEV PARAMS command
 *  @dev: Device to which command will be sent
 *  @heads: Number of heads (taskfile parameter)
 *  @sectors: Number of sectors (taskfile parameter)
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
          u16_t heads, u16_t sectors)
{
  struct ata_taskfile tf;
  unsigned int err_mask;

  /* Number of sectors per track 1-255. Number of heads 1-16 */
  if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
    return AC_ERR_INVALID;

  /* Build the no-data INIT DEVICE PARAMETERS taskfile. */
  ata_tf_init(dev, &tf);
  tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
  tf.command = ATA_CMD_INIT_DEV_PARAMS;
  tf.protocol = ATA_PROT_NODATA;
  tf.nsect = sectors;
  tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

  /* A clean abort indicates an original or just out of spec drive
     and we should continue as we issue the setup based on the
     drive reported working geometry */
  if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
    err_mask = 0;

  return err_mask;
}

/**
 *  ata_do_dev_read_id    - default ID read method
 *  @dev: device
 *  @tf: proposed taskfile
 *  @id: data buffer (must hold ATA_ID_WORDS 16-bit words)
 *
 *  Issue the identify taskfile and hand back the buffer containing
 *  identify data. For some RAID controllers and for pre ATA devices
 *  this function is wrapped or replaced by the driver
 *
 *  RETURNS:
 *  0 on success, AC_ERR_* mask otherwise (from ata_exec_internal).
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
          struct ata_taskfile *tf, u16_t *id)
{
  /* read ATA_ID_WORDS * 2 bytes from the device, default timeout */
  return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
             id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}

/**
 *  ata_dev_read_id - issue IDENTIFY and validate the result
 *  @dev: target device
 *  @p_class: in/out device class (may be corrected, e.g. SEMB -> ATA)
 *  @flags: ATA_READID_* flags
 *  @id: buffer for ATA_ID_WORDS 16-bit words of identify data
 *
 *  Issues IDENTIFY DEVICE or IDENTIFY PACKET DEVICE (per class) using
 *  polled PIO, handles the standby spin-up handshake and the pre-ATA4
 *  INIT DEVICE PARAMETERS sequence, then stores the (possibly
 *  corrected) class back through @p_class.  Unrecoverable conditions
 *  trap in an endless loop, as elsewhere in this driver.
 */
void ata_dev_read_id(struct ata_device *dev,unsigned int *p_class,
					unsigned int flags,u16_t *id)
{
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int tried_spinup = 0;

retry:
	ata_tf_init(dev,&tf);

  switch (class) {
  case ATA_DEV_SEMB:
    class = ATA_DEV_ATA;  /* some hard drives report SEMB sig */
    /* fall through */
  case ATA_DEV_ATA:
    tf.command = ATA_CMD_ID_ATA;
    break;
  case ATA_DEV_ATAPI:
    tf.command = ATA_CMD_ID_ATAPI;
    break;
  default:
    /* fixed: format string was "%s of %" — a broken conversion that
     * also dropped the __FILE__ argument */
    printk("unsupported class in %s of %s\n",__func__,__FILE__);
    while(1);
  }

  tf.protocol = ATA_PROT_PIO;

  /* Some devices choke if TF registers contain garbage.  Make
   * sure those are properly initialized.
   */
  tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

  /* Device presence detection is unreliable on some
   * controllers.  Always poll IDENTIFY if available.
   */
  tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_do_dev_read_id(dev, &tf, id);	

	if(err_mask){
		printk("We should not expect error here in %s of %s\n",
				__func__,__FILE__);
		while(1);
	}

  /* sanity-check the reported class against the identify data */
  if (class == ATA_DEV_ATA) {
    if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)){
			printk("device class error @ line: %d of %s.\n",__LINE__,__FILE__);
			while(1);
		}
  } else {
    if (ata_id_is_ata(id)){
      printk("device class error @ line: %d of %s.\n",__LINE__,__FILE__);
			while(1);
		}
  }

  if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
    tried_spinup = 1;
    /*
     * Drive powered-up in standby mode, and requires a specific
     * SET_FEATURES spin-up subcommand before it will accept
     * anything other than the original IDENTIFY command.
     */
    err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
    if (err_mask && id[2] != 0x738c) {
      printk("SPINUP failed\n");
			while(1);
    }
    /*
     * If the drive initially returned incomplete IDENTIFY info,
     * we now must reissue the IDENTIFY command.
     */
    if (id[2] == 0x37c8)
      goto retry;
  }

  if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
    /*
     * The exact sequence expected by certain pre-ATA4 drives is:
     * SRST RESET
     * IDENTIFY (optional in early ATA)
     * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
     * anything else..
     * Some drives were very specific about that exact sequence.
     *
     * Note that ATA4 says lba is mandatory so the second check
     * shoud never trigger.
     */
    if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
      err_mask = ata_dev_init_params(dev, id[3], id[6]);
      if (err_mask) {
        printk("INIT_DEV_PARAMS failed\n");
				while(1);
      }

      /* current CHS translation info (id[53-58]) might be
       * changed. reread the identify device info.
       */
      flags &= ~ATA_READID_POSTRESET;
      goto retry;
    }
  }

  *p_class = class;
}

static u64_t ata_id_n_sectors(const u16_t *id)
{
  if (ata_id_has_lba(id)) {
    if (ata_id_has_lba48(id))
      return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
    else
      return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
  } else {
    if (ata_id_current_chs_valid(id))
      return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
             id[ATA_ID_CUR_SECTORS];
    else
      return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
             id[ATA_ID_SECTORS];
  }
}

/* Assemble a 48-bit LBA from the HOB (bits 47:24) and regular
 * (bits 23:0) taskfile address registers. */
u64_t ata_tf_to_lba48(const struct ata_taskfile *tf)
{
  u64_t hi, lo;

  hi = ((u64_t)(tf->hob_lbah & 0xff) << 16) |
       ((u64_t)(tf->hob_lbam & 0xff) << 8) |
       (u64_t)(tf->hob_lbal & 0xff);
  lo = ((tf->lbah & 0xff) << 16) |
       ((tf->lbam & 0xff) << 8) |
       (tf->lbal & 0xff);

  return (hi << 24) | lo;
}

/* Assemble a 28-bit LBA from the taskfile: low nibble of the device
 * register supplies bits 27:24, lbah/lbam/lbal the rest. */
u64_t ata_tf_to_lba(const struct ata_taskfile *tf)
{
  u64_t lba;

  lba  = (tf->device & 0x0f) << 24;
  lba |= (tf->lbah & 0xff) << 16;
  lba |= (tf->lbam & 0xff) << 8;
  lba |= tf->lbal & 0xff;

  return lba;
}

/**
 *  ata_read_native_max_address - Read native max address
 *  @dev: target device
 *  @max_sectors: out parameter for the result native max address
 *
 *  Perform an LBA48 or LBA28 native size query upon the device in
 *  question.
 *
 *  RETURNS:
 *  0 on success, -EACCES if command is aborted by the drive.
 *  -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64_t *max_sectors)
{
  struct ata_taskfile tf;
  unsigned int err_mask;
  const int lba48 = ata_id_has_lba48(dev->id);

  ata_tf_init(dev, &tf);

  /* always clear all address registers */
  tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

  if (lba48) {
    tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
    tf.flags |= ATA_TFLAG_LBA48;
  } else {
    tf.command = ATA_CMD_READ_NATIVE_MAX;
  }

  tf.protocol |= ATA_PROT_NODATA;
  tf.device |= ATA_LBA;

  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
  if (err_mask) {
    printk("failed to read native max address (err_mask=0x%x)\n", err_mask);
    if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
      return -EACCES;
    return -EIO;
  }

  /* result taskfile holds the highest addressable LBA; +1 -> count */
  *max_sectors = (lba48 ? ata_tf_to_lba48(&tf) : ata_tf_to_lba(&tf)) + 1;

  /* some drives report one sector too many; see ATA_HORKAGE_HPA_SIZE */
  if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
    *max_sectors -= 1;

  return 0;
}

/**
 *  ata_set_max_sectors - Set max sectors
 *  @dev: target device
 *  @new_sectors: new max sectors value to set for the device
 *
 *  Set max sectors of @dev to @new_sectors.
 *
 *  RETURNS:
 *  0 on success, -EACCES if command is aborted or denied (due to
 *  previous non-volatile SET_MAX) by the drive.  -EIO on other
 *  errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64_t new_sectors)
{
  struct ata_taskfile tf;
  unsigned int err_mask;
  const int lba48 = ata_id_has_lba48(dev->id);

  /* the taskfile carries the highest addressable LBA, i.e. count - 1 */
  new_sectors--;

  ata_tf_init(dev, &tf);
  tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

  if (lba48) {
    tf.command = ATA_CMD_SET_MAX_EXT;
    tf.flags |= ATA_TFLAG_LBA48;
    tf.hob_lbal = (new_sectors >> 24) & 0xff;
    tf.hob_lbam = (new_sectors >> 32) & 0xff;
    tf.hob_lbah = (new_sectors >> 40) & 0xff;
  } else {
    tf.command = ATA_CMD_SET_MAX;
    tf.device |= (new_sectors >> 24) & 0xf;
  }

  tf.protocol |= ATA_PROT_NODATA;
  tf.device |= ATA_LBA;
  tf.lbal = new_sectors & 0xff;
  tf.lbam = (new_sectors >> 8) & 0xff;
  tf.lbah = (new_sectors >> 16) & 0xff;

  err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
  if (err_mask) {
    printk("failed to set max address (err_mask=0x%x)\n", err_mask);
    if (err_mask == AC_ERR_DEV &&
        (tf.feature & (ATA_ABORTED | ATA_IDNF)))
      return -EACCES;
    return -EIO;
  }

  return 0;
}

/**
 *  ata_id_string - Convert IDENTIFY DEVICE page into string
 *  @id: IDENTIFY DEVICE results we will examine
 *  @s: string into which data is output
 *  @ofs: offset into identify device page
 *  @len: length of string to return. must be an even number.
 *
 *  The strings in the IDENTIFY DEVICE page are broken up into
 *  16-bit chunks.  Run through the string, and output each
 *  8-bit chunk linearly, regardless of platform.
 *
 *  LOCKING:
 *  caller.
 */

void ata_id_string(const u16_t *id, unsigned char *s,
       unsigned int ofs, unsigned int len)
{
  /* each identify word yields two output bytes, so len must be even */
  if(len & 1){
		printk("len is not valid. %s in %s\n",
				__func__,__FILE__);
		while(1);
	}

  for (; len >= 2; len -= 2, ofs++) {
    /* high byte first: identify strings are stored big-endian
     * within each 16-bit word */
    *s++ = (unsigned char)(id[ofs] >> 8);
    *s++ = (unsigned char)(id[ofs] & 0xff);
  }
}

/**
 *  ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *  @id: IDENTIFY DEVICE results we will examine
 *  @s: string into which data is output
 *  @ofs: offset into identify device page
 *  @len: length of string to return. must be an odd number.
 *
 *  This function is identical to ata_id_string except that it
 *  trims trailing spaces and terminates the resulting string with
 *  null.  @len must be actual maximum length (even number) + 1.
 *
 *  LOCKING:
 *  caller.
 */
/* Extract an identify string, trim trailing spaces and NUL-terminate.
 * @len must be the maximum string length (even) + 1; the final byte is
 * reserved for the terminator. */
void ata_id_c_string(const u16_t *id, unsigned char *s,
         unsigned int ofs, unsigned int len)
{
  unsigned char *p;

  ata_id_string(id, s, ofs, len - 1);

  /* fixed: strnlen() takes const char *, but s is unsigned char * —
   * cast explicitly, matching the (const char *) casts used for
   * strcmp() elsewhere in this file */
  p = s + strnlen((const char *)s, len - 1);
  while (p > s && p[-1] == ' ')
    p--;
  *p = '\0';
}

/**
 *  ata_dev_same_device - Determine whether new ID matches configured device
 *  @dev: device to compare against
 *  @new_class: class of the new device
 *  @new_id: IDENTIFY page of the new device
 *
 *  Compare @new_class and @new_id against @dev and determine
 *  whether @dev is the device indicated by @new_class and
 *  @new_id.
 *
 *  LOCKING:
 *  None.
 *
 *  RETURNS:
 *  1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
             const u16_t *new_id)
{
  const u16_t *old_id = dev->id;
  unsigned char old_model[ATA_ID_PROD_LEN + 1], new_model[ATA_ID_PROD_LEN + 1];
  unsigned char old_serial[ATA_ID_SERNO_LEN + 1], new_serial[ATA_ID_SERNO_LEN + 1];

  /* a class change always means a different device */
  if (dev->class != new_class) {
    printk("class mismatch %d != %d\n", dev->class, new_class);
    return 0;
  }

  /* extract model and serial strings from both identify pages */
  ata_id_c_string(old_id, old_model, ATA_ID_PROD, sizeof(old_model));
  ata_id_c_string(new_id, new_model, ATA_ID_PROD, sizeof(new_model));
  ata_id_c_string(old_id, old_serial, ATA_ID_SERNO, sizeof(old_serial));
  ata_id_c_string(new_id, new_serial, ATA_ID_SERNO, sizeof(new_serial));

  if (strcmp((const char*)old_model, (const char*)new_model)) {
    printk("model number mismatch '%s' != '%s'\n", old_model, new_model);
    return 0;
  }

  if (strcmp((const char*)old_serial, (const char*)new_serial)) {
    printk("serial number mismatch '%s' != '%s'\n", old_serial, new_serial);
    return 0;
  }

  return 1;
}

/**
 *  ata_dev_reread_id - Re-read IDENTIFY data
 *  @dev: target ATA device
 *  @readid_flags: read ID flags
 *
 *  Re-read IDENTIFY page and make sure @dev is still attached to
 *  the port.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
  unsigned int class = dev->class;
  /* reuse the port's bounce buffer for the fresh IDENTIFY page */
  u16_t *id = (void *)dev->link->ap->sector_buf;

  /* read ID data */
  /* NOTE(review): the result of ata_dev_read_id() is ignored; if the
   * IDENTIFY itself fails we still run the same-device check on stale
   * buffer contents.  Upstream Linux propagates the error instead —
   * confirm the cnix prototype and return the failure if it can. */
  ata_dev_read_id(dev, &class, readid_flags, id);

  /* is the device still there? */
  if (!ata_dev_same_device(dev, class, id))
    return -ENODEV;

  /* same device confirmed — adopt the freshly read IDENTIFY words */
  memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
  return 0;
}

/**
 *	ata_hpa_resize		-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	u64_t sectors = ata_id_n_sectors(dev->id);
	u64_t native_sectors;
	int rc;

	/* do we need to do it?  only ATA disks with LBA + HPA enabled,
	 * and only if the device isn't flagged as having broken HPA */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES) {
			printk("HPA support seems broken, skipping HPA handling\n");
			while(1);	/* file convention: hang on fatal path */
		}
		return rc;
	}

	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors) {
		/* BUG FIX: the old "HPA detected" message here tested
		 * native_sectors > sectors inside this <=-guarded block
		 * and was dead code; only the shrunken-native warning is
		 * reachable and kept. */
		if (native_sectors < sectors)
			printk("native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		printk("device aborted resize (%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		while(1);
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so n_sectors reflects the unlocked size */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		printk("failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		while(1);
	}

	u64_t new_sectors = ata_id_n_sectors(dev->id);
	printk("HPA unlocked: %llu -> %llu, native %llu\n",
		(unsigned long long)sectors,
		(unsigned long long)new_sectors,
		(unsigned long long)native_sectors);

	return 0;
}

/**
 *  ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *  @pio_mask: pio_mask
 *  @mwdma_mask: mwdma_mask
 *  @udma_mask: udma_mask
 *
 *  Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *  unsigned int xfer_mask.
 *
 *  LOCKING:
 *  None.
 *
 *  RETURNS:
 *  Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
        unsigned long mwdma_mask,
        unsigned long udma_mask)
{
  unsigned long xfer_mask = 0;

  /* each transfer class lives in its own bit-field of the packed mask */
  xfer_mask |= (pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO;
  xfer_mask |= (mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
  xfer_mask |= (udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

  return xfer_mask;
}

/**
 *  ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *  @id: IDENTIFY data to compute xfer mask from
 *
 *  Compute the xfermask for this device. This is not as trivial
 *  as it seems if we must consider early devices correctly.
 *
 *  FIXME: pre IDE drive timing (do we care ?).
 *
 *  LOCKING:
 *  None.
 *
 *  RETURNS:
 *  Computed xfermask
 */
unsigned long ata_id_xfermask(const u16_t *id)
{
  unsigned long pio_mask, mwdma_mask, udma_mask;

  /* Usual case. Word 53 indicates word 64 is valid */
  if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
    /* word 64 bits 0-1 advertise PIO3/PIO4; shift them past the
     * always-present PIO0-2, which the low three bits represent */
    pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
    pio_mask <<= 3;
    pio_mask |= 0x7;
  } else {
    /* If word 64 isn't valid then Word 51 high byte holds
     * the PIO timing number for the maximum. Turn it into
     * a mask.
     */
    u8_t mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
    if (mode < 5) /* Valid PIO range */
      pio_mask = (2 << mode) - 1;
    else
      pio_mask = 1;

    /* But wait.. there's more. Design your standards by
     * committee and you too can get a free iordy field to
     * process. However its the speeds not the modes that
     * are supported... Note drivers using the timing API
     * will get this right anyway
     */
  }

  /* word 63 low bits: supported multiword DMA modes */
  mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

  if (ata_id_is_cfa(id)) {
    /*
     *  Process compact flash extended modes
     */
    int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
    int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

    /* CF advanced modes extend PIO to 5/6 and MWDMA to 3/4 */
    if (pio)
      pio_mask |= (1 << 5);
    if (pio > 1)
      pio_mask |= (1 << 6);
    if (dma)
      mwdma_mask |= (1 << 3);
    if (dma > 1)
      mwdma_mask |= (1 << 4);
  }

  /* word 53 bit 2 says word 88 (UDMA modes) is valid */
  udma_mask = 0;
  if (id[ATA_ID_FIELD_VALID] & (1 << 2))
    udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

  return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/*
 * Fill @desc with an NCQ capability description for @dev.
 * Non-NCQ drives get an empty description; NCQ-capable drives are
 * not handled by this driver yet and hit the hang below.
 */
static int ata_dev_config_ncq(struct ata_device *dev,
             char *desc, size_t desc_sz)
{
  if (!ata_id_has_ncq(dev->id)) {
    *desc = '\0';
    return 0;
  }

	printk("NCQ not supported yet. %s %s\n",
			__func__,__FILE__);
	while(1);
}

/**
 *  sata_scr_valid - test whether SCRs are accessible
 *  @link: ATA link to test SCR accessibility for
 *
 *  Test whether SCRs are accessible for @link.
 *
 *  LOCKING:
 *  None.
 *
 *  RETURNS:
 *  1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_link *link)
{
  struct ata_port *ap = link->ap;

  return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}

/**
 *  sata_pmp_read - read PMP register
 *  @link: link to read PMP register for
 *  @reg: register to read
 *  @r_val: resulting value
 *
 *  Read PMP register.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, AC_ERR_* mask on failure.
 */
static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32_t *r_val)
{
  struct ata_device *pmp_dev = link->ap->link.device;
  struct ata_taskfile tf;
  unsigned int err_mask;

  /* build a READ PORT MULTIPLIER taskfile addressed at @link's port */
  ata_tf_init(pmp_dev, &tf);
  tf.command = ATA_CMD_PMP_READ;
  tf.protocol = ATA_PROT_NODATA;
  tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
  tf.feature = reg;
  tf.device = link->pmp;

  err_mask = ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
             SATA_PMP_RW_TIMEOUT);
  if (err_mask)
    return err_mask;

  /* register value returns packed in the sector-count / LBA bytes,
   * least significant byte first */
  *r_val = tf.nsect | tf.lbal << 8 | tf.lbam << 16 | tf.lbah << 24;
  return 0;
}

/**
 *  sata_pmp_scr_read - read PSCR
 *  @link: ATA link to read PSCR for
 *  @reg: PSCR to read
 *  @r_val: resulting value
 *
 *  Read PSCR @reg into @r_val for @link, to be called from
 *  ata_scr_read().
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, -errno on failure.
 */
int sata_pmp_scr_read(struct ata_link *link, int reg, u32_t *r_val)
{
  unsigned int err_mask;

  /* only the PSCR range is addressable through a PMP */
  if (reg > SATA_PMP_PSCR_CONTROL)
    return -EINVAL;

  err_mask = sata_pmp_read(link, reg, r_val);
  if (err_mask) {
    printk("failed to read SCR %d "
        "(Emask=0x%x)\n", reg, err_mask);
    /* BUG FIX: a while(1) here made the -EIO below unreachable and
     * diverged from sata_pmp_scr_write(), which reports the failure.
     * Return the error to the caller instead of hanging. */
    return -EIO;
  }
  return 0;
}

/**
 *  sata_pmp_write - write PMP register
 *  @link: link to write PMP register for
 *  @reg: register to write
 *  @r_val: value to write
 *
 *  Write PMP register.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, AC_ERR_* mask on failure.
 */
static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32_t val)
{
  struct ata_device *pmp_dev = link->ap->link.device;
  struct ata_taskfile tf;

  /* WRITE PORT MULTIPLIER: the 32-bit value travels in
   * nsect/lbal/lbam/lbah, least significant byte first */
  ata_tf_init(pmp_dev, &tf);
  tf.command = ATA_CMD_PMP_WRITE;
  tf.protocol = ATA_PROT_NODATA;
  tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
  tf.feature = reg;
  tf.device = link->pmp;
  tf.nsect = val & 0xff;
  tf.lbal = (val >> 8) & 0xff;
  tf.lbam = (val >> 16) & 0xff;
  tf.lbah = (val >> 24) & 0xff;

  return ata_exec_internal(pmp_dev, &tf, NULL, DMA_NONE, NULL, 0,
         SATA_PMP_RW_TIMEOUT);
}

/**
 *  sata_pmp_scr_write - write PSCR
 *  @link: ATA link to write PSCR for
 *  @reg: PSCR to write
 *  @val: value to be written
 *
 *  Write @val to PSCR @reg for @link, to be called from
 *  ata_scr_write() and ata_scr_write_flush().
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  0 on success, -errno on failure.
 */
int sata_pmp_scr_write(struct ata_link *link, int reg, u32_t val)
{
  unsigned int err_mask;

  /* only the PSCR range is addressable through a PMP */
  if (reg > SATA_PMP_PSCR_CONTROL)
    return -EINVAL;

  err_mask = sata_pmp_write(link, reg, val);
  if (!err_mask)
    return 0;

  printk("failed to write SCR %d "
      "(Emask=0x%x)\n", reg, err_mask);
  return -EIO;
}

/**
 *  sata_scr_read - read SCR register of the specified port
 *  @link: ATA link to read SCR for
 *  @reg: SCR to read
 *  @val: Place to store read value
 *
 *  Read SCR register @reg of @link into *@val.  This function is
 *  guaranteed to succeed if @link is ap->link, the cable type of
 *  the port is SATA and the port implements ->scr_read.
 *
 *  LOCKING:
 *  None if @link is ap->link.  Kernel thread context otherwise.
 *
 *  RETURNS:
 *  0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_link *link, int reg, u32_t *val)
{
  /* links behind a port multiplier are read via PMP registers */
  if (!ata_is_host_link(link))
    return sata_pmp_scr_read(link, reg, val);

  if (!sata_scr_valid(link))
    return -EOPNOTSUPP;
  return link->ap->ops->scr_read(link, reg, val);
}

/**
 *  sata_scr_write - write SCR register of the specified port
 *  @link: ATA link to write SCR for
 *  @reg: SCR to write
 *  @val: value to write
 *
 *  Write @val to SCR register @reg of @link.  This function is
 *  guaranteed to succeed if @link is ap->link, the cable type of
 *  the port is SATA and the port implements ->scr_read.
 *
 *  LOCKING:
 *  None if @link is ap->link.  Kernel thread context otherwise.
 *
 *  RETURNS:
 *  0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32_t val)
{
  /* links behind a port multiplier are written via PMP registers */
  if (!ata_is_host_link(link))
    return sata_pmp_scr_write(link, reg, val);

  if (!sata_scr_valid(link))
    return -EOPNOTSUPP;
  return link->ap->ops->scr_write(link, reg, val);
}

/**
 *  ata_mode_string - convert xfer_mask to string
 *  @xfer_mask: mask of bits supported; only highest bit counts.
 *
 *  Determine string which represents the highest speed
 *  (highest bit in @modemask).
 *
 *  LOCKING:
 *  None.
 *
 *  RETURNS:
 *  Constant C string representing highest speed listed in
 *  @mode_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
  /* entry i names the transfer mode at bit i of the packed mask */
  static const char * const xfer_mode_str[] = {
    "PIO0",
    "PIO1",
    "PIO2",
    "PIO3",
    "PIO4",
    "PIO5",
    "PIO6",
    "MWDMA0",
    "MWDMA1",
    "MWDMA2",
    "MWDMA3",
    "MWDMA4",
    "UDMA/16",
    "UDMA/25",
    "UDMA/33",
    "UDMA/44",
    "UDMA/66",
    "UDMA/100",
    "UDMA/133",
    "UDMA7",
  };
  int bit = fls(xfer_mask) - 1;

  /* empty mask or a bit beyond the table has no name */
  if (bit < 0 || bit >= ARRAY_SIZE(xfer_mode_str))
    return "<n/a>";
  return xfer_mode_str[bit];
}

/* A PATA device reporting in on a SATA cable sits behind a bridge. */
static inline u8_t ata_dev_knobble(struct ata_device *dev)
{
  struct ata_port *ap = dev->link->ap;

  if (ap->cbl != ATA_CBL_SATA)
    return 0;
  return !ata_id_is_sata(dev->id);
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	const u16_t *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	unsigned char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	unsigned char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev)){
		printk("device not enabled. %s in %s\n",__func__,__FILE__);
		while(1);
	}

	if (((ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		printk("WARNING: ATAPI is %s, device ignored.\n",
			 "not supported with this driver");
		while(1);
	}

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
#if 0
	printk("%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			   "85:%04x 86:%04x 87:%04x 88:%04x\n",
			   __func__,
			   id[49], id[82], id[83], id[84],
			   id[85], id[86], id[87], id[88]);
#endif
	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			/* (message typo "accessable" fixed) */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				printk("supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				printk("supports DRM functions and may "
					       "not be fully accessible.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			printk("%s: %s, %s, max %s\n",
				revbuf, modelbuf, fwrevbuf,
				ata_mode_string(xfer_mask));
			/* BUG FIX: n_sectors is u64_t but was printed with
			 * "%u" after truncation to unsigned long; use %llu
			 * like the rest of this file (>2TB disks printed
			 * wrong before). */
			printk("%llu sectors, multi %u: %s %s\n",
				(unsigned long long)dev->n_sectors,
				dev->multi_count, lba_desc, ncq_desc);

			/* fill the first entry of sd_partition_table */
			sd_partition_table.parts[0].from = 0;
			sd_partition_table.parts[0].size = dev->n_sectors;

		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			printk("%s: %s, %s, max %s\n",
				revbuf,	modelbuf, fwrevbuf,
				ata_mode_string(xfer_mask));
			/* BUG FIX: same u64 format fix as the LBA branch */
			printk("%llu sectors, multi %u, CHS %u/%u/%u\n",
				(unsigned long long)dev->n_sectors,
				dev->multi_count, dev->cylinders,
				dev->heads, dev->sectors);
		}
		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32_t sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
				printk("unsupported CDB len\n");
				while(1);
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				printk("failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		printk("ATAPI: %s, %s, max %s%s%s%s\n",
			  modelbuf, fwrevbuf,
			  ata_mode_string(xfer_mask),
			  cdb_intr_string, atapi_an_string,
			  dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		printk("applying bridge limits %s %s\n",__func__,__FILE__);
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

#if 0
	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}
#endif

	/* give the low-level driver its per-device hook */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

#if 0
	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
			       "firmware update to be fully functional.\n");
		ata_dev_printk(dev, KERN_WARNING, "         contact the vendor "
			       "or visit http://ata.wiki.kernel.org.\n");
	}

#endif
	return 0;
}

/**
 *  __ata_port_freeze - freeze port
 *  @ap: ATA port to freeze
 *
 *  This function is called when HSM violation or some other
 *  condition disrupts normal operation of the port.  Frozen port
 *  is not allowed to perform any operation until the port is
 *  thawed, which usually follows a successful reset.
 *
 *  ap->ops->freeze() callback can be used for freezing the port
 *  hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *  port cannot be frozen hardware-wise, the interrupt handler
 *  must ack and clear interrupts unconditionally while the port
 *  is frozen.
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
  /* let the low-level driver quiesce the hardware first, then mark
   * the port frozen (order preserved from the original) */
  if (ap->ops->freeze)
    ap->ops->freeze(ap);

  ap->pflags |= ATA_PFLAG_FROZEN;
}

/**
 *  ata_eh_thaw_port - EH helper to thaw port
 *  @ap: ATA port to thaw
 *
 *  Thaw frozen port @ap.
 *
 *  LOCKING:
 *  None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
  /* ports without an error handler are never frozen */
  if (!ap->ops->error_handler)
    return;

  /* clear the frozen flag before the optional hardware thaw hook */
  ap->pflags &= ~ATA_PFLAG_FROZEN;

  if (ap->ops->thaw)
    ap->ops->thaw(ap);
}

/*
 * Invoke @reset on @link, optionally forgetting the previous
 * classification of both devices first.
 */
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
      unsigned int *classes, unsigned long deadline,
      bool clear_classes)
{
	int i;

	if (clear_classes) {
		for (i = 0; i < 2; i++)
			classes[link->device[i].devno] = ATA_DEV_UNKNOWN;
	}

	return reset(link, classes, deadline);
}

/* used by ata_link_reset */
/* Successive reset-attempt budgets in milliseconds, tried in order. */
static const unsigned long ata_eh_reset_timeouts[] = {
  10000,  /* most drives spin up by 10sec */
  10000,  /* > 99% working drives spin up before 20sec */
  35000,  /* give > 30 secs of idleness for retarded devices */
   5000,  /* and sweet one last chance */
  ULONG_MAX, /* > 1 min has elapsed, give up */
};

/*
 * ata_link_reset - reset an ATA link and classify the attached devices.
 * @link: link to reset
 * @prereset/@softreset/@hardreset/@postreset: controller callbacks;
 *	hardreset is preferred when both reset methods are available.
 *
 * Mirrors the Linux EH reset sequence: drop devices to PIO0, run
 * prereset, freeze the port, perform the reset, then thaw, run
 * postreset and sanitize the classification results.
 */
void ata_link_reset(struct ata_link *link,
			ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
     	ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned int *classes = ehc->classes;
	struct ata_port *ap = link->ap;
	unsigned int lflags = link->flags;
	struct ata_device *dev;
	ata_reset_fn_t reset;
	int rc, i;
	int try = 0;
	u32_t sstatus;

	for (i = 0; i < 2; i++) {
		dev = &link->device[i];
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* prefer hardreset */
	reset = NULL;
	if (hardreset)
		reset = hardreset;
	else if (softreset)
		reset = softreset;

	if (prereset) {
		unsigned long deadline = ata_deadline(nowticks,
					ATA_EH_PRERESET_TIMEOUT);

		rc = prereset(link, deadline);
		if (rc) {
			if (rc == -ENOENT) {
				int iter;

				printk("port disabled. ignoring.\n");
				/* BUG FIX: this loop indexed link->device[]
				 * with the stale outer variable 'i' (left at
				 * 2 by the loop above) instead of 'iter',
				 * reading past the end of the array. */
				for (iter = 0; iter < 2; iter++) {
					dev = &link->device[iter];
					classes[dev->devno] = ATA_DEV_NONE;
				}

				rc = 0;
			} else {
				printk("prereset failed (errno=%d)\n", rc);
				/* BUG FIX: this hang previously ran even for
				 * the handled -ENOENT case above, making the
				 * "ignoring" path unreachable.  Hang only on
				 * real prereset failures. */
				printk("%s %s\n",__func__,__FILE__);
				while(1);
			}
		}

		/* prereset() might have cleared ATA_EH_RESET.  If so,
		 * bang classes, thaw and return.
		 */
#if 0
		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
			int iter;
			for (iter = 0; iter < 2; iter++) {
				dev = &link->device[iter];	/* was: device[i] */
				classes[dev->devno] = ATA_DEV_NONE;
			}

			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
			    ata_is_host_link(link))
				ata_eh_thaw_port(ap);
			printk("ata_dev_reset returned prematurely. %s %s\n",
				__func__,__FILE__);
		}
#endif
	}
	/*
	 * Perform reset
	 */
	if (ata_is_host_link(link))
		__ata_port_freeze(ap);

	/* first entry of the escalating reset timeout table */
	unsigned long deadline = ata_deadline(nowticks, ata_eh_reset_timeouts[try++]);

	if (reset) {
		/* mark that this EH session started with reset */
		ehc->last_reset = nowticks;
		if (reset == hardreset)
			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
		else
			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;

		rc = ata_do_reset(link, reset, classes, deadline, true);
		if (rc/* && rc != -EAGAIN*/) {
			printk("ATA link is failed : %d. %s %s\n",rc,__func__,__FILE__);
			while(1);
		}
	}

	/*
	 * Post-reset processing
	 */
	for (i = 0; i < 2; i++) {
		dev = &link->device[i];
		/* After the reset, the device state is PIO 0 and the
		 * controller state is undefined.  Reset also wakes up
		 * drives from sleeping mode.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->flags &= ~ATA_DFLAG_SLEEPING;

		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			continue;

		/* apply class override */
		if (lflags & ATA_LFLAG_ASSUME_ATA)
			classes[dev->devno] = ATA_DEV_ATA;
		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
	}

	/* record current link speed */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		link->sata_spd = (sstatus >> 4) & 0xf;

	/* thaw the port */
	if (ata_is_host_link(link))
		ata_eh_thaw_port(ap);

	/* postreset() should clear hardware SError.  Although SError
	 * is cleared during link resume, clearing SError here is
	 * necessary as some PHYs raise hotplug events after SRST.
	 * This introduces race condition where hotplug occurs between
	 * reset and here.  This race is mediated by cross checking
	 * link onlineness and classification result later.
	 */
	if (postreset)
		postreset(link, classes);

	/*
	 * Some controllers can't be frozen very well and may set
	 * spuruious error conditions during reset.  Clear accumulated
	 * error information.  As reset is the final recovery action,
	 * nothing is lost by doing this.
	 */
	memset(&link->eh_info, 0, sizeof(link->eh_info));
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;

	/* sanitize classification against the observed link state */
	for (i = 0; i < 2; i++) {
		dev = &link->device[i];
		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
				printk("link online "
				         "but device misclassifed\n");
				classes[dev->devno] = ATA_DEV_NONE;
			}
		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
			if (ata_class_enabled(classes[dev->devno]))
				printk("link offline, "
				         "clearing class %d to NONE\n",
				         classes[dev->devno]);
			classes[dev->devno] = ATA_DEV_NONE;
		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
			printk("link status unknown, "
			         "clearing UNKNOWN to NONE\n");
			classes[dev->devno] = ATA_DEV_NONE;
		}
	}

	/* reset successful, schedule revalidation */
	ehc->last_reset = nowticks;  /* update to completion time */
	ehc->i.action |= ATA_EH_REVALIDATE;
}

/**
 *  sata_std_hardreset - COMRESET w/o waiting or classification
 *  @link: link to reset
 *  @class: resulting class of attached device
 *  @deadline: deadline jiffies for the operation
 *
 *  Standard SATA COMRESET w/o waiting or classification.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 if link offline, -EAGAIN if link online, -errno on errors.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
           unsigned long deadline)
{
  const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
  bool online;
  int rc;

  /* issue COMRESET; classification is left to the caller */
  rc = sata_link_hardreset(link, timing, deadline, &online, NULL);

  /* an online link still needs follow-up work, so report -EAGAIN */
  if (online)
    return -EAGAIN;
  return rc;
}

/* The two library-provided hardreset implementations. */
static inline int ata_is_builtin_hardreset(ata_reset_fn_t reset)
{
  return reset == sata_std_hardreset || reset == sata_sff_hardreset;
}
 
void ata_dev_reset(struct ata_port *ap)
{
  ata_reset_fn_t softreset = ap->ops->softreset;
  ata_reset_fn_t hardreset = ap->ops->hardreset;

  /* Ignore ata_sff_softreset if ctl isn't accessible and
   * built-in hardresets if SCR access isn't available.
   */
  if (!ap->ioaddr.ctl_addr && softreset == ata_sff_softreset)
    softreset = NULL;
  if (!sata_scr_valid(&ap->link) && ata_is_builtin_hardreset(hardreset))
    hardreset = NULL;

  ata_link_reset(&ap->link, ap->ops->prereset, softreset, hardreset,
      ap->ops->postreset);
}

/*
 * Bring one port to a usable state: reset the link, classify and
 * identify device 0, then apply the generic configuration.
 */
void setup_ata_port(struct ata_port *ap)
{
	struct ata_device *dev;
	unsigned readid_flags = 0;

	/* IDENTIFY will be issued right after the reset */
	readid_flags |= ATA_READID_POSTRESET;
	dev = &ap->link.device[0];
	/* NOTE(review): ATA_DRQ (0x08) happens to coincide with the obsolete
	 * bit 3 of the device-control register (Linux's ATA_DEVCTL_OBS), so
	 * this works numerically, but the constant name looks wrong here —
	 * confirm against cnix/ata.h. */
	ap->ctl = ATA_DRQ;
	/*
	 *ap[0]: HD drive
	 *ap[1]: CDROM
	 */
/*
	if(ap == ap->host->ports[0])
		dev->class = ATA_DEV_ATA;
	else 
		dev->class = ATA_DEV_ATAPI;
*/
	ata_dev_reset(ap);
	/* the reset path recorded the device class in the EH context */
	dev->class = ap->link.eh_context.classes[0];
	/* NOTE(review): return value of ata_dev_read_id() is ignored — a
	 * failed IDENTIFY would feed stale data to ata_dev_configure(). */
	ata_dev_read_id(dev,&dev->class,readid_flags,dev->id);
	ata_dev_configure(dev);
}

/**
 *  ata_port_probe - Mark port as enabled
 *  @ap: Port for which we indicate enablement
 *
 *  Modify @ap data structure such that the system
 *  thinks that the entire port is enabled.
 *
 *  LOCKING: host lock, or some other form of
 *  serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
  /* clearing DISABLED is all it takes to mark the port usable */
  ap->flags &= ~ATA_FLAG_DISABLED;
}

/*
 * Final per-port setup for every port on @host: pick the cable type,
 * initialize the link speed limit, then probe and configure the port.
 */
void ata_piix_postinit2(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* slave links are not handled by this driver yet */
		if (ap->slave_link) {
			/* fixed typo in diagnostic: "linke" -> "link" */
			printk("Do we have a slave link? %s\n", __FILE__);
			while (1);
			//sata_link_init_spd(ap->slave_link);
		}

		ata_port_probe(ap);
		setup_ata_port(ap);
	}
}

/* shape of a raw IRQ handler as registered with put_irq_handler() */
typedef void (*irq_handler_t)(struct regs_t*);
/*
 * Start the host, decide legacy vs native interrupt wiring, hook the
 * IRQ handlers, then finish per-port setup via ata_piix_postinit2().
 */
void ata_piix_postinit(struct ata_host *host,irq_handler_t irq_handler)
{
	struct pci_device *pdev = host->dev;
	int legacy_mode = 0;

	ata_host_start(host);

  if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
    u8_t tmp8, mask;

    /* TODO: What if one channel is in native mode ... */
    /* PCI_CLASS_PROG bits 0 and 2 select native mode for the primary
     * and secondary channels; if either is clear we run legacy */
    pci_read_config_byte(pdev->bus,pdev->devfn,PCI_CLASS_PROG, &tmp8);
    mask = (1 << 2) | (1 << 0);
    if ((tmp8 & mask) != mask)
      legacy_mode = 1;
  }

	if(!legacy_mode && pdev->irq){
		printk("Standard mode not implemented yet. In %s of %s\n",
				__func__,__FILE__);
		while(1);
	}else if(legacy_mode){
		/* legacy IDE uses the fixed ISA IRQs 14 (0xe) and 15 (0xf) */
		set_bottom(IDE_B,ata_bottom);
		put_irq_handler(0xe,irq_handler);
		put_irq_handler(0xf,irq_handler);
	}

	ata_piix_postinit2(host);
}

/*
 * Driver entry point: locate the controller on the PCI bus, match it
 * against the PIIX id table, prepare the SFF host and finish init.
 */
void ata_piix_init()
{
	struct pci_device *pdev;
	const struct pci_device_id *ent;
	struct ata_port_info port_info[2];
	struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
	struct ata_host *host;

	pdev = find_dev(ATA_BUS_DOMAIN,ATA_BUS_NR,ATA_BUS_DEVFN);
	if(!pdev){
		printk("can not find the ata drive.\n");
		while(1);
	}

	pirq_enable_irq_ata(pdev);

	ent = pci_match_id(piix_pci_tbl,pdev);
	/* ROBUSTNESS FIX: ent was dereferenced unchecked below; an
	 * unrecognized controller would have faulted on ent->driver_data */
	if(!ent){
		printk("unsupported ata controller.\n");
		while(1);
	}

	/* both channels use the same per-chip port template */
	port_info[0] = piix_port_info[ent->driver_data];
	port_info[1] = piix_port_info[ent->driver_data];

	/* (removed unused local 'port_flags') */

	pci_enable_device(pdev);

	ata_pci_sff_prepare_host(pdev,ppi,&host);

	pci_set_master(pdev);

	ata_piix_postinit(host,ata_piix_interrupt);
	ahost = host;
}
