#ifndef __LIBATA_H__
#define __LIBATA_H__
#include <cnix/errno.h>
#include <cnix/string.h>

/*
 * Describes a test against one PCI configuration register: read
 * `width` bytes at config offset `reg`, AND with `mask`, and compare
 * the result against `val`.
 */
struct pci_bits {
  unsigned int    reg;  /* PCI config register to read */
  unsigned int    width;  /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
  unsigned long   mask;   /* bits of interest in the value read */
  unsigned long   val;    /* expected value after masking */
};

/* Error-handling (EH) tuning constants.  All timeouts/intervals are in
 * milliseconds (see the "1 min" note on ATA_EH_PROBE_TRIAL_INTERVAL). */
enum {
  /* speed down verdicts */
  ATA_EH_SPDN_NCQ_OFF   = (1 << 0),
  ATA_EH_SPDN_SPEED_DOWN    = (1 << 1),
  ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
  ATA_EH_SPDN_KEEP_ERRORS   = (1 << 3),

  /* error flags */
  ATA_EFLAG_IS_IO     = (1 << 0),
  ATA_EFLAG_DUBIOUS_XFER    = (1 << 1),

  /* error categories */
  ATA_ECAT_NONE     = 0,
  ATA_ECAT_ATA_BUS    = 1,
  ATA_ECAT_TOUT_HSM   = 2,
  ATA_ECAT_UNK_DEV    = 3,
  ATA_ECAT_DUBIOUS_NONE   = 4,
  ATA_ECAT_DUBIOUS_ATA_BUS  = 5,
  ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
  ATA_ECAT_DUBIOUS_UNK_DEV  = 7,
  ATA_ECAT_NR     = 8,  /* number of error categories */

  /* default timeout for EH-issued commands, ms */
  ATA_EH_CMD_DFL_TIMEOUT    =  5000,

  /* always put at least this amount of time between resets */
  ATA_EH_RESET_COOL_DOWN    =  5000,

  /* Waiting in ->prereset can never be reliable.  It's
   * sometimes nice to wait there but it can't be depended upon;
   * otherwise, we wouldn't be resetting.  Just give it enough
   * time for most drives to spin up.
   */
  ATA_EH_PRERESET_TIMEOUT   = 10000,
  ATA_EH_FASTDRAIN_INTERVAL =  3000,

  /* retries for UNIT ATTENTION handling — TODO confirm against EH code */
  ATA_EH_UA_TRIES     = 5,

  /* probe speed down parameters, see ata_eh_schedule_probe() */
  ATA_EH_PROBE_TRIAL_INTERVAL = 60000,  /* 1 min */
  ATA_EH_PROBE_TRIALS   = 2,
};

/*
 * Masks selecting the PIO, MWDMA and UDMA groups within a combined
 * transfer-mode bitmask: each group is ATA_NR_*_MODES bits wide,
 * positioned at its ATA_SHIFT_* offset.
 */
enum ata_xfer_mask {
  ATA_MASK_PIO    = ((1LU << ATA_NR_PIO_MODES) - 1)
          << ATA_SHIFT_PIO,
  ATA_MASK_MWDMA    = ((1LU << ATA_NR_MWDMA_MODES) - 1)
          << ATA_SHIFT_MWDMA,
  ATA_MASK_UDMA   = ((1LU << ATA_NR_UDMA_MODES) - 1)
          << ATA_SHIFT_UDMA,
};

/* Host state machine (HSM) states for a command in flight. */
enum hsm_task_states {
  HSM_ST_IDLE,    /* no command on going */
  HSM_ST_FIRST,   /* (waiting the device to)
           write CDB or first data block */
  HSM_ST,     /* (waiting the device to) transfer data */
  HSM_ST_LAST,    /* (waiting the device to) complete command */
  HSM_ST_ERR,   /* error */
};

extern const unsigned long sata_deb_timing_normal[];  
extern const unsigned long sata_deb_timing_hotplug[];
extern const unsigned long sata_deb_timing_long[];

/*
 * Select the SATA debounce timing table for an EH context: hotplug
 * events use the hotplug table, everything else the normal one.
 */
static inline const unsigned long *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
  int hotplugged = (ehc->i.flags & ATA_EHI_HOTPLUGGED) != 0;

  return hotplugged ? sata_deb_timing_hotplug : sata_deb_timing_normal;
}

static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
  qc->tf.ctl |= ATA_NIEN;
}

/*
 * Classify a status register value.
 *
 * Returns 1 when the device is ready (BSY clear), -ENODEV when the
 * bus reads back all-ones (no device or device not ready), and 0
 * while the device is still busy.
 */
static inline int ata_check_ready(u8_t status)
{
  /* 0xff indicates either no device or device not ready.  (BSY is
   * also set in 0xff, so checking this first preserves behavior.) */
  if (status == 0xff)
    return -ENODEV;

  return (status & ATA_BUSY) ? 0 : 1;
}

/* Largest representable jiffy offset; also doubles as the "infinite"
 * timeout value returned by msecs_to_jiffies() below. */
#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1)
/* Milliseconds per second, for msec <-> jiffy conversions. */
#define MSEC_PER_SEC	1000
/*
 * msecs_to_jiffies - convert a duration in milliseconds to ticks
 * @m: duration in milliseconds; a value that is negative when viewed
 *     as int requests an infinite timeout (MAX_JIFFY_OFFSET)
 *
 * Assumes HZ <= 1000 and that MSEC_PER_SEC is an exact multiple of
 * HZ; rounds up so that any nonzero @m yields at least one tick.
 *
 * Declared static inline (not plain static) to match every other
 * function in this header: a plain static definition in a header
 * emits an unused copy — and typically a warning — in every
 * translation unit that includes it.
 */
static inline unsigned long msecs_to_jiffies(const unsigned int m)
{
  /*
   * Negative value, means infinite timeout:
   */
  if ((int)m < 0)
    return MAX_JIFFY_OFFSET;

  /*
   * HZ is equal to or smaller than 1000, and 1000 is a nice
   * round multiple of HZ, divide with the factor between them,
   * but round upwards:
   */
  return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
}

/*
 * Compute an absolute deadline (in jiffies) from a starting tick
 * count and a timeout expressed in milliseconds.
 */
static inline unsigned long ata_deadline(unsigned long from_jiffies,
           unsigned long timeout_msecs)
{
  unsigned long timeout_jiffies = msecs_to_jiffies(timeout_msecs);

  return from_jiffies + timeout_jiffies;
}

/*      
 * device helpers
 */ 
/* Return 1 if @class names a usable (enabled) device type, else 0. */
static inline unsigned int ata_class_enabled(unsigned int class)
{
  switch (class) {
  case ATA_DEV_ATA:
  case ATA_DEV_ATAPI:
  case ATA_DEV_PMP:
  case ATA_DEV_SEMB:
    return 1;
  default:
    return 0;
  }
}
  
/* Return 1 if @class names a recognized-but-unsupported device type. */
static inline unsigned int ata_class_disabled(unsigned int class)
{
  switch (class) {
  case ATA_DEV_ATA_UNSUP:
  case ATA_DEV_ATAPI_UNSUP:
  case ATA_DEV_PMP_UNSUP:
  case ATA_DEV_SEMB_UNSUP:
    return 1;
  default:
    return 0;
  }
}

/* A class is "absent" when it is neither enabled nor disabled. */
static inline unsigned int ata_class_absent(unsigned int class)
{
  if (ata_class_enabled(class))
    return 0;
  if (ata_class_disabled(class))
    return 0;
  return 1;
}

/* Convenience wrapper: is @dev's class an enabled one? */
static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
{
  unsigned int class = dev->class;

  return ata_class_enabled(class);
}

/* Convenience wrapper: is @dev's class a disabled one? */
static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
{
  unsigned int class = dev->class;

  return ata_class_disabled(class);
}
  
/* Convenience wrapper: is @dev's class absent (neither state)? */
static inline unsigned int ata_dev_absent(const struct ata_device *dev)
{
  unsigned int class = dev->class;

  return ata_class_absent(class);
}

/* Nonzero when the port advertises port-multiplier support. */
static inline int sata_pmp_supported(struct ata_port *ap)
{
  int pmp_capable = ap->flags & ATA_FLAG_PMP;

  return pmp_capable;
}

/* Nonzero when a port multiplier is actually attached (has links). */
static inline int sata_pmp_attached(struct ata_port *ap)
{
  return ap->nr_pmp_links ? 1 : 0;
}

static inline int ata_is_host_link(const struct ata_link *link)
{
  return link == &link->ap->link || link == link->ap->slave_link;
}

/*
 * Derive an error mask from an ATA status byte: BSY/DRQ stuck means
 * a host state machine problem, ERR/DF means a device error, and a
 * clean status yields 0.  BSY/DRQ takes priority over ERR/DF.
 */
static inline unsigned int ac_err_mask(u8_t status)
{
  unsigned int mask = 0;

  if (status & (ATA_BUSY | ATA_DRQ))
    mask = AC_ERR_HSM;
  else if (status & (ATA_ERR | ATA_DF))
    mask = AC_ERR_DEV;
  return mask;
}
 
/* Like ac_err_mask() but never returns 0: a clean status is mapped
 * to AC_ERR_OTHER (caller already knows something went wrong). */
static inline unsigned int __ac_err_mask(u8_t status)
{
  unsigned int mask = ac_err_mask(status);

  return mask ? mask : AC_ERR_OTHER;
}

/* 1 when @tag is the reserved internal-command tag, else 0. */
static inline unsigned int ata_tag_internal(unsigned int tag)
{
  return (tag == ATA_TAG_INTERNAL) ? 1 : 0;
}

/* 1 when @tag indexes a real queue slot, else 0. */
static inline unsigned int ata_tag_valid(unsigned int tag)
{
  return tag < ATA_MAX_QUEUE;
}

/* Map a tag to its queued-command slot on @ap, or NULL for an
 * out-of-range tag. */
static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
                   unsigned int tag)
{
  struct ata_queued_cmd *qc = NULL;

  if (likely(ata_tag_valid(tag)))
    qc = &ap->qcmd[tag];
  return qc;
}

/*
 * Zero a taskfile and fill in the port's control byte plus the
 * device-select byte (device 1 gets the ATA_DEV1 bit set).
 */
static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
{
  memset(tf, 0, sizeof(*tf));

  tf->ctl = dev->link->ap->ctl;
  tf->device = (dev->devno == 0) ? ATA_DEVICE_OBS
             : (ATA_DEVICE_OBS | ATA_DEV1);
}

/*
 * Reset a queued command to a pristine "nothing queued" state and
 * reinitialize its taskfile for the owning device.
 */
static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
{
  /* transfer bookkeeping */
  qc->dma_dir = DMA_NONE;
  qc->sg = NULL;
  qc->cursg = NULL;
  qc->cursg_ofs = 0;
  qc->n_elem = 0;

  /* byte counters and status */
  qc->flags = 0;
  qc->nbytes = 0;
  qc->extrabytes = 0;
  qc->curbytes = 0;
  qc->err_mask = 0;
  qc->sect_size = ATA_SECT_SIZE;

  ata_tf_init(qc->dev, &qc->tf);

  /* init result_tf such that it indicates normal completion */
  qc->result_tf.command = ATA_DRDY;
  qc->result_tf.feature = 0;
}

/**
 *  ata_ncq_enabled - Test whether NCQ is enabled
 *  @dev: ATA device to test for
 *
 *  LOCKING:
 *  spin_lock_irqsave(host lock)
 *
 *  RETURNS:
 *  1 if NCQ is enabled for @dev, 0 otherwise.
 */
static inline int ata_ncq_enabled(struct ata_device *dev)
{
  /* NCQ is off when forced to PIO or explicitly disabled. */
  if (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF))
    return 0;
  return (dev->flags & ATA_DFLAG_NCQ) ? 1 : 0;
}

#endif
