#ifndef DMA_H
#define DMA_H
/*
  author Sylvain Bertrand <sylvain.bertrand@gmail.com>
  Protected by linux GNU GPLv2
  Copyright 2012-2014
*/

/* Error/status codes returned by the dmas_* entry points (see dmas_cpy). */
#define DMAS_ERR		903
#define DMAS_RING_TIMEOUT	904
#define DMAS_FENCE_TIMEOUT	905

/* Number of DMA engines driven by this module. */
#define DMAS_N 2
/* log2 of the ring size in dwords: the ring holds 2^16 = 0x10000 dwords. */
#define DMA_RING_LOG2_DWS 16
/* Prefetch granularity in dwords — presumably hw prefetch; confirm in dma.c. */
#define DMA_RING_PF_DWS 16
#define DMA_RING_PF_DW_MASK (DMA_RING_PF_DWS - 1)
/*
 * Read and write pointers are byte indexes in the ring buffer, but they must
 * be dword aligned. Additionally, DMA_RING_LOG2_DWS = 16 means the ring holds
 * 0x10000 dwords, i.e. a dword-index mask of 0xffff. Shifting that mask left
 * by 2 bits (0xffff << 2 == 0x3fffc) turns it back into a byte-index mask.
 */
#define DMA_RING_BYTE_MASK 0x3fffc
/* Per-engine DMA state: one instance per engine (DMAS_N of them). */
struct dma {
	struct fence fence;	/* completion fence for submitted work */
	struct ring ring;	/* ring buffer the CPU writes packets into */
	spinlock_t lock;	/* NOTE(review): presumably guards wptr/ring writes — confirm in dma.c */
	u32 wptr; /* bytes index, dw aligned in ring buffer: accounted by CPU */
};

/*
 * One string per DMA engine — presumably engine names for logging; verify
 * the initializer in the translation unit that defines DMAS_C. The DMAS_C
 * macro selects the single defining translation unit; everyone else gets
 * the extern declaration.
 */
#ifdef DMAS_C
char *dmas_str[DMAS_N];
#else
extern char *dmas_str[DMAS_N];
#endif

/* One-time setup of all DMA engines (driver load time). */
void dmas_init_once(struct pci_dev *dev);
/* Re-program the engines after a suspend/reset cycle. */
void dmas_resume(struct pci_dev *dev);
/* Enable DMA interrupt delivery. */
void dmas_intr_ena(struct pci_dev *dev);
/* Clear/reset pending DMA interrupt state. */
void dmas_intr_reset(struct pci_dev *dev);
/* Halt all DMA engines. */
void dmas_stop(struct pci_dev *dev);
/* Write one dword v into the ring of engine `dma` (0 <= dma < DMAS_N). */
void dma_wr(struct pci_dev *dev, u8 dma, u32 v);
/* Publish the CPU-side wptr to the hardware so it consumes queued packets. */
void dma_commit(struct pci_dev *dev, u8 dma);
/* Size in dwords of one copy-packet block written by dma_pkt_cpy_blks_wr. */
#define DMA_PKT_CPY_BLK_DWS_N 5
/*
 * Split a dma_sz-byte copy into hw-sized blocks; returns the block count and
 * stores the (smaller) size of the final block through last_blk_dma_sz.
 * NOTE(review): last_blk_dma_sz is u32 here but u64 in dma_pkt_cpy_blks_wr
 * below — confirm intended width against the definitions in dma.c.
 */
u32 dma_pkt_cpy_blks_count(u64 dma_sz, u32 *last_blk_dma_sz);
/* Queue dma_pkt_cpy_blks_n copy packets for dst <- src on engine `dma`. */
void dma_pkt_cpy_blks_wr(struct pci_dev *dev, u8 dma, u64 dst, u64 src,
				u32 dma_pkt_cpy_blks_n, u64 last_blk_dma_sz);
/* One polling budget: retry up to n_max times, waiting us microseconds each. */
struct dmas_timeout_info {
	u32 n_max;	/* maximum number of polling iterations */
	u32 us;		/* microseconds per iteration — presumably a delay; confirm in dma.c */
};
/* Timeout budgets for the two wait phases of a DMA operation. */
struct dmas_timeouts_info {
	struct dmas_timeout_info ring;	/* waiting for ring space (cf. DMAS_RING_TIMEOUT) */
	struct dmas_timeout_info fence;	/* waiting for fence completion (cf. DMAS_FENCE_TIMEOUT) */
};
/*
 * Copy sz bytes from src to dst using a DMA engine, bounded by t_info.
 * Returns a negative/DMAS_* status on failure — presumably one of DMAS_ERR,
 * DMAS_RING_TIMEOUT or DMAS_FENCE_TIMEOUT; confirm in dma.c.
 */
long dmas_cpy(struct pci_dev *dev, u64 dst, u64 src, u64 sz,
					struct dmas_timeouts_info t_info);
/* Fill dws_n dwords at dst with `constant`; same status semantics as dmas_cpy. */
long dmas_u32_fill(struct pci_dev *dev, u64 dst, u64 dws_n, u32 constant,
					struct dmas_timeouts_info t_info);

/* Pick a DMA engine to submit to; returns an index in [0, DMAS_N). */
u8 dmas_select(struct pci_dev *dev);

/* Disable/enable medium-grain clock gating for the DMA engines. */
void dmas_mgcg_dis(struct pci_dev *dev);
void dmas_mgcg_ena(struct pci_dev *dev);
#endif
