#include "ide.h"
#include "arch/inout.h"
#include "kernel/irq.h"
#include "kernel/proc.h"
#include "kernel/page.h"
#include "kernel/heap.h"

#define MAX_RETRIES		(65535)

// Scan the whole PCI bus space via the legacy 0xCF8/0xCFC config
// mechanism looking for an IDE controller (class 01h, subclass 01h).
// On success the location is stored through _bus/_dev/_func and true
// is returned; false when no IDE controller exists.
static bool detect_pci_ide_controller(uint* _bus, uint* _dev, uint* _func)
{
	uint bus, dev, func;
	for (bus = 0; bus < 256; ++bus)
	{
		for (dev = 0; dev < 32; ++dev)
		{
			for (func = 0; func < 8; ++func)
			{
				uint val;
				// read config dword at offset 8; class/subclass are in
				// the top 16 bits. 1u << 31 (enable bit) avoids the UB
				// of left-shifting into the sign bit of a signed int.
				outl((1u << 31) | (bus << 16) | (dev << 11) | (func << 8) | 8, 0xcf8);
				val = inl(0xcfc) >> 16;

				// 0x101: base class = 01h (mass storage controller)
				// sub class = 01h (ide controller)
				if (val == 0x101) goto _found;
				else if (val == 0x106)
					printk("SATA device found which is not supported.\n");

				// if func = 0, check if it is a multi-functional device
				if (!func)
				{
					// header type lives in the upper bytes of the dword
					// at offset 12
					outl((1u << 31) | (bus << 16) | (dev << 11) | (func << 8) | 12, 0xcf8);
					val = inl(0xcfc) >> 16;

					// bit7 = 1 means a multi-functional device
					if (!(val & 0x80)) break;
				}
			}
		}
	}

	return false;
_found:
	*_bus = bus;
	*_dev = dev;
	*_func = func;
	return true;
}

// ide channel registers
// per-channel runtime state (register bases, drive info, PRDT, state
// machine) for the two legacy channels: index 0 = primary, 1 = secondary
static ide_channel_t channels[2] = {0};

static void tasklet_handle_pio(uint ch, struct ata_drive_info* drvinfo);
static void tasklet_handle_udma(uint ch, struct ata_drive_info* drvinfo);

// Deferred (tasklet) half of the ATA interrupt: dispatch completion
// handling for the drive currently selected on channel `ch`.
static void ata_tasklet_handler(uint ch)
{
	struct ata_drive_info* info;

	// nothing to do while no drive is selected on this channel
	if (channels[ch].sm.drive == IDE_CHNL_SM_DRIVE_UNKNOWN)
		return;

	info = &channels[ch].drvinfo[ide_sm_drive_idx(ch)];
	if (!(info->flags & ATA_DRIVE_AVAILABLE))
		return;

	// take the UDMA path only when LBA is supported and the drive has
	// not been downgraded to PIO; everything else completes via PIO
	if ((info->flags & ATA_DRIVE_LBA_SUPPORTED)
		&& !(info->flags & ATA_DRIVE_DOWNGRADE_PIO_MODE))
		tasklet_handle_udma(ch, info);
	else
		tasklet_handle_pio(ch, info);
}

// tasklet structure
// one deferred-work descriptor per IDE channel; the trailing field is
// the channel index handed to ata_tasklet_handler when it runs
static tasklet_t ata_tasklet[] =
{
	{
		NULL,
		0, 0,
		ata_tasklet_handler,
		0
	},
	{
		NULL,
		0, 0,
		ata_tasklet_handler,
		1
	},
};

// Hard-IRQ entry for both ATA lines: figure out which channel raised
// the interrupt from the registered devid and defer all real work to
// that channel's tasklet.
static void ata_handler(int irq, void* devid, regs_t* regs)
{
	uint idx = (devid == (void*)DEVID_ATA_MASTER) ? 0 : 1;
	tasklet_schedule(&ata_tasklet[idx]);
}

// IRQ registration records for the two channels; both share the same
// handler and are distinguished by their devid cookie. The actions are
// shared (SA_SHIRQ) and never released (IAF_NORELEASE).
static irqaction_t ata_irqaction[] =
{
	{
		ata_handler,
		IAF_NORELEASE | SA_SHIRQ,
		"ata-master",
		(void*)DEVID_ATA_MASTER,
		NULL,
	},
	{
		ata_handler,
		IAF_NORELEASE | SA_SHIRQ,
		"ata-slave",
		(void*)DEVID_ATA_SLAVE,
		NULL,
	},
};

// Read len (1/2/4) bytes from PCI configuration space of <bus:dev:func>
// at the given register offset through the legacy 0xCF8/0xCFC ports.
// Returns the value zero-extended to uint.
static uint pci_conf_read(uint bus, uint dev, uint func, uint offset, uint len)
{
	// the low two bits of the config address are reserved and must be
	// zero; the byte/word lane is selected on the 0xCFC data port below
	outl((1u << 31) | (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xFC), 0xcf8);
	switch (len)
	{
	case 1:	return inb(0xcfc + (offset & 3));
	case 2: return inw(0xcfc + (offset & 2));
	case 4: default: return inl(0xcfc);
	}
}

// Write len (1/2/4) bytes of `value` to PCI configuration space of
// <bus:dev:func> at the given register offset (legacy 0xCF8/0xCFC).
static void pci_conf_write(uint bus, uint dev, uint func, uint offset, uint len, uint value)
{
	// low two address bits are reserved/zero; sub-dword accesses pick
	// the proper byte/word lane on the 0xCFC data port
	outl((1u << 31) | (bus << 16) | (dev << 11) | (func << 8) | (offset & 0xFC), 0xcf8);
	switch (len)
	{
	case 1:	outb((unsigned char)value, 0xcfc + (offset & 3)); break;
	case 2: outw((unsigned short)value, 0xcfc + (offset & 2)); break;
	case 4: default: outl(value, 0xcfc); break;
	}
}

// Hook the legacy ISA IRQ lines for the IDE channels: IRQ14 for the
// primary channel and IRQ15 for the secondary, but only when the
// channel has at least one usable drive. bus/dev/func are currently
// unused (PCI-routed IRQ probing was abandoned) and kept for interface
// stability.
static void check_set_pci_ide_irq(uint bus, uint dev, uint func)
{
	int ret;
	if (channels[0].drvinfo[0].flags & ATA_DRIVE_AVAILABLE
		|| channels[0].drvinfo[1].flags & ATA_DRIVE_AVAILABLE)
	{
		ret = _request_irq(14, &ata_irqaction[0]);
		kassert(!ret);
	}

	if (channels[1].drvinfo[0].flags & ATA_DRIVE_AVAILABLE
		|| channels[1].drvinfo[1].flags & ATA_DRIVE_AVAILABLE)
	{
		ret = _request_irq(15, &ata_irqaction[1]);
		kassert(!ret);
	}
}

// Read BAR0-BAR4 from the controller's PCI config space and derive the
// I/O port bases for both channels. BAR values of 0 or 1 mean the
// controller is in compatibility mode, so the legacy fixed ports are
// used instead. BAR4 holds the bus-master IDE base; the secondary
// channel's bus-master block sits 8 bytes above the primary's.
static void setup_ide_channel_registers(uint bus, uint dev, uint func)
{
	uint i, bars[5];

	// get the BAR0 - BAR3
	for (i = 0; i < 4; ++i)
		bars[i] = pci_conf_read(bus, dev, func,
		PCI_IDE_CFG_BAR0_OFFSET + i * 4, 4);

	// get the BAR4
	bars[4] = pci_conf_read(bus, dev, func, PCI_IDE_CFG_BAR4_OFFSET, 4);

	// Detect I/O Ports which interface IDE Controller:
	// BAR0: if it is 0x0 or 0x1, the port is 0x1F0.
	// BAR1: if it is 0x0 or 0x1, the port is 0x3F6.
	// BAR2: if it is 0x0 or 0x1, the port is 0x170.
	// BAR3: if it is 0x0 or 0x1, the port is 0x376.
	// (masking with ~3 strips the BAR type/indicator bits; the
	// `bars[n] < 2` term substitutes the legacy port when unset)
	channels[ATA_PRIMARY  ].io_base		= (bars[0] & 0xFFFFFFFC) + 0x1F0 * (bars[0] < 2);
	channels[ATA_PRIMARY  ].ctrl_base	= (bars[1] & 0xFFFFFFFC) + 0x3F6 * (bars[1] < 2);
	channels[ATA_SECONDARY].io_base		= (bars[2] & 0xFFFFFFFC) + 0x170 * (bars[2] < 2);
	channels[ATA_SECONDARY].ctrl_base	= (bars[3] & 0xFFFFFFFC) + 0x376 * (bars[3] < 2);
	channels[ATA_PRIMARY  ].bmide		= (bars[4] & 0xFFFFFFFC) + 0; // Bus Master IDE
	channels[ATA_SECONDARY].bmide		= (bars[4] & 0xFFFFFFFC) + 8; // Bus Master IDE

	dbg_output3("pci-ide: channel0 - iobase:0x%x, ctrlbase:0x%x, bmide:0x%x\n",	\
		channels[ATA_PRIMARY].io_base,  \
		channels[ATA_PRIMARY].ctrl_base,\
		channels[ATA_PRIMARY].bmide);

	dbg_output3("pci-ide: channel1 - iobase:0x%x, ctrlbase:0x%x, bmide:0x%x\n",	\
		channels[ATA_SECONDARY].io_base,   \
		channels[ATA_SECONDARY].ctrl_base, \
		channels[ATA_SECONDARY].bmide);

	dbg_output3("pci-ide: bus master IDE - 0x%x\n", bars[4]);
}

// Burn ~400ns after a drive select / reset: each read of the control
// (alternate status) register costs roughly 100ns, so four reads give
// the settle time the ATA spec requires before BSY is meaningful.
static void ide_delay400ns(uint channel)
{
	int n = 4;
	while (n--)
		ide_ctrl_block_read(channel, 0);
}

// Poll the command-block status register until BSY clears.
// Returns false if the drive stayed busy for MAX_RETRIES polls.
static bool ide_wait_ready(uint channel)
{
	uint retries;
	for (retries = MAX_RETRIES; retries != 0; --retries)
	{
		if (!(ide_cmd_block_read(channel, ATA_REG_STATUS) & ATA_SR_BSY))
			break;
	}
	return retries != 0;
}

// Poll the status register after an IDENTIFY command until the device
// reports data ready, an error, or a device fault.
// Returns 0 when DRQ is set (identify data ready to read), 1 on ERR,
// 2 on device fault, and 3 if the device never responded within
// MAX_RETRIES polls (the previous version could spin forever).
static int ide_wait_identify_result(uint channel)
{
	uint retries;

	for (retries = MAX_RETRIES; retries != 0; --retries)
	{
		uint stat = ide_cmd_block_read(channel, ATA_REG_STATUS);

		// DRQ set: the identify data block is ready to be read
		if (stat & ATA_SR_DRQ) return 0;
		// command error
		if (stat & ATA_SR_ERR) return 1;
		// device fault
		if (stat & ATA_SR_DF) return 2;
	}

	// timed out: report failure instead of hanging the kernel
	return 3;
}

// Every command sent to an ATA drive must be preceded by this check:
// the drive is ready to accept a command once DRDY is set and BSY is
// clear in the status register. Returns false after MAX_RETRIES+1
// unsuccessful polls.
static bool ide_wait_drive_ready(uint channel)
{
	uint attempts;
	for (attempts = 0; attempts <= MAX_RETRIES; ++attempts)
	{
		uint stat = ide_cmd_block_read(channel, ATA_REG_STATUS);
		if ((stat & (ATA_SR_DRDY | ATA_SR_BSY)) == ATA_SR_DRDY)
			return true;
	}
	return false;
}

// Select the active drive (0 = master, 1 = slave) on a channel by
// writing the drive/head select register, then honor the mandatory
// 400ns settle delay. The most recent selection per channel is cached
// so a repeated select (force == false) only waits for drive readiness.
// NOTE(review): the cache keys on `drive` alone -- if `flags` differ
// from the previous call the register is NOT rewritten; confirm no
// caller relies on changed flags with force == false.
static bool ide_select_drive(uint channel, uint drive, bool force, uint flags)
{
	// last drive selected on each channel; 0xFF means "none yet"
	static uint prev_hdd_sel_drv[2] = { 0xFF, 0xFF };
	unsigned char hdd_sel = (unsigned char)(flags | (drive << 4));

	if (prev_hdd_sel_drv[channel] == drive && !force)
		return ide_wait_drive_ready(channel);

	if (!ide_wait_ready(channel)) return false;
	// select the drive
	ide_cmd_block_write(channel, ATA_REG_HDDEVSEL, hdd_sel);
	ide_delay400ns(channel);
	prev_hdd_sel_drv[channel] = drive;
	return true;
}

// Soft-reset both drives on a channel: pulse bit 2 (value 4, SRST) in
// the device control register, with the required settle delay after
// each edge.
static void ide_soft_reset(uint channel)
{
	ide_ctrl_block_write(channel, 0, 4);
	ide_delay400ns(channel);
	ide_ctrl_block_write(channel, 0, 0);
	ide_delay400ns(channel);
}

// IDENTIFY string fields arrive with each 16-bit word byte-swapped;
// swap every byte pair in place to recover readable text. sz must be
// even (asserted).
static void fixstr(char *buf, size_t sz)
{
	size_t idx;
	kassert(!(sz & 1));

	for (idx = 0; idx + 1 < sz; idx += 2)
	{
		char c = buf[idx];
		buf[idx] = buf[idx + 1];
		buf[idx + 1] = c;
	}
}

// Print an IDENTIFY string field of `sz` bytes (not NUL-terminated),
// stripping trailing space padding. sz must fit the local buffer.
static void print_str(char *buf, size_t sz)
{
	char tmp[64];

	// guard the stack buffer (need room for the terminator too)
	kassert(sz < sizeof(tmp));

	memcpy(tmp, buf, sz);

	// trim trailing spaces; this form also handles an all-space field,
	// which previously underflowed the unsigned index
	while (sz > 0 && tmp[sz - 1] == ' ')
		--sz;
	tmp[sz] = '\0';

	// print through an explicit format so a stray '%' in the device
	// string cannot be interpreted as a format specifier
	printk("%s", tmp);
}

// Wait for the channel to finish the in-flight command and classify
// the result: 0 = ok, 1 = device fault, 2 = error, 3 = unknown.
static int ide_wait_cmd_result(uint channel)
{
	uint state;
	ide_wait_ready(channel);
	state = ide_cmd_block_read(channel, ATA_REG_STATUS);

	// Check For Errors:
	if (state & ATA_SR_ERR)
		return 2; // Error.

	// Check If Device fault:
	if (state & ATA_SR_DF)
		return 1; // Device Fault.

	// NOTE(review): the original comment said "check DRQ" but the code
	// tests DRDY (drive ready) -- confirm which status bit is intended.
	if (state & ATA_SR_DRDY)
		return 0; // No Error
	return 3; // unknown Error
}

// Issue SET MULTIPLE MODE so READ/WRITE MULTIPLE commands transfer
// drvinfo->multisect sectors per interrupt. Returns true on success or
// when the drive already reports the desired setting.
static bool ata_set_multiple_mode(uint ch, uint drv, uint settings)
{
	int ret;
	struct ata_drive_info* drvinfo = &channels[ch].drvinfo[drv];

	// if bit 8 is set to one and bits (7:0) are cleared to zero in word settings
	// a SET MULTIPLE command is required before issuing a READ MULTIPLE.
	// testing shows Bochs doesn't set bit 8 while VMware does, so bit 8
	// is deliberately ignored and only the low byte is compared
	if ((settings & 0xFF) == drvinfo->multisect)
		return true;

	// select the drive
	if (!ide_select_drive(ch, drv, false, 0xA0))
		return false;

	// sector-count register carries the sectors-per-block value
	ide_cmd_block_write(ch, ATA_REG_SECCOUNT0, drvinfo->multisect);

	// send ata set multiple mode
	ide_cmd_block_write(ch, ATA_REG_COMMAND, ATA_CMD_SET_MULTIPLE_MODE);
	if (ide_wait_cmd_result(ch))
		return false;

	// ABRT in the error register means the drive rejected the setting
	ret = ide_cmd_block_read(ch, ATA_REG_ERROR);
	return (ret & ATA_ER_ABRT) ? false : true;
}

// Issue a SET FEATURES command: `subcmd` goes into the features
// register and `value` into the sector-count register. Returns false
// when the drive could not be selected, the command failed, or the
// drive aborted it (ABRT set in the error register).
static bool ata_set_feature(uint channel, uint drive, uint subcmd, uint value)
{
	uint ret;

	// select the drive
	if (!ide_select_drive(channel, drive, false, 0xA0))
		return false;

	ide_cmd_block_write(channel, ATA_REG_FEATURES, subcmd);
	ide_cmd_block_write(channel, ATA_REG_SECCOUNT0, value);

	// send ata set features command
	ide_cmd_block_write(channel, ATA_REG_COMMAND, ATA_CMD_SET_FEATURES);
	if (ide_wait_cmd_result(channel))
		return false;

	ret = ide_cmd_block_read(channel, ATA_REG_ERROR);
	return (ret & ATA_ER_ABRT) ? false : true;
}

// Program the drive's transfer mode via the SET FEATURES "set transfer
// mode" subcommand: `type` selects the mode class (PIO/DMA/UDMA) and
// `mode` the level within that class.
static bool ata_set_dma_pio_mode(uint channel, uint drive, uint type, uint mode)
{
	return ata_set_feature(channel, drive,
		ATA_SF_SUBCMD_SET_TRANSFER_MODE, type | mode);
}

// Issue ATA IDENTIFY to <channel, drive> and fill *d with the 512-byte
// identify block (byte-swapped string fields fixed up). Returns false
// when the drive is absent, not a (parallel) ATA device, or the
// command failed.
static bool ata_device_identify(uint channel, uint drive, ide_identify_data_t* d)
{
	uint i, cnt;
	unsigned short *buf = (unsigned short*)d;
	kassert(channel < 2 && drive < 2);

	// select the drive
	if (!ide_wait_drive_ready(channel))
		return false;
	ide_select_drive(channel, drive, true, 0xA0);

	// send ata identify command
	ide_cmd_block_write(channel, ATA_REG_COMMAND, ATA_CMD_IDENTIFY);
	// a status of zero means no device is attached here
	if (!ide_cmd_block_read(channel, ATA_REG_STATUS))
		return false; // drive does not exist

	// wait for ide controller ready
	ide_wait_ready(channel);

	// check if it is real ATA
	// if lbamid or lbahi doesn't equal 0
	// this is not an ATA (ATAPI/SATA signatures set these registers)
	if (ide_cmd_block_read(channel, ATA_REG_LBA1) || ide_cmd_block_read(channel, ATA_REG_LBA2))
		return false;
	
	// wait for DRQ set, means pending result ready
	if (ide_wait_identify_result(channel))
		return false;

	// now this shall be ATA
	// read identify data, then drain any remaining words of the
	// 256-word block so the drive finishes the data phase
	cnt = sizeof(ide_identify_data_t) / 2;
	for (i = 0; i < cnt; ++i, ++buf)
		*buf = inw(channels[channel].io_base + ATA_REG_DATA);
	for (i = cnt; i < 256; ++i)
		inw(channels[channel].io_base + ATA_REG_DATA);

	// NOTE(review): rejects devices with this general_cfg bit set --
	// presumably the "not an ATA device" bit of identify word 0;
	// confirm against the definition of IDE_GENERAL_CFG_ATA_DEVICE
	if (d->general_cfg & IDE_GENERAL_CFG_ATA_DEVICE)
	{
		// not an ata device
		return false;
	}

	// fix strings (each 16-bit word arrives byte-swapped)
	fixstr(d->serial_str, 20);
	fixstr(d->firmware_rev_str, 8);
	fixstr(d->model_num_str, 40);
	return true;
}

// Program the task-file registers for a transfer of `nsects` sectors
// starting at block `blkno`, choosing LBA48, LBA28 or CHS addressing
// based on the selected drive's capabilities. Returns 0 on success,
// non-zero when no valid drive is selected on the channel.
static int ide_setup_transfer(uint ch, unsigned long long blkno, uint nsects)
{
	unsigned int track;
	unsigned int head;
	unsigned int sector;

	struct ata_drive_info* drvinfo;
	struct ide_channel_state* sm = &channels[ch].sm;

	if (sm->drive == IDE_CHNL_SM_DRIVE_UNKNOWN)
		return 1;
	drvinfo = &channels[ch].drvinfo[ide_sm_drive_idx(ch)];
	if (!(drvinfo->flags & ATA_DRIVE_AVAILABLE))
		return 2;

	if (drvinfo->flags & ATA_DRIVE_LBA_SUPPORTED)
	{
		uint lbaH = blkno >> 32;
		uint lbaL = blkno & 0xFFFFFFFF;

		if (drvinfo->flags & ATA_DRIVE_LBA48_SUPPORTED)
		{
			// LBA48 uses two-deep FIFO registers: write the high-order
			// bytes of count/LBA first, then the low-order bytes
			ide_cmd_block_write(ch, ATA_REG_SECCOUNT0, (nsects >> 8));
			ide_cmd_block_write(ch, ATA_REG_LBA0, ((lbaL >> 24) & 0xFF));			// LBA4
			ide_cmd_block_write(ch, ATA_REG_LBA1, (lbaH & 0xFF));					// LBA5
			ide_cmd_block_write(ch, ATA_REG_LBA2, ((lbaH >> 8) & 0xFF));			// LBA6
			ide_cmd_block_write(ch, ATA_REG_SECCOUNT0, (nsects & 0xFF));
			ide_cmd_block_write(ch, ATA_REG_LBA0, (lbaL & 0xFF));					// LBA1
			ide_cmd_block_write(ch, ATA_REG_LBA1, ((lbaL >> 8) & 0xFF));			// LBA2
			ide_cmd_block_write(ch, ATA_REG_LBA2, ((lbaL >> 16) & 0xFF));			// LBA3
			ide_cmd_block_write(ch, ATA_REG_HDDEVSEL, (ide_sm_drive_idx(ch) << 4) | ATA_DEVSEL_FLAG_LBA);
			return 0;
		}
		else
		{
			// LBA28: bits 27:24 go in the device/head register below
			track = (lbaL >> 8) & 0xFFFF;
			head = ((lbaL >> 24) & 0xF) | ATA_DEVSEL_FLAG_LBA;
			sector = lbaL & 0xFF;
		}
	}
	else
	{
		// CHS: translate the linear block id with the drive geometry
		// (sector numbers are 1-based)
		uint blkid = (uint)blkno;
		track = blkid / (drvinfo->ts.chs.heads * drvinfo->ts.chs.sectors);
		head = (blkid / drvinfo->ts.chs.sectors) % drvinfo->ts.chs.heads;
		sector = blkid % drvinfo->ts.chs.sectors + 1;
	}

	// common LBA28/CHS register programming
	ide_cmd_block_write(ch, ATA_REG_FEATURES, 0);
	ide_cmd_block_write(ch, ATA_REG_SECCOUNT0, nsects);
	ide_cmd_block_write(ch, ATA_REG_LBA0, sector);
	ide_cmd_block_write(ch, ATA_REG_LBA1, track & 0xFF);
	ide_cmd_block_write(ch, ATA_REG_LBA2, (track >> 8));
	ide_cmd_block_write(ch, ATA_REG_HDDEVSEL, (ide_sm_drive_idx(ch) << 4) | (head & 0xFF));
	return 0;
}

// Prepare the channel's bus-master engine for a DMA transfer: point it
// at the PRD table, set the direction, and clear stale status bits.
// Returns non-zero when the state machine has no valid access type.
static int ide_setup_dma(uint ch)
{
	uint cmd, tmp;
	struct ide_channel_state* sm = &channels[ch].sm;

	// direction is from the bus master's point of view: a disk READ
	// means the engine WRITEs into memory, and vice versa
	if (sm->access == IDE_CHNL_SM_ACCESS_READ)
		cmd = BM_CR_WRITE;
	else if (sm->access == IDE_CHNL_SM_ACCESS_WRITE)
		cmd = BM_CR_READ;
	else return 1;

	// Setup PRD table (physical address of the channel's PRDT page)
	outl(channels[ch].prdt_page_idx * PAGE_SZ, channels[ch].bmide + BM_PRD_ADDR);

	// Specify read/write
	outb(cmd | BM_CR_STOP, channels[ch].bmide + BM_COMMAND_REG);
	
	// Clear INTR & ERROR flags (write-1-to-clear)
	tmp = inb(channels[ch].bmide + BM_STATUS_REG);
	outb(tmp | BM_SR_INT | BM_SR_ERR, channels[ch].bmide + BM_STATUS_REG);
	return 0;
}

// Kick off the bus-master transfer described by the PRD table by
// setting the start/stop bit in the command register.
static void ide_start_dma(uint ch)
{
	uint cr = inb(channels[ch].bmide + BM_COMMAND_REG);
	cr |= BM_CR_START;
	outb(cr, channels[ch].bmide + BM_COMMAND_REG);
}

// Stop the channel's bus-master engine and report whether the transfer
// completed successfully: true when the interrupt bit was set without
// an error, false otherwise. Also acknowledges INTR/ERROR status bits.
static bool ide_stop_dma(uint ch)
{
	uint tmp = inb(channels[ch].bmide + BM_COMMAND_REG);

	// Stop DMA channel and check DMA status
	outb(tmp & ~BM_CR_START, channels[ch].bmide + BM_COMMAND_REG);

	// Get DMA status
	tmp = inb(channels[ch].bmide + BM_STATUS_REG);

	// Clear INTR && ERROR flags (write-1-to-clear)
	outb(tmp | BM_SR_INT | BM_SR_ERR, channels[ch].bmide + BM_STATUS_REG);

	// Check for DMA errors
	if (tmp & BM_SR_ERR) return false;
	return (tmp & BM_SR_INT) ? true : false;
}

// Debug dump of the channel's PRD table up to (and including) the EOT
// entry. The previous loop printed the fields of entry 0 on every
// iteration; each entry is now indexed properly.
static void show_prdt(uint ch)
{
	int i;
	ata_prd_t* prd = channels[ch].prdt;

	for (i = 0; i < ATA_MAX_PRD_CNT; ++i)
	{
		dbg_output1("%02u) %08x %8u\n", i, prd[i].phy_base_addr, prd[i].count);
		if (prd[i].flags & ATA_PRD_FLAG_EOT)
			break;
	}
}

// Start a PIO read for the channel's current PRD entry: convert the
// PRD's physical address to a virtual one (once), turn the byte count
// into sectors, select the drive and issue the (MULTI)READ command.
// Returns 0 on success, non-zero on drive-select/setup failure.
static int do_ide_act_pio(uint ch)
{
	uint cmd;
	struct ide_channel_state* sm = &channels[ch].sm;
	struct ata_drive_info* drvinfo
		= &channels[ch].drvinfo[ide_sm_drive_idx(ch)];
	ata_prd_t* prd = &channels[ch].prdt[channels[ch].prdt_next_prd];

	// translate the phy_addr to virtual addr (PIO copies through the
	// CPU, so the buffer must be mapped); flag prevents re-translation
	if (!(prd->flags & ATA_RPD_FLAG_VIRT_ADDR))
	{
		prd->phy_base_addr = virt_addr(IDX2PAGE(prd->phy_base_addr / PAGE_SZ));
		// NOTE(review): comparing phy_base_addr against NULL suggests
		// the field may be integral -- confirm the intended check
		kassert(NULL != prd->phy_base_addr);
		prd->flags |= ATA_RPD_FLAG_VIRT_ADDR;
	}

	// translate the byte count to sector count
	// note that in PRD, count = 0 means count = 65536
	prd->count /= IDE_SECTOR_SIZE;
	if (!prd->count) prd->count = 65536 / IDE_SECTOR_SIZE;
	dbg_output3("[TRANSFER] ch = %u, id:%u, cnt:%u\n", ch, (uint)sm->blkid, prd->count);

	if (!ide_select_drive(ch, ide_sm_drive_idx(ch), false, 0))
		return 1;
	if (ide_setup_transfer(ch, sm->blkid, prd->count))
		return 2;

	// advance the state machine past the sectors being transferred
	sm->blkid += prd->count;

	// pick the command variant: LBA48 vs 28-bit, multi-sector vs single
	if (drvinfo->flags & ATA_DRIVE_LBA48_SUPPORTED)
		cmd = (drvinfo->multisect > 1) ? ATA_CMD_MULTIREAD_PIO_EXT : ATA_CMD_READ_PIO_EXT;
	else cmd = (drvinfo->multisect > 1) ? ATA_CMD_MULTIREAD_PIO : ATA_CMD_READ_PIO;

	// Start read
	ide_cmd_block_write(ch, ATA_REG_COMMAND, cmd);
	return 0;
}

// Begin a PIO transfer: a fresh transfer always walks the PRDT from
// entry 0, so rewind the cursor before starting.
static int ide_act_pio(uint ch)
{
	channels[ch].prdt_next_prd = 0;
	return do_ide_act_pio(ch);
}

// Begin a (U)DMA transfer for the channel's current state machine:
// select the drive, program the task file, arm the bus master engine
// and issue the READ DMA command. Returns 0 on success; 1/2/3 identify
// the failing step.
static int ide_act_udma(uint ch)
{
	struct ide_channel_state* sm = &channels[ch].sm;

	dbg_output3("[TRANSFER] ch = %u, id:%u, cnt:%u\n", ch, (uint)sm->blkid, sm->blocks);
	show_prdt(ch);

	if (!ide_select_drive(ch, ide_sm_drive_idx(ch), false, 0))
		return 1;
	if (ide_setup_transfer(ch, sm->blkid, sm->blocks))
		return 2;
	if (ide_setup_dma(ch))
		return 3;

	// Start read (LBA48 drives need the EXT opcode)
	ide_cmd_block_write(ch, ATA_REG_COMMAND, \
		(channels[ch].drvinfo[ide_sm_drive_idx(ch)].flags & ATA_DRIVE_LBA48_SUPPORTED)	\
		? ATA_CMD_READ_DMA_EXT : ATA_CMD_READ_DMA);
	ide_start_dma(ch);
	return 0;
}

// Launch the transfer currently described by the channel's state
// machine, picking UDMA or PIO based on the drive's flags.
// Returns 100/101 when no usable drive is selected, otherwise the
// result of the chosen transfer routine.
static int ide_exec(uint ch)
{
	struct ata_drive_info* info;
	struct ide_channel_state* sm = &channels[ch].sm;

	if (sm->drive == IDE_CHNL_SM_DRIVE_UNKNOWN)
		return 100;

	info = &channels[ch].drvinfo[ide_sm_drive_idx(ch)];
	if (!(info->flags & ATA_DRIVE_AVAILABLE))
		return 101;

	// UDMA requires LBA support and no forced PIO downgrade
	if ((info->flags & ATA_DRIVE_LBA_SUPPORTED)
		&& !(info->flags & ATA_DRIVE_DOWNGRADE_PIO_MODE))
		return ide_act_udma(ch);

	return ide_act_pio(ch);
}

// Probe one drive on a channel via IDENTIFY. On success *ide_data
// holds the identify block, the model string is printed, and true is
// returned. (Removed an unused local and const-qualified the table.)
static bool do_detect_ata_device(uint channel, uint drive, ide_identify_data_t* ide_data)
{
	static const char* drive_id[] = {"master", "slave"};

	if (!ata_device_identify(channel, drive, ide_data))
		return false;

	printk("ata%u-%s: ", channel, drive_id[drive]);
	print_str(ide_data->model_num_str, 40);
	return true;
}

// Print one "(kind 0, 1, ... supported, kind N activated)" line for a
// drive; no-op when the mode class is unsupported (support < 0).
static void show_dma_mode_line(uint ch, uint drv, const char* kind,
	int support, int active)
{
	int i;
	static const char* drive_id[] = {"master", "slave"};

	if (support < 0) return;

	printk("ata%u-%s: (%s 0", ch, drive_id[drv], kind);
	for (i = 1; i <= support; ++i) printk(", %u", i);
	printk(" supported");
	if (active >= 0) printk(", %s %u activated", kind, active);
	printk(")\n");
}

// Report the DMA and UDMA support/activation state of a drive.
// (The dma and udma halves were duplicated; factored into one helper
// with byte-identical output.)
static void show_ata_status(uint ch, uint drv)
{
	struct ata_drive_info* info = &channels[ch].drvinfo[drv];

	show_dma_mode_line(ch, drv, "dma",
		info->dma_support_mode, info->dma_active_mode);
	show_dma_mode_line(ch, drv, "udma",
		info->udma_support_mode, info->udma_active_mode);
}

// Parse the DMA/UDMA capability words of the IDENTIFY data and record
// the supported/active modes, multi-sector setting and capability
// flags into the drive's info structure.
static void check_ata_device(uint channel, uint drive, ide_identify_data_t* d)
{
	int dma_support = -1;
	int udma_support = -1;
	int dma_active = -1;
	int udma_active = -1;
	uint dma = d->multi_word_dma_support;
	uint udma = d->ultra_dma_support_cur_mode;
	struct ata_drive_info* drv_info = &channels[channel].drvinfo[drive];

	// the ultra-DMA word is only meaningful when its validity bit is set
	if (!(d->curr_field_validity & 2)) udma = 0;

	if (d->capabilities & IDE_CAPABILITIES_DMA_SUPPORTED)
	{
		// NOTE(review): this tests the DMA capability bit but sets the
		// LBA flag -- confirm which capability bit is intended here
		drv_info->flags |= ATA_DRIVE_LBA_SUPPORTED;

		// check for DMA support and activation (highest mode wins)
		if (dma & IDE_MULTIWORD_DMA_MODE_0_SUPPORTED)
			dma_support = 0; 
		if (dma & IDE_MULTIWORD_DMA_MODE_1_SUPPORTED)
			dma_support = 1;
		// fixed: this previously re-tested MODE_1_SUPPORTED, so
		// multiword DMA mode 2 was never detected
		if (dma & IDE_MULTIWORD_DMA_MODE_2_SUPPORTED)
			dma_support = 2;

		if (dma_support >= 0)
		{
			if (dma & IDE_MULTIWORD_DMA_MODE_0_ACTIVE)
				dma_active = 0;
			else if (dma & IDE_MULTIWORD_DMA_MODE_1_ACTIVE)
				dma_active = 1;
			else if (dma & IDE_MULTIWORD_DMA_MODE_2_ACTIVE)
				dma_active = 2;
		}

		// check for UDMA support and activation (highest mode wins)
		if (udma & IDE_ULTRA_DMA_MODE_0_SUPPORTED)
			udma_support = 0;
		if (udma & IDE_ULTRA_DMA_MODE_1_SUPPORTED)
			udma_support = 1;
		if (udma & IDE_ULTRA_DMA_MODE_2_SUPPORTED)
			udma_support = 2;
		if (udma & IDE_ULTRA_DMA_MODE_3_SUPPORTED)
			udma_support = 3;
		if (udma & IDE_ULTRA_DMA_MODE_4_SUPPORTED)
			udma_support = 4;
		if (udma & IDE_ULTRA_DMA_MODE_5_SUPPORTED)
			udma_support = 5;
		if (udma & IDE_ULTRA_DMA_MODE_6_SUPPORTED)
			udma_support = 6;

		if (udma_support >= 0)
		{
			if (udma & IDE_ULTRA_DMA_MODE_0_ACTIVE)
				udma_active = 0;
			else if (udma & IDE_ULTRA_DMA_MODE_1_ACTIVE)
				udma_active = 1;
			else if (udma & IDE_ULTRA_DMA_MODE_2_ACTIVE)
				udma_active = 2;
			else if (udma & IDE_ULTRA_DMA_MODE_3_ACTIVE)
				udma_active = 3;
			else if (udma & IDE_ULTRA_DMA_MODE_4_ACTIVE)
				udma_active = 4;
			else if (udma & IDE_ULTRA_DMA_MODE_5_ACTIVE)
				udma_active = 5;
			else if (udma & IDE_ULTRA_DMA_MODE_6_ACTIVE)
				udma_active = 6;
		}
	}
	drv_info->dma_support_mode = (char)dma_support;
	drv_info->dma_active_mode = (char)dma_active;
	drv_info->udma_support_mode = (char)udma_support;
	drv_info->udma_active_mode = (char)udma_active;
	// sectors-per-interrupt for READ/WRITE MULTIPLE; 0 means the drive
	// reported none, so fall back to 1
	drv_info->multisect = d->sectors_rw_multicmd & 0xFF;
	if (!drv_info->multisect) drv_info->multisect = 1;
	else ata_set_multiple_mode(channel, drive, d->muli_sector_setting);

	if (d->cmd_sets_supported & IDE_CMD_SET_48BIT_LBA_SUPPORTED)
		drv_info->flags |= ATA_DRIVE_LBA48_SUPPORTED;
	if (d->usedmovsd) drv_info->flags |= ATA_DRIVE_USE_PIO_32BITS;
}

// Turn on bus mastering for the controller (PCI command register,
// bit 2) unless it is already enabled. ch/drv are unused but kept for
// signature symmetry with pci_enable_udma_mode.
static void pci_enable_bus_master(uint bus, uint dev, uint func, uint ch, uint drv)
{
	uint cmd = pci_conf_read(bus, dev, func, PCI_IDE_COMMAND_REG, 2);
	if (cmd & 4) return;

	pci_conf_write(bus, dev, func, PCI_IDE_COMMAND_REG, 2, cmd | 4);
	dbg_output3("pci-ide: enable bus master mode\n");	
}

// Enable UDMA for one drive in the controller's UDMA control register;
// the register carries one enable bit per drive (two bits per channel).
// No-op when the bit is already set.
static void pci_enable_udma_mode(uint bus, uint dev, uint func, uint ch, uint drv)
{
	uint bit = 1u << (ch * 2 + drv);
	uint reg = pci_conf_read(bus, dev, func, PCI_IDE_UDMA_CTRL_REG, 1);

	if (!(reg & bit))
	{
		pci_conf_write(bus, dev, func, PCI_IDE_UDMA_CTRL_REG, 1, reg | bit);
		dbg_output3("pci-ide: enable udma mode for %u:%u\n", ch, drv);
	}
}

// Record the drive's capacity from the IDENTIFY data: LBA drives get a
// flat sector count (48-bit or 28-bit field), CHS drives get geometry.
static void ata_calc_total_sectors(ide_identify_data_t* d, struct ata_drive_info* drv_info)
{
	if (!(drv_info->flags & ATA_DRIVE_LBA_SUPPORTED))
	{
		drv_info->ts.chs.cyls = d->num_cylinders;
		drv_info->ts.chs.heads = d->num_heads;
		drv_info->ts.chs.sectors = d->num_sectors_per_track;
		return;
	}

	if (drv_info->flags & ATA_DRIVE_LBA48_SUPPORTED)
		drv_info->ts.total_sectors = d->lba48_total_sectors;
	else
		drv_info->ts.total_sectors = d->lba_total_sectors;
}

// Return the drive's total sector count. For CHS drives, the sentinel
// geometry 0xFFFF/0xFFFF/0xFFFF means "unknown" and yields 0.
static unsigned long long ata_get_total_sectors(struct ata_drive_info* drv_info)
{
	if (drv_info->flags & ATA_DRIVE_LBA_SUPPORTED)
		return drv_info->ts.total_sectors;

	if (drv_info->ts.chs.cyls == 0xFFFF
		&& drv_info->ts.chs.heads == 0xFFFF
		&& drv_info->ts.chs.sectors == 0xFFFF)
		return 0;

	// widen before multiplying so the product cannot overflow 32-bit
	// arithmetic (the old code multiplied three uints first)
	return (unsigned long long)drv_info->ts.chs.cyls
		* drv_info->ts.chs.heads
		* drv_info->ts.chs.sectors;
}

// Configure a freshly identified drive: record capabilities and
// capacity, enable read-ahead/write-cache where offered, activate the
// best (U)DMA mode and enable PCI bus mastering when a DMA mode is in
// use. Falls back to PIO when no DMA class is supported.
static void config_ata_device(uint bus, uint dev, uint func, uint ch, uint drv, ide_identify_data_t* d)
{
	struct ata_drive_info* drv_info = &channels[ch].drvinfo[drv];

	// set the drive as available
	drv_info->flags |= ATA_DRIVE_AVAILABLE;

	check_ata_device(ch, drv, d);

	// set the total sectors
	ata_calc_total_sectors(d, drv_info);

	// show info
	if (drv_info->flags & (ATA_DRIVE_LBA48_SUPPORTED | ATA_DRIVE_LBA_SUPPORTED))
		printk(" LBA%s", (drv_info->flags & ATA_DRIVE_LBA48_SUPPORTED) ? "48" : "");
	// cast: the size in MB fits a uint; passing an unsigned long long
	// through "%u" was undefined varargs behavior
	printk(" (%u MB)\n", (uint)(ata_get_total_sectors(drv_info) * IDE_SECTOR_SIZE / 1024 / 1024));

	// Enable read ahead and write caching if supported
	if (d->cur_set_features_options & IDE_CUR_SET_FEATURES_OPT_READ_LOCK_AHEAD)
	{
		dbg_output3("ata%u-%u: read ahead enabled.\n", ch, drv);
		ata_set_feature(ch, drv, ATA_SF_ENABLE_RLA, 0);
	}
	if (d->cur_set_features_options & IDE_CUR_SET_FEATURES_OPT_WRITE_CACHE)
	{
		dbg_output3("ata%u-%u: write cache enabled.\n", ch, drv);
		ata_set_feature(ch, drv, ATA_SF_ENABLE_WCACHE, 0);
	}

	// prefer UDMA over multiword DMA over PIO
	if (drv_info->udma_support_mode >= 0)
	{
		pci_enable_udma_mode(bus, dev, func, ch, drv);
		if (drv_info->udma_support_mode != drv_info->udma_active_mode)
		{
			// activate the udma mode
			drv_info->udma_active_mode = drv_info->udma_support_mode;
			dbg_output3("ata%u-%u: activate udma mode %u\n", ch, drv,drv_info->udma_active_mode);
			if (ata_set_dma_pio_mode(ch, drv, ATA_SF_SUBCMD_TRANSFER_MODE_UDMA,
				drv_info->udma_active_mode))
				goto enable_bus_master;
		}
		else goto enable_bus_master;
	}
	else if (drv_info->dma_support_mode >= 0)
	{
		if (drv_info->dma_support_mode != drv_info->dma_active_mode)
		{
			// activate the dma mode
			drv_info->dma_active_mode = drv_info->dma_support_mode;
			dbg_output3("ata%u-%u: activate dma mode %u\n", ch, drv,drv_info->dma_active_mode);
			if (ata_set_dma_pio_mode(ch, drv, ATA_SF_SUBCMD_TRANSFER_MODE_DMA,
				drv_info->dma_active_mode))
				goto enable_bus_master;
		}
		else goto enable_bus_master;
	}
	// PIO-only drive: skip bus master setup when the mode switch worked
	else if (ata_set_dma_pio_mode(ch, drv, ATA_SF_SUBCMD_TRANSFER_MODE_PIO, 0))
		goto show_info;

enable_bus_master:
	pci_enable_bus_master(bus, dev, func, ch, drv);
show_info:
	show_ata_status(ch, drv);
}

// Initialize a channel's software state after a successful reset: the
// state-machine lock, the READY state and the per-drive request queues.
static void init_ata_channel_data(uint ch)
{
	// originally, all data in channels is 0
	init_spinlock(&channels[ch].sm.spinlock);
	channels[ch].sm.state = IDE_CHNL_SM_STAT_READY;

	listnode_init(channels[ch].drvinfo[0].req_queue);
	listnode_init(channels[ch].drvinfo[1].req_queue);
}

static int detect_ata_devices(uint bus, uint dev, uint func)
{
	ide_identify_data_t data;

	// check for channel 0
	ide_soft_reset(ATA_PRIMARY);
	if (inb(channels[ATA_PRIMARY].bmide + BM_STATUS_REG) & BM_SR_SIMPLEX)
	{
		printk("ata master bus DMA (primary): Simplex only mode not supported.\n");
		return 1;
	}
	init_ata_channel_data(ATA_PRIMARY);

	if (do_detect_ata_device(ATA_PRIMARY, 0, &data))
		config_ata_device(bus, dev, func, ATA_PRIMARY, 0, &data);
	if (do_detect_ata_device(ATA_PRIMARY, 1, &data))
		config_ata_device(bus, dev, func, ATA_PRIMARY, 1, &data);

	// check for channel 1
	ide_soft_reset(ATA_SECONDARY);
	if (inb(channels[ATA_SECONDARY].bmide + BM_STATUS_REG) & BM_SR_SIMPLEX)
	{
		printk("ata master bus DMA (secondary): Simplex only mode not supported.\n");
		return 2;
	}
	init_ata_channel_data(ATA_SECONDARY);

	if (do_detect_ata_device(ATA_SECONDARY, 0, &data))
		config_ata_device(bus, dev, func, ATA_SECONDARY, 0, &data);
	if (do_detect_ata_device(ATA_SECONDARY, 1, &data))
		config_ata_device(bus, dev, func, ATA_SECONDARY, 1, &data);
	return 0;
}

// Driver entry point: locate the PCI IDE controller, program the
// channel register bases from its BARs, probe the drives and finally
// hook up the legacy IRQ lines. Returns 0 on success.
static int ide_ata_dev_init(void)
{
	uint bus, dev, func;
	if (!detect_pci_ide_controller(&bus, &dev, &func))
	{
		printk("Error: pci-ide controller not detected.\n");
		return 1;
	}

	dbg_output3("pci-ide controller found at <bus:%u, dev:%u, func:%u>\n", bus, dev, func);

	// setup registers via result from pci cfg
	setup_ide_channel_registers(bus, dev, func);
	if (detect_ata_devices(bus, dev, func))
		return 2;

	check_set_pci_ide_irq(bus, dev, func);
	return 0;
}

// no need of lock
// Hand out PRDT entries sequentially; returns NULL once the table is
// exhausted so the caller can commit the current PRDT.
static inline ata_prd_t* get_next_free_prd(uint ch)
{
	uint next = channels[ch].prdt_next_prd;
	if (next >= ATA_MAX_PRD_CNT)
		return NULL;
	channels[ch].prdt_next_prd = next + 1;
	return &channels[ch].prdt[next];
}

// Allocate and zero two pages for the per-channel PRD tables (one page
// per channel) and record each table's physical page index for the bus
// master engine. Returns non-zero on failure.
// NOTE(review): the pages allocated by vmalloc are not released on the
// error paths returning 2/3 -- confirm whether a matching free routine
// should be called here.
static int init_prdt(void)
{
	page_t *p;
	char* tmp = (char*)vmalloc(2);
	if (NULL == tmp) return 1;

	memset(tmp, 0, PAGE_SZ * 2);
	channels[ATA_PRIMARY].prdt = (ata_prd_t*)tmp;
	channels[ATA_SECONDARY].prdt = (ata_prd_t*)(tmp + PAGE_SZ);

	// look up the backing page of each table to get its physical index
	p = virt_addr_page((uint)tmp, true);
	if (!p) return 2;
	channels[ATA_PRIMARY].prdt_page_idx = PAGE_IDX(p);
	p = virt_addr_page((uint)(tmp + PAGE_SZ), true);
	if (!p) return 3;
	channels[ATA_SECONDARY].prdt_page_idx = PAGE_IDX(p);

	return 0;
}

static void ata_schedule(uint ch, uint drv);

// need lock
// Queue an asynchronous read of `blocks` sectors starting at `blkid`
// into `buffer`: build a request, try to merge it into one of the
// drive's existing elevator request lists, otherwise allocate a new
// list; finally kick the scheduler if the channel was idle.
// Returns 0 on success, non-zero error codes otherwise.
// NOTE(review): `queue->next` is read into lnode before the spinlock
// is taken -- confirm this cannot race with concurrent queue updates.
static int do_ide_read_async(uint ch, uint drv, uint blkid, uint blocks, void* buffer)
{
	int ret, isidle;
	unsigned int flags;
	dev_blk_req_node_t* req;
	dev_blk_reqlist_t* reqlist;
	listnode_t* queue = &channels[ch].drvinfo[drv].req_queue;
	listnode_t *lnode = queue->next;

	// make the request first
	if (dev_block_new_request(DEV_BLK_ACCESS_READ, IDE_SECTOR_SIZE, buffer, blkid, blocks, &req))
		return 1;

	// try to merge into an existing request list of this drive
	spin_lock_irqsave(&channels[ch].sm.spinlock, flags);
	for (; lnode != queue; lnode = lnode->next)
	{
		reqlist = list_entry(dev_blk_reqlist_t, ownerlist, lnode);
		ret = dev_block_submit_request(reqlist, req);
		if (!ret) goto read_success;

		// any error other than "try the next elevator round" is fatal
		if (ret != DEV_BLK_REQ_ERR_ELEVATOR_NEXT_ROUND)
		{
			spin_unlock_irqrestore(&channels[ch].sm.spinlock, flags);
			dev_block_release_request(req);
			return 2;
		}
	}
	spin_unlock_irqrestore(&channels[ch].sm.spinlock, flags);
	
	// no list accepted the request: create a fresh elevator list
	reqlist = dev_reqlist_alloc(IDE_ELEVATOR_ROUND_MAX_SECTOR,
		DEV_BLK_REQLST_TYPE_ELEVATOR, IDE_SECTOR_SIZE);
	if (!reqlist) return 3;

	spin_lock_irqsave(&channels[ch].sm.spinlock, flags);
	ret = dev_block_submit_request(reqlist, req);
	spin_unlock_irqrestore(&channels[ch].sm.spinlock, flags);

	if (ret)
	{
		dev_block_release_request(req);
		return 4;
	}

	// publish the new list on the drive's queue
	spin_lock_irqsave(&channels[ch].sm.spinlock, flags);
	listnode_add(*queue, reqlist->ownerlist);

read_success:

	// get the running state (the lock is held on every path here)
	isidle = (channels[ch].sm.state == IDE_CHNL_SM_STAT_READY) ? 1 : 0;
	spin_unlock_irqrestore(&channels[ch].sm.spinlock, flags);

	// NOTE(review): drv argument 2 is outside the 0/1 drive range --
	// presumably a "let the scheduler pick" sentinel; confirm against
	// ata_schedule's contract
	if (isidle) ata_schedule(ch, 2);
	return 0;
}

#define ATA_HR_RES_COMMIT_PRDT			(1)

// bpp = blocks per page
// Fold one block request into the channel's PRDT, merging physically
// contiguous pages into a single PRD entry where possible.
// Returns 0 when the request was fully consumed, or
// ATA_HR_RES_COMMIT_PRDT when the caller must commit the current PRDT
// first (table full, access-type mismatch, or non-adjacent range).
static int ata_handle_request(uint ch, dev_blk_req_node_t* req, uint bpp)
{
	ata_prd_t *prd = ATA_CHNL_CUR_PRD(ch);
	const uint rwflgs = (DEV_BLK_NODE_FLAG_ACCESS_READ
		| DEV_BLK_NODE_FLAG_ACCESS_WRITE);

	/* state machine */
	struct ide_channel_state* sm = &channels[ch].sm;

	// this is the first request
	if (sm->access == IDE_CHNL_SM_ACCESS_UNKNOWN)
		sm->access = req->flags & rwflgs;
	// this is the second request, see if we can merge with the first
	else
	{
		// if the request access type not match with the current PRDT (first)
		// we just commit the PRDT
		if (sm->access != (req->flags & rwflgs))
			return ATA_HR_RES_COMMIT_PRDT;

		// see if this request could be merged into the PRDT: it must
		// start exactly where the accumulated range starts or ends
		if (sm->blkid != req->data.blkid && sm->blkid + sm->blocks != req->data.blkid)
			return ATA_HR_RES_COMMIT_PRDT;
	}

	while (req->data.blkcnt)
	{
		page_t *p;
		bool isfirst = false;

		// see if we need to get a new prd
		if (!ATA_PRD_VALID(ch, prd))
		{
			// all the PRD in PRDT has been used
			if (NULL == (prd = get_next_free_prd(ch)))
				return ATA_HR_RES_COMMIT_PRDT;

			// this is the first PRD in the PRDT, save the block id
			sm->blkid = req->data.blkid;
			isfirst = true;
		}
		
		// get the page to be used next: either the head of the
		// request's page list, or the page backing its flat buffer
		if (req->data.use_page_list)
		{
			listnode_t *n = req->data.buf.page_list->next;
			p = list_entry(page_t, ownerlist, n);
			lock_page(p);
		}
		else p = virt_addr_page((uint)req->data.buf.ptr, true);

		if (isfirst)
		{
			prd->phy_base_addr = PAGE_PHYADDR(p);
			prd->count = PAGE_SZ;
		}
		else

		// check if we can merge current request to current PRD:
		// the page must be physically contiguous with the PRD's range
		// and the PRD byte count must stay below the 64KB limit
		if (prd->count && ((uint)prd->count + PAGE_SZ) < 65536
			&& (prd->phy_base_addr + prd->count == PAGE_PHYADDR(p)))
			prd->count += PAGE_SZ;

		else // we need a brand new PRD
		{
			// all the PRD in PRDT has been used
			if (NULL == (prd = get_next_free_prd(ch)))
			{
				unlock_page(p);
				return ATA_HR_RES_COMMIT_PRDT;
			}

			prd->phy_base_addr = PAGE_PHYADDR(p);
			prd->count = PAGE_SZ;
		}

		// set PRD flags
		prd->flags = 0;
		dbg_output3("[COMMIT] %08x:%08x id:%u, cnt:%u\n", 0, req, (uint)req->data.blkid, req->data.blkcnt);
		// update request information
		if (req->data.use_page_list)
		{
			// put the page to the tail of list
			listnode_del(p->ownerlist);
			listnode_add(*(req->data.buf.page_list), p->ownerlist);
		}
		else req->data.buf.ptr += PAGE_SZ;

		// adjust the remaining size: consume at most one page worth of
		// blocks (bpp) per iteration
		if (req->data.blkcnt > bpp)
		{
			req->data.blkid += bpp;
			sm->blocks += bpp;
			req->data.blkcnt -= bpp;
		}
		else
		{
			req->data.blkid += req->data.blkcnt;
			sm->blocks += req->data.blkcnt;
			req->data.blkcnt = 0;
		}
	}
	return 0;	
}

// Return the request list the channel should work on next: the cached
// one if set, otherwise the oldest list (by first-request timestamp)
// from either drive's queue. Caches the selection and marks it running.
// Returns NULL when both queues are empty or the list is not an
// elevator list.
// NOTE(review): sm.drive is set (1 for drive 0's list, 2 for drive 1's)
// before the elevator-type check; an early NULL return leaves sm.drive
// updated but no list cached -- confirm this is intended.
static dev_blk_reqlist_t* ata_get_recent_reqlist(uint ch)
{
	dev_blk_reqlist_t* reqlist = channels[ch].sm.reqlst;
	if (NULL == reqlist)
	{
		listnode_t* n;
		unsigned int flags;
		dev_blk_reqlist_t *r1 = NULL;
		dev_blk_reqlist_t *r2 = NULL;

		// peek the head list of each drive's queue under the lock
		spin_lock_irqsave(&channels[ch].sm.spinlock, flags);
		if (!listnode_isempty(channels[ch].drvinfo[0].req_queue))
		{
			n = channels[ch].drvinfo[0].req_queue.next;
			r1 = list_entry(dev_blk_reqlist_t, ownerlist, n);
		}
		if (!listnode_isempty(channels[ch].drvinfo[1].req_queue))
		{
			n = channels[ch].drvinfo[1].req_queue.next;
			r2 = list_entry(dev_blk_reqlist_t, ownerlist, n);
		}
		spin_unlock_irqrestore(&channels[ch].sm.spinlock, flags);

		// pick whichever list received its first request earlier
		if (r1 && r2) reqlist = (r1->ts_fstreq < r2->ts_fstreq) ? r1 : r2;
		else if (r1) reqlist = r1;
		else if (r2) reqlist = r2;
		else return NULL;

		channels[ch].sm.drive = (reqlist == r1) ? 1 : 2;

		// the ATA only support the elevator type request list
		if (!(reqlist->flags & DEV_BLK_REQLST_TYPE_ELEVATOR)) return NULL;

		// set the indicator that the current reqlist is in running
		reqlist->flags |= DEV_BLK_REQLST_RUNNING;

		// cache to state machine
		channels[ch].sm.reqlst = reqlist;
	}
	return reqlist;
}

// rewind the PRDT fill cursor back to the first descriptor
#define ata_reset_prdt(ch)		do { channels[ch].prdt_next_prd = 0; } while (0)
// drop the cached "current" request list so the next scheduling pass
// re-selects one from the per-drive queues
#define ata_reset_recent_reqlist(ch)	do { channels[ch].sm.reqlst = NULL; } while (0)

// return the channel's state machine to its idle defaults so the next
// scheduling pass starts from a clean slate
static void ata_reinitialize_statemachine(uint ch)
{
	struct ide_channel_state* state = &channels[ch].sm;

	state->drive = IDE_CHNL_SM_DRIVE_UNKNOWN;
	state->access = IDE_CHNL_SM_ACCESS_UNKNOWN;

	// clear the retry bookkeeping
	state->is_retry = 0;
	state->retry_count = 0;

	// clear the transfer progress counters
	state->blocks = 0;
	state->blkid = 0;
}

// finalize the PRDT: mark the last filled descriptor as end-of-table
// and rewind the fill cursor for the next transfer.  always succeeds.
static bool ata_commit_prdt(uint ch)
{
	ata_prd_t* last = ATA_CHNL_CUR_PRD(ch);

	last->flags = ATA_PRD_FLAG_EOT;
	ata_reset_prdt(ch);

	return true;
}

// fill the channel's PRD table from the pending request lists.
//
// walks the elevator tree of the most urgent request list and hands
// every request that still has blocks to transfer to
// ata_handle_request(), until either the PRDT is full
// (ATA_HR_RES_COMMIT_PRDT) or the whole list has been consumed - in
// both cases the PRDT is committed.
//
// returns true when a committed PRDT is ready for execution, false
// when there is nothing to do (or the reqlist type is unsupported).
//
// fix: the original skipped finished requests with 'continue', which
// jumped back to the top of the inner for(;;) WITHOUT advancing
// 'node' - an infinite loop as soon as a request with blkcnt == 0 was
// met.  the node advance now always executes.  the unreachable
// trailing 'return true' was removed.
static bool ata_prepare_prdt(uint ch)
{
	unsigned int flags;

	for (;;)
	{
		avl_node_t* node;
		dev_blk_reqlist_t* reqlist = ata_get_recent_reqlist(ch);
		if (NULL == reqlist) return false;

		spin_lock_irqsave(&channels[ch].sm.spinlock, flags);
		node = avl_first(reqlist->h.elevator);
		spin_unlock_irqrestore(&channels[ch].sm.spinlock, flags);

		for (;;)
		{
			dev_blk_req_node_t *req;

			// all the requests in the list have been committed,
			// so we commit the PRDT
			if (!node)
			{
				kassert(reqlist == channels[ch].sm.reqlst);
				ata_reset_recent_reqlist(ch);
				return ata_commit_prdt(ch);
			}

			req = AVLNODE_ENTRY(dev_blk_req_node_t, u.avlnode, node);

			// only requests with blocks left to transfer are handed on
			if (req->data.blkcnt)
			{
				int ret = ata_handle_request(ch, req, PAGE_SZ / IDE_SECTOR_SIZE);
				if (ATA_HR_RES_COMMIT_PRDT == ret)
					return ata_commit_prdt(ch);
			}

			spin_lock_irqsave(&channels[ch].sm.spinlock, flags);
			node = avl_next(node);
			spin_unlock_irqrestore(&channels[ch].sm.spinlock, flags);
		}
	}
}

#define ata_set_state(ch, s)	do { channels[ch].sm.state = (s); } while (0)

// atomically test whether both drive queues of the channel are empty
// and, if so, park the state machine in READY.
//
// holding sm.spinlock makes the emptiness test and the state
// transition one atomic step with respect to concurrent enqueuers.
//
// returns true when both queues were empty (state is now READY),
// false when there is still work pending.
static bool ata_queue_empty_and_stay_ready(uint ch)
{
	bool ret = false;	// fix: was declared 'int' in a bool function
	unsigned int flags;

	spin_lock_irqsave(&channels[ch].sm.spinlock, flags);
	if (listnode_isempty(channels[ch].drvinfo[0].req_queue)
		&& listnode_isempty(channels[ch].drvinfo[1].req_queue))
	{
		ata_set_state(ch, IDE_CHNL_SM_STAT_READY);
		ret = true;
	}
	spin_unlock_irqrestore(&channels[ch].sm.spinlock, flags);
	return ret;
}

// reap completed requests from the given drive's queue.
//
// walks every request list queued on channel 'ch' / drive 'drv',
// unlinks the leading request nodes whose remaining block count has
// reached zero, and releases any request list that becomes empty.
// reaped nodes are collected on a local list.
//
// NOTE(review): notify_list is never consumed before it goes out of
// scope - presumably the actual completion callbacks are the
// "todo: do notify" left at the call site; verify before relying on
// this function for wakeups.
//
// fix: the original sampled queue->next BEFORE taking sm.spinlock,
// racing with concurrent enqueues; the head is now read under the
// lock.
static int do_ide_async_notify(uint ch, uint drv)
{
	unsigned int flags;
	listnode_t *queue = &channels[ch].drvinfo[drv].req_queue;
	listnode_t notify_list = LISTNODE_INITIALIZER(notify_list);
	listnode_t *tmp, *lnode;

	spin_lock_irqsave(&channels[ch].sm.spinlock, flags);
	lnode = queue->next;
	for (; lnode != queue; lnode = tmp)
	{
		avl_node_t *avlnode;
		dev_blk_reqlist_t* reqlist;

		// remember the successor first: the current node may be
		// unlinked and released below
		tmp = lnode->next;
		reqlist = list_entry(dev_blk_reqlist_t, ownerlist, lnode);

		// finished requests (blkcnt == 0) cluster at the front of the
		// elevator tree, so stop at the first unfinished one
		avlnode = avl_first(reqlist->h.elevator);
		while (avlnode)
		{
			dev_blk_req_node_t* req = AVLNODE_ENTRY(dev_blk_req_node_t, u.avlnode, avlnode);
			avlnode = avl_next(avlnode);

			if (!req->data.blkcnt)
			{
				avl_remove(&reqlist->h.elevator, &req->u.avlnode);
				listnode_add(notify_list, req->u.ownerlist);
				dbg_output3("[NOTIFY] %08x:%08x id:%u, cnt:%u\n", reqlist, req, (uint)req->data.blkid, req->data.blkcnt);
			}
			else break;
		}

		// if all requests removed from the reqlist, we release the reqlist
		if (NULL == reqlist->h.elevator)
		{
			listnode_del(reqlist->ownerlist);
			dev_reqlist_release(reqlist);
		}
	}
	spin_unlock_irqrestore(&channels[ch].sm.spinlock, flags);
	return 0;
}

// advance the channel's state machine.
//
// in READY state: if any request is pending, move to PREPARING, build
// a PRDT and issue the commands (EXECUTING).  in EXECUTING state: the
// tasklet has already serviced the interrupt, so reap the finished
// requests, reinitialize the state machine and, if more work queued
// up meanwhile, start the next transfer the same way.
static void ata_schedule(uint ch, uint drv)
{
	bool need_prepare = false;

	if (channels[ch].sm.state == IDE_CHNL_SM_STAT_READY)
	{
		need_prepare = !ata_queue_empty_and_stay_ready(ch);
	}
	else if (channels[ch].sm.state == IDE_CHNL_SM_STAT_EXECUTING)
	{
		// before entering here, the tasklet shall
		// finish handling the interrupt

		// todo: do notify
		do_ide_async_notify(ch, drv);
		ata_reinitialize_statemachine(ch);

		// check if there is any pending requests
		need_prepare = !ata_queue_empty_and_stay_ready(ch);
	}

	if (!need_prepare) return;

	ata_set_state(ch, IDE_CHNL_SM_STAT_PREPARING);
	if (!ata_prepare_prdt(ch))
	{
		// todo: error set and notify
		ata_reset_prdt(ch);
		ata_set_state(ch, IDE_CHNL_SM_STAT_READY);
		return;
	}

	ata_set_state(ch, IDE_CHNL_SM_STAT_EXECUTING);

	// todo: issue the execution commands
	ide_exec(ch);
}

// attempt to recover from a transfer error by re-issuing the command.
//
// keeps a retry budget in the channel state machine; once the budget
// is exhausted in UDMA mode the drive is downgraded to PIO and the
// budget restarts.  returns true when a retry was issued, false when
// recovery is abandoned (budget exhausted in PIO mode, or re-issuing
// the command itself failed).
static bool ide_act_retry(uint ch, uint drv)
{
	struct ide_channel_state* state = &channels[ch].sm;
	struct ata_drive_info* info = &channels[ch].drvinfo[drv];

	// first failure of this transfer: open a fresh retry episode
	if (!state->is_retry)
	{
		state->is_retry = 1;
		state->retry_count = 0;
	}

	if (state->retry_count >= IDE_CHNL_SM_MAX_RETRIES_CNT)
	{
		if (info->flags & ATA_DRIVE_DOWNGRADE_PIO_MODE)
		{
			// already in PIO mode - nothing left to try
			state->is_retry = 0;
			state->retry_count = 0;
			return false;
		}

		// UDMA keeps failing: fall back to PIO, restart the budget
		dbg_output1("ata%u-%u: fatal error: downgrade to PIO mode.\n", ch, drv);
		info->flags |= ATA_DRIVE_DOWNGRADE_PIO_MODE;
		state->retry_count = 0;
	}

	state->retry_count++;
	dbg_output2("ata%u-%u: recover from error (%u)\n", ch, drv, state->retry_count);
	if (!ide_exec(ch))
		return true;

	// re-issuing the command failed - give up on this episode
	state->is_retry = 0;
	state->retry_count = 0;
	return false;
}
unsigned char* ppp=NULL;

// pull 'size' bytes from the channel's data port into 'buffer' with
// programmed I/O; the transfer width (dword vs word) follows the
// drive's probed 32-bit PIO capability
static void pio_read_buffer(uint ch, struct ata_drive_info* drvinfo, void* buffer, uint size)
{
	uint port = channels[ch].io_base + ATA_REG_DATA;

	if (drvinfo->flags & ATA_DRIVE_USE_PIO_32BITS)
	{
		insl(port, buffer, size >> 2);	// dword units
	}
	else
	{
		insw(port, buffer, size >> 1);	// word units
	}
}

// tasklets
// tasklet half of a PIO transfer interrupt: move the next chunk of
// sector data from the drive's data port into the current PRD's
// buffer, and advance through the PRDT as each PRD fills up.
static void tasklet_handle_pio(uint ch, struct ata_drive_info* drvinfo)
{
	ata_prd_t* prd;
	uint stat, nsects, i;
	struct ide_channel_state* sm = &channels[ch].sm;

	// reading the status register also acknowledges the interrupt
	stat = ide_cmd_block_read(ch, ATA_REG_STATUS);

	// check DRQ and ERR: DRQ must be set (drive has data ready) and
	// ERR clear, otherwise attempt a retry
	if ((!(stat & ATA_SR_DRQ)) || (stat & ATA_SR_ERR))
	{
		if (!ide_act_retry(ch, ide_sm_drive_idx(ch)))
			; // error notify
		return;
	}

	// Read sector data
	// transfer at most the drive's multi-sector count per interrupt
	// NOTE(review): prd->count is consumed here in sector units, but
	// the PRDT commit path fills it with PAGE_SZ (bytes) - confirm
	// the intended unit, this looks inconsistent
	prd = &channels[ch].prdt[channels[ch].prdt_next_prd];
	nsects = drvinfo->multisect;
	if (nsects > prd->count) nsects = prd->count;
	
	dbg_output3("ata-pio: read %u sectors to %x\n", nsects, prd->phy_base_addr);
	for (i = 0; i < nsects; i++)
	{
		pio_read_buffer(ch, drvinfo, (void*)prd->phy_base_addr, IDE_SECTOR_SIZE);
		prd->phy_base_addr += IDE_SECTOR_SIZE;
	}

	prd->count -= nsects;
	if (prd->count)
	{
		// we need to wait for next interrupt
		// to transfer the data again
		return;
	}

	// finished this PRD, see if this is the last one
	if (prd->flags & ATA_PRD_FLAG_EOT)
	{
		// we finished all PRD in PRDT
		ata_reset_prdt(ch);

		// todo: all finished
		// finished
		return;
	}
	else
	{
		// rewind the virtual addr to original
		// NOTE(review): the rewind subtracts sm->blocks (the whole
		// transfer's block count), not the amount this PRD advanced
		// by - verify against the commit path before trusting this
		prd->phy_base_addr -= sm->blocks * IDE_SECTOR_SIZE;

		// handle next PRD in PRDT
		channels[ch].prdt_next_prd++;
		if (do_ide_act_pio(ch))
		{
			// todo: error notify
			return;
		}
	}
}

// tasklet half of a UDMA transfer interrupt.
//
// reads the status register (which also acknowledges the interrupt),
// stops the bus-master engine, and retries the command when either
// the engine or the drive reports an error.  on success the whole
// PRDT has been transferred by the controller; the debug hex/ASCII
// dump of the test buffer then runs if one was allocated.
//
// fix: the printable-character test read (c >= '0' && c <= '0'),
// which matched only the single character '0'; it now covers the
// digits '0'..'9' as intended.  the unused local 'sm' was removed.
static void tasklet_handle_udma(uint ch, struct ata_drive_info* drvinfo)
{
	uint stat;

	stat = ide_cmd_block_read(ch, ATA_REG_STATUS);
	if (!ide_stop_dma(ch))
	{
		// bus-master engine error: retry
		if (!ide_act_retry(ch, ide_sm_drive_idx(ch)))
			; // error notify
		return;
	}

	if (stat & ATA_SR_ERR)
	{
		// drive-side error: retry
		if (!ide_act_retry(ch, ide_sm_drive_idx(ch)))
			; // error notify
		return;
	}

	// finished - debug dump of the test read buffer (16 bytes per row,
	// hex then ASCII, non-alphanumerics shown as '.')
	if (ppp)
	{
		int i, j;
		for (i = 0; i < 32; ++i)
		{
			printk("\n");
			for (j = 0; j < 16; ++j)
				printk("%02X ", ppp[i * 16 + j]);
			for (j = 0; j < 16; ++j)
			{
				unsigned char c = ppp[i * 16 + j];
				if (!((c >= '0' && c <= '9')
					|| (c >= 'A' && c <= 'Z')
					|| (c >= 'a' && c <= 'z')))
					c = '.';
				printk("%c", c);
			}
		}
	}

	//ata_schedule(ch, ide_sm_drive_idx(ch));
}

// initialize the IDE/ATA layer: probe the devices, then set up the
// PRD table used for bus-master transfers.
//
// returns 0 on success or the error code of the failing init step.
int dev_ide_ata_init(void)
{
	int ret;
	
	ret = ide_ata_dev_init();
	if (ret) return ret;

	ret = init_prdt();

	// test: read 24 sectors (24 * 512 = 12KB = exactly 3 pages) into a
	// scratch buffer that tasklet_handle_udma() hex-dumps on completion.
	// NOTE(review): debug-only code - remove once the driver is verified.
	ppp = vmalloc(3);
	if (ppp)	// fix: original passed the pointer to the read unchecked
		do_ide_read_async(0, 0, 0, 24, ppp);
	return ret;
}

// test for win32
#ifdef TEST_DBG_WIN32
// stub definitions so the driver links in the win32 unit-test build
// without the rest of the kernel
irq_desc_t irq_desc[NR_IRQS];
tasklet_t *tasklet_vec[NR_CPUS] = {0};
irq_cpu_stat_t irq_stat[NR_CPUS] = {0};
vma_t kernel_area;
vma_t page_entry_table_area;
vma_t krnl_proc_preoccupied_area;
// irq registration is a no-op on the host
int _request_irq(uint irq, irqaction_t *action) { return 0; }
// back vmalloc() with the host C runtime heap
void* vmalloc(size_t pages) { return malloc(pages * PAGE_SZ); }

// host-side smoke test: queue several async reads on channel 0 /
// drive 0 and run the scheduler a few times.
void ide_test(void)
{
	int ret;
	char *ptr = (char*)malloc(PAGE_SZ * 20);

	// minimal channel setup normally done by the real init path
	listnode_init(channels[0].drvinfo[0].req_queue);
	listnode_init(channels[0].drvinfo[1].req_queue);
	channels[0].prdt = (ata_prd_t*)malloc(PAGE_SZ);
	memset(channels[0].prdt, 0, PAGE_SZ);

	// the masks page-align the destination pointers.
	// NOTE(review): the (uint) casts truncate pointers on a 64-bit
	// host - this test assumes a 32-bit win32 build; verify before
	// porting.
	ret = do_ide_read_async(0, 0, 0, 8, (void*)(((uint)ptr) & ~(PAGE_SZ - 1)));
	kassert(!ret);
	ret = do_ide_read_async(0, 0, 0, 8, (void*)(((uint)(ptr + PAGE_SZ * 10)) & ~(PAGE_SZ - 1)));
	kassert(!ret);
	ret = do_ide_read_async(0, 0, 8, 8, (void*)(((uint)(ptr + PAGE_SZ)) & ~(PAGE_SZ - 1)));
	kassert(!ret);
	ret = do_ide_read_async(0, 0, 32, 8, (void*)(((uint)(ptr + PAGE_SZ * 2)) & ~(PAGE_SZ - 1)));
	kassert(!ret);
	// drive the state machine through prepare/execute cycles
	ata_schedule(0, 0);
	ata_schedule(0, 0);
	ata_schedule(0, 0);
}

#endif

/* EOF */
