#include "qelib.h"
#include "qe_sdhci.h"



QELOG_DOMAIN("sdhci");



#define SDHCI_GET_VERSION(x) (x->version & SDHCI_SPEC_VER_MASK)



/* Write a 32-bit value to the controller register at byte offset `reg`. */
static inline void sdhci_writel(qe_sdhci_host *host, qe_uint reg, qe_u32 val)
{
    *(volatile qe_u32 *)(host->ioaddr + reg) = val;
}

/* Write a 16-bit value to the controller register at byte offset `reg`. */
static inline void sdhci_writew(qe_sdhci_host *host, qe_uint reg, qe_u16 val)
{
    *(volatile qe_u16 *)(host->ioaddr + reg) = val;
}

/* Write an 8-bit value to the controller register at byte offset `reg`. */
static inline void sdhci_writeb(qe_sdhci_host *host, qe_uint reg, qe_u8 val)
{
    *(volatile qe_u8 *)(host->ioaddr + reg) = val;
}

/* Read an 8-bit value from the controller register at byte offset `reg`. */
static inline qe_u8 sdhci_readb(qe_sdhci_host *host, qe_uint reg)
{
    volatile qe_u8 *p = (volatile qe_u8 *)(host->ioaddr + reg);
    return *p;
}

/* Read a 16-bit value from the controller register at byte offset `reg`. */
static inline qe_u16 sdhci_readw(qe_sdhci_host *host, qe_uint reg)
{
    volatile qe_u16 *p = (volatile qe_u16 *)(host->ioaddr + reg);
    return *p;
}

/* Read a 32-bit value from the controller register at byte offset `reg`. */
static inline qe_u32 sdhci_readl(qe_sdhci_host *host, qe_uint reg)
{
    volatile qe_u32 *p = (volatile qe_u32 *)(host->ioaddr + reg);
    return *p;
}

/*
 * Assert the software-reset bits in `mask` and poll until the controller
 * clears them.  Gives up with a warning after ~100 ms (100 polls, 1 ms
 * apart); the controller is expected to self-clear the bits when done.
 */
static void sdhci_reset(qe_sdhci_host *host, qe_u8 mask)
{
    qe_uint remaining = 100;    /* 100 x 1 ms polls */

    sdhci_writeb(host, SDHCI_REG_RESET, mask);

    for (;;) {
        if (!(sdhci_readb(host, SDHCI_REG_RESET) & mask))
            break;

        if (remaining == 0) {
            qe_warning("reset timeout");
            return;
        }

        remaining--;
        qe_usleep(1000);
    }

    qe_debug("reset done");
}

/*
 * Program the SD clock divider for `clock` Hz and enable the card clock.
 * Waits for the command/data lines to go idle first; a `clock` of 0 just
 * gates the clock off.  Returns qe_ok on success, qe_err_busy when the
 * bus stays busy or the internal clock never stabilizes.
 */
static qe_ret sdhci_set_clock(qe_sdhci_host *host, qe_uint clock)
{
	unsigned int div, clk = 0, timeout;

	/* Wait max 20 ms */
	timeout = 200;
	while (sdhci_readl(host, SDHCI_REG_PRESENT_STATE) &
			   (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
		if (timeout == 0) {
			qe_error("timeout to wait cmd & data inhibit");
			return qe_err_busy;
		}
		timeout--;
		qe_usleep(100);
	}

	/* Gate the clock before touching the divider. */
	sdhci_writew(host, SDHCI_REG_CLOCK_CTRL, 0);

	if (clock == 0)
		return qe_ok;

	/* Give the platform a chance to tune its sampling delay first. */
	if (host->ops && host->ops->set_delay)
		host->ops->set_delay(host);

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			/* Smallest divisor with max_clk/div <= clock. */
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk / div) <= clock)
					break;
			}

			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			/* Register encodes the divisor as N-1 in this mode. */
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock) {
				div = 1;
			} else {
				for (div = 2;
				     div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			/* Register field holds div/2 (clock = max_clk/(2*N)). */
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		div >>= 1;
	}

	/* Let the platform driver post-process/override the divisor. */
	if (host->ops && host->ops->set_clock)
		host->ops->set_clock(host, div);

	/* Split the 10-bit divisor across the low and high register fields. */
	clk |= (div & SDHCI_CC_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, SDHCI_REG_CLOCK_CTRL, clk);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_REG_CLOCK_CTRL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			qe_error("Internal clock never stabilised.");
			return qe_err_busy;
		}
		timeout--;
		qe_usleep(1000);
	}

	/* Internal clock stable: now drive the clock out to the card. */
	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, SDHCI_REG_CLOCK_CTRL, clk);
	return qe_ok;
}

/*
 * Copy the command response out of the controller's response registers
 * into cmd->response[].  For 136-bit (R2) responses the controller strips
 * the CRC byte, so each 32-bit word is shifted left by 8 and its low byte
 * is refilled from the top byte of the next-lower register.
 */
static void sdhci_cmd_done(qe_sdhci_host *host, qe_mmc_cmd *cmd)
{
	int i;
	if (cmd->resp_type & MMC_RSP_136) {
		/* CRC is stripped so we need to do some shifting. */
		for (i = 0; i < 4; i++) {
			cmd->response[i] = sdhci_readl(host,
					SDHCI_REG_RESPONSE + (3-i)*4) << 8;
			if (i != 3)
				cmd->response[i] |= sdhci_readb(host,
						SDHCI_REG_RESPONSE + (3-i)*4-1);
		}
	} else {
		/* Short (48-bit) responses fit in the first register. */
		cmd->response[0] = sdhci_readl(host, SDHCI_REG_RESPONSE);
	}
}

/*
 * Set the SD bus power/voltage.  `power` is a bit index into the
 * MMC_VDD_* voltage mask ((qe_u16)-1 means "power off").
 *
 * Fix: when the requested voltage is off/unsupported (pwr == 0) the
 * function now returns after disabling bus power.  Previously it fell
 * through and set SDHCI_POWER_ON with an empty voltage field.
 */
static void sdhci_set_power(qe_sdhci_host *host, qe_u16 power)
{
    qe_u8 pwr = 0;

    if (power != (qe_u16)-1) {
        switch (1 << power) {
        case MMC_VDD_165_195:
            pwr = SDHCI_POWER_180;
            qe_debug("set power 1.8v");
            break;
        case MMC_VDD_29_30:
        case MMC_VDD_30_31:
            pwr = SDHCI_POWER_300;
            qe_debug("set power 3.0v");
            break;
        case MMC_VDD_32_33:
        case MMC_VDD_33_34:
            pwr = SDHCI_POWER_330;
            qe_debug("set power 3.3v");
            break;
        }
    }

    if (pwr == 0) {
        /* Off or unsupported voltage: disable bus power and stop —
         * do not turn SDHCI_POWER_ON back on. */
        sdhci_writeb(host, SDHCI_REG_POWER_CTRL, 0);
        return;
    }

    pwr |= SDHCI_POWER_ON;

    sdhci_writeb(host, SDHCI_REG_POWER_CTRL, pwr);
}

/*
 * Read the controller capabilities and fill in the mmc configuration:
 * clock range, supported voltages, bus-width and speed-mode caps.
 *
 * f_max/f_min are optional overrides for the max/min bus clock; pass 0
 * to derive them from the controller's base clock and divider range.
 * Returns qe_ok, or qe_err_param when no base clock can be determined.
 *
 * Fix: caps_1 is now zero-initialized.  It was only assigned inside the
 * "spec >= 3.00" branch but read unconditionally further down, so on
 * pre-3.0 controllers the UHS/HS200 capability checks read an
 * uninitialized variable (undefined behavior).
 */
qe_ret qe_sdhci_setup_cfg(qe_sdhci_host *host, qe_mmc_config *cfg, qe_u32 f_max, qe_u32 f_min)
{
    qe_u32 caps;
    qe_u32 caps_1 = 0;  /* fix: read below even on pre-3.0 controllers */

    qe_debug("read cap");
    caps = sdhci_readl(host, SDHCI_REG_CAPABILITIES);
    qe_debug("%s caps:%x", host->mmc->dev.name, caps);

    /* Check if host support SDMA */
    if (!(caps & SDHCI_CAP_SDMA)) {
        qe_debug("%s don't support SDMA", host->mmc->dev.name);
    } else {
        qe_debug("%s support SDMA", host->mmc->dev.name);
        host->flags |= SDHCI_USE_SDMA;
    }

    /* Check if host support ADMA; allocate the 32-byte-aligned
     * descriptor table the controller will DMA from. */
    if (!(caps & SDHCI_CAP_ADMA2)) {
        qe_debug("%s don't support ADMA", host->mmc->dev.name);
    } else {
        qe_debug("%s support ADMA", host->mmc->dev.name);
        qe_debug("adma tab size %d", SDHCI_ADMA_TAB_SIZE);
        host->adma_desc_tab = qe_memalign(32, 32 * sizeof(qe_sdhci_adma_desc));
        qe_assert(host->adma_desc_tab);
        host->flags |= SDHCI_USE_ADMA;
    }

    /* Some controllers only tolerate 32-bit accesses: read the 16-bit
     * version register through a 32-bit read at the aligned offset. */
    if (host->quirks & SDHCI_QUIRK_REG32_RW) {
        host->version = sdhci_readl(host, SDHCI_REG_VERSION - 2) >> 16;
    } else {
        host->version = sdhci_readw(host, SDHCI_REG_VERSION);
    }
    qe_debug("version %x", host->version);

    /* Check whether the clock multiplier is supported or not.
     * NOTE(review): caps_1 is read from SDHCI_REG_CAPABILITIES again;
     * spec 3.0 keeps the clock-mul and SDR/DDR bits in the separate
     * CAPABILITIES_1 register — confirm the offset against qe_sdhci.h. */
    if ((host->version & SDHCI_SPEC_VER_MASK) >= SDHCI_SPEC_300) {
        caps_1 = sdhci_readl(host, SDHCI_REG_CAPABILITIES);
        qe_debug("%s caps1:%x", host->mmc->dev.name, caps_1);
        host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT;
    }

    /* Derive the base clock from the capabilities when the platform
     * driver didn't set one. */
    if (host->max_clk == 0) {
        if ((host->version & SDHCI_SPEC_VER_MASK) >= SDHCI_SPEC_300) {
            host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
                SDHCI_CLOCK_BASE_SHIFT;
        } else {
            host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
                SDHCI_CLOCK_BASE_SHIFT;
        }
        host->max_clk *= 1000000;   /* capability field is in MHz */
        if (host->clk_mul)
            host->max_clk *= host->clk_mul;
    }

    if (host->max_clk == 0) {
        qe_error("%s hardware doesn't specify base clock frequency", host->mmc->dev.name);
        return qe_err_param;
    }

    if (f_max && (f_max < host->max_clk)) {
        cfg->f_max = f_max;
    } else {
        cfg->f_max = host->max_clk;
    }

    /* Minimum clock = base clock over the largest divider the spec
     * version allows. */
    if (f_min) {
        cfg->f_min = f_min;
    } else {
        if ((host->version & SDHCI_SPEC_VER_MASK) >= SDHCI_SPEC_300) {
            cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
        } else {
            cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
        }
    }

    cfg->voltages = 0;

    if (caps & SDHCI_CAP_VDD_330)
        cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
    if (caps & SDHCI_CAP_VDD_300)
        cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
    if (caps & SDHCI_CAP_VDD_180)
        cfg->voltages |= MMC_VDD_165_195;

    if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
        cfg->voltages |= host->voltages;

    cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz | MMC_MODE_4BIT;

    /* Since Host Controller Version3.0 */
    if ((host->version & SDHCI_SPEC_VER_MASK) >= SDHCI_SPEC_300) {
        if (!(caps & SDHCI_CAP_8BIT))
            cfg->host_caps &= ~MMC_MODE_8BIT;
    }

    if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
        cfg->host_caps &= ~MMC_MODE_HS;
        cfg->host_caps &= ~MMC_MODE_HS_52MHz;
    }

    /* UHS modes need 1.8 V signalling; drop them when it is missing or
     * known-broken. */
    if (!(cfg->voltages & MMC_VDD_165_195) ||
        (host->quirks & SDHCI_QUIRK_NO_1_8_V))
        caps_1 &= ~(SDHCI_CAP_SDR104 | SDHCI_CAP_SDR50 |
                    SDHCI_CAP_DDR50);

    if (caps_1 & (SDHCI_CAP_SDR104 | SDHCI_CAP_SDR50 |
                  SDHCI_CAP_DDR50))
        cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);

    if (caps_1 & SDHCI_CAP_SDR104) {
        cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
        /*
         * SD3.0: SDR104 is supported so (for eMMC) the caps2
         * field can be promoted to support HS200.
         */
        cfg->host_caps |= MMC_CAP(MMC_HS_200);
    } else if (caps_1 & SDHCI_CAP_SDR50) {
        cfg->host_caps |= MMC_CAP(UHS_SDR50);
    }

    if (caps_1 & SDHCI_CAP_DDR50)
        cfg->host_caps |= MMC_CAP(UHS_DDR50);

    if (host->host_caps)
        cfg->host_caps |= host->host_caps;

    cfg->b_max = 127;

    /* NOTE(review): host->mmc is re-allocated here although it was
     * dereferenced for dev.name above — this leaks/discards the previous
     * qe_mmc; confirm the intended ownership against the caller
     * (qe_sdhci_probe also uses host->mmc). */
    host->mmc = qe_malloc(sizeof(qe_mmc));
    host->mmc->cfg = cfg;

    qe_debug("cfg:%p fmax:%d fmin:%d", host->mmc->cfg, cfg->f_max, cfg->f_min);

    return qe_ok;
}

/*
 * Find-last-set: return the 1-based index of the highest set bit of x,
 * or 0 when x == 0 (so generic_fls(1) == 1, generic_fls(0x80000000) == 32).
 *
 * Fix: the shifting is now done on an unsigned copy.  The original
 * left-shifted the signed int itself; shifting a 1 into or past the sign
 * bit of a signed int is undefined behavior in C.
 */
static inline int generic_fls(int x)
{
	unsigned int v = (unsigned int)x;
	int r = 32;

	if (!v)
		return 0;
	if (!(v & 0xffff0000u)) {
		v <<= 16;
		r -= 16;
	}
	if (!(v & 0xff000000u)) {
		v <<= 8;
		r -= 8;
	}
	if (!(v & 0xf0000000u)) {
		v <<= 4;
		r -= 4;
	}
	if (!(v & 0xc0000000u)) {
		v <<= 2;
		r -= 2;
	}
	if (!(v & 0x80000000u)) {
		v <<= 1;
		r -= 1;
	}
	return r;
}

#define ROUND(a,b)		(((a) + (b) - 1) & ~((b) - 1))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))

/*
 * Fill the next ADMA2 descriptor slot with a `len`-byte transfer from
 * `buf`.  When `end` is set the descriptor is marked as the final one and
 * the slot index is left in place; otherwise the slot index advances.
 * NOTE(review): only addr_lo is written — if qe_ubase is 64-bit the upper
 * address half is silently dropped here; confirm whether the ADMA64 path
 * needs an addr_hi field set as well.
 */
static void sdhci_adma_desc(qe_sdhci_host *host, char *buf, qe_u16 len, qe_bool end)
{
    qe_u8 attr;
    qe_sdhci_adma_desc *desc;

    desc = &host->adma_desc_tab[host->desc_slot];

    attr = SDHCI_ADMA_DESC_ATTR_VALID | SDHCI_ADMA_DESC_TRANSFER_DATA;
	if (!end)
		host->desc_slot++;
	else
		attr |= SDHCI_ADMA_DESC_ATTR_END;
    desc->attr = attr;
	desc->len = len;
	desc->reserved = 0;
	desc->addr_lo = (qe_ubase)buf;
}

/*
 * Build the ADMA2 descriptor chain for `data`: one SDHCI_ADMA_MAX_LEN
 * descriptor per full chunk plus a final descriptor for the remainder,
 * then flush the table from the data cache so the controller sees it.
 */
static void sdhci_prepare_adma_table(qe_sdhci_host *host, qe_mmc_data *data)
{
    qe_uint remaining = data->blocksize * data->blocks;
    qe_uint desc_count = DIV_ROUND_UP(remaining, SDHCI_ADMA_MAX_LEN);
    qe_uint slot;
    char *addr;

    host->desc_slot = 0;

    /* Pick the caller's buffer for the transfer direction. */
    addr = (data->flags & MMC_DATA_READ) ? data->dest : (char *)data->src;

    qe_debug("i:%d", desc_count);

    /* All descriptors except the last cover a full max-length chunk. */
    for (slot = 1; slot < desc_count; slot++) {
        sdhci_adma_desc(host, addr, SDHCI_ADMA_MAX_LEN, qe_false);
        addr += SDHCI_ADMA_MAX_LEN;
        remaining -= SDHCI_ADMA_MAX_LEN;
    }

    /* Final descriptor carries whatever is left and the END attribute. */
    sdhci_adma_desc(host, addr, remaining, qe_true);

    qe_dcache_flush_range((qe_ubase)host->adma_desc_tab,
                          ROUND(desc_count * sizeof(qe_sdhci_adma_desc), 64));
}

/*
 * Configure the controller for a DMA transfer of `trans_bytes` bytes:
 * select the DMA mode in HOST_CTRL, program the SDMA or ADMA address
 * registers, and flush the data buffer from the CPU cache.
 * `*is_aligned` is cleared only when the SDMA bounce buffer is used.
 *
 * NOTE(review): SDHCI_USE_SDMA is cleared from host->flags at entry, so
 * the `host->flags & SDHCI_USE_SDMA` branch below can never be taken —
 * presumably a deliberate "prefer ADMA" policy, but confirm; otherwise
 * SDMA-only controllers end up with no DMA address programmed.
 */
static void sdhci_prepare_dma(qe_sdhci_host *host, qe_mmc_data *data, qe_bool *is_aligned, qe_uint trans_bytes)
{
    qe_u8 ctrl;

    host->flags &= ~SDHCI_USE_SDMA;

    if (data->flags == MMC_DATA_READ) {
        host->start_addr = (qe_ubase)data->dest;
    } else {
        host->start_addr = (qe_ubase)data->src;
    }
     qe_debug("prepare dma trans:%d %p", trans_bytes, host->start_addr);

    /* Select the DMA mode bits in the host control register. */
    ctrl = sdhci_readb(host, SDHCI_REG_HOST_CTRL);
    ctrl &= ~SDHCI_CTRL_DMA_MASK;
    if (host->flags & SDHCI_USE_ADMA64)
        ctrl |= SDHCI_CTRL_ADMA64;
    else if (host->flags & SDHCI_USE_ADMA)
        ctrl |= SDHCI_CTRL_ADMA32;
    sdhci_writeb(host, SDHCI_REG_HOST_CTRL, ctrl);
    qe_debug("set SDHCI_REG_HOST_CTRL %x", ctrl);

    if (host->flags & SDHCI_USE_SDMA) {
		/* Controllers with the 32-bit DMA quirk need an 8-byte-aligned
		 * buffer; fall back to the bounce buffer when misaligned. */
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    (host->start_addr & 0x7) != 0x0) {
            qe_debug("using aligned buffer");
			*is_aligned = qe_false;
			host->start_addr = (qe_ubase)host->aligned_buffer;
			if (data->flags != MMC_DATA_READ)
				qe_memcpy(host->aligned_buffer, data->src, trans_bytes);
		}
        sdhci_writel(host, SDHCI_REG_DMA_ADDRESS, host->start_addr);
    } else if (host->flags & (SDHCI_USE_ADMA | SDHCI_USE_ADMA64)) {
        sdhci_prepare_adma_table(host, data);
        sdhci_writel(host, SDHCI_REG_ADMA_ADDRESS, host->adma_addr);
        if (host->flags & SDHCI_USE_ADMA64)
            sdhci_writel(host, SDHCI_REG_ADMA_ADDRESS_HI, (qe_u64)host->adma_addr >> 32);
    }

    /* Make the buffer contents visible to the DMA engine. */
    qe_dcache_flush_range(host->start_addr, trans_bytes);
}

/*
 * Move one block between memory and the controller's buffer data port,
 * 32 bits at a time.
 * NOTE(review): `data->dest` is used for both read and write directions —
 * presumably dest/src alias each other (a union) in qe_mmc_data, as in
 * other SDHCI drivers; confirm against qelib's mmc types.
 */
static void sdhci_transfer_pio(qe_sdhci_host *host, qe_mmc_data *data)
{
	int i;
	char *offs;
	for (i = 0; i < data->blocksize; i += 4) {
		offs = data->dest + i;
		if (data->flags == MMC_DATA_READ)
			*(qe_u32 *)offs = sdhci_readl(host, SDHCI_REG_BUFFER);
		else
			sdhci_writel(host, SDHCI_REG_BUFFER, *(qe_u32 *)offs);
	}
}

/*
 * Wait for a PIO or DMA data transfer to finish.
 *
 * PIO path: services SPACE/DATA_AVAIL interrupts one block at a time.
 * SDMA path: re-arms the DMA address register on each boundary interrupt.
 * Returns qe_ok once SDHCI_INT_DATA_END is seen, qe_err_common on an
 * error interrupt or after ~10 s of 10 us polls.
 *
 * Fix: the error log had a "%s" conversion with no matching argument
 * (undefined behavior / garbage output); it now passes the device name
 * like the other messages in this file.
 */
static qe_ret sdhci_transfer_data(qe_sdhci_host *host, qe_mmc_data *data)
{
    qe_u32 rdy;
    qe_u32 mask;
    qe_u32 stat = 0;
    qe_u32 block = 0;
    qe_u32 timeout;
    qe_ubase start_addr = host->start_addr;
    qe_bool transfer_done = qe_false;

    timeout = 1000000;

    rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
    mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;

    do {
        stat = sdhci_readl(host, SDHCI_REG_INT_STATUS);
        if (stat & SDHCI_INT_ERROR) {
            /* fix: supply the missing "%s" argument */
            qe_error("%s error detect in status:%x", host->mmc->dev.name, stat);
            return qe_err_common;
        }
        if (!transfer_done && (stat & rdy)) {
            /* The ready interrupt can race the present-state flags;
             * re-poll until the controller really has data/space. */
            if (!(sdhci_readl(host, SDHCI_REG_PRESENT_STATE) & mask))
                continue;
            sdhci_writel(host, SDHCI_REG_INT_STATUS, rdy);
            qe_debug("transfer pio");
            sdhci_transfer_pio(host, data);
            data->dest += data->blocksize;
            if (++block >= data->blocks) {
				/* Keep looping until the SDHCI_INT_DATA_END is
				 * cleared, even if we finished sending all the
				 * blocks.
				 */
                transfer_done = qe_true;
                qe_debug("transfer done");
                continue;
            }
        }

        if ((host->flags & SDHCI_USE_DMA) &&
            !transfer_done &&
            (stat & SDHCI_INT_DMA_END)) {
            sdhci_writel(host, SDHCI_REG_INT_STATUS, SDHCI_INT_DMA_END);
            if (host->flags & SDHCI_USE_SDMA) {
                /* SDMA halts at every boundary: restart at the next
                 * SDHCI_DEFAULT_BOUNDARY_SIZE-aligned address. */
                start_addr &= ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
                start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
                sdhci_writel(host, SDHCI_REG_DMA_ADDRESS, start_addr);
                qe_debug("set SDHCI_REG_DMA_ADDRESS %x", start_addr);
            }
        }

        if (timeout-- > 0) {
            qe_usleep(10);
        } else {
            qe_error("transfer data timeout");
            return qe_err_common;
        }
    } while (!(stat & SDHCI_INT_DATA_END));

    return qe_ok;
}

/*
 * qe_mmc_ops.send_cmd callback: issue `cmd` (optionally with `data`) and
 * wait for the command — and any data transfer — to complete.
 *
 * Returns qe_ok on success, qe_err_timeout when the command timed out,
 * qe_err_common on any other error.  On failure the CMD and DATA lines
 * are reset before returning.
 *
 * Fixes:
 *  - `is_aligned` was read near the end of the function but only ever
 *    written inside sdhci_prepare_dma()'s quirk path — it is now
 *    initialized to qe_true (uninitialized read otherwise).
 *  - the bus-busy wait uses `time` as an elapsed-millisecond counter
 *    (one increment per 1 ms sleep, compared against `timeout` in ms);
 *    seeding it with qe_time_ms() made the wait expire immediately, so
 *    it now starts at 0.
 */
static qe_ret sdhci_send_cmd(qe_mmc *mmc, qe_mmc_cmd *cmd, qe_mmc_data *data)
{
    qe_ret ret;
    qe_u32 mask;
    qe_u32 mode;
    qe_u32 stat;
    qe_u32 flags;
    qe_uint trans_bytes = 0;
    qe_uint timeout = SDHCI_CMD_DEFAULT_TIMEOUT;
    qe_bool is_aligned = qe_true;   /* fix: cleared only by the SDMA bounce path */
    qe_time_t time;
    qe_sdhci_host *host = (qe_sdhci_host *)mmc->priv;

    qe_debug("cmd idx:%x", cmd->cmdidx);

    host->start_addr = 0;

    ret = qe_ok;

    mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

    /* We shouldn't wait for data inihibit for stop commands, even
       though they might use busy signaling */
    if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
        ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
         cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)) {
        mask &= ~SDHCI_DATA_INHIBIT;
    }

    time = 0;   /* fix: elapsed-ms counter, not a timestamp */

    while (sdhci_readl(host, SDHCI_REG_PRESENT_STATE) & mask) {
        if (time >= timeout) {
            qe_warning("%s busy", mmc->dev.name);
            /* Be lenient: double the timeout up to the hard cap before
             * giving up. */
            if (2 * timeout <= SDHCI_CMD_MAX_TIMEOUT) {
                timeout += timeout;
                qe_warning("timeout increasing to %d ms", timeout);
            } else {
                qe_error("timeout");
                return qe_err_common;
            }
        }
        time++;
        qe_usleep(1000);
    }

    /* Ack any stale interrupt status before issuing a new command. */
    sdhci_writel(host, SDHCI_REG_INT_STATUS, SDHCI_INT_ALL_MASK);

    mask = SDHCI_INT_RESPONSE;
    /* Tuning commands complete with a data-available interrupt rather
     * than a command-complete one. */
    if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
         cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
        mask = SDHCI_INT_DATA_AVAIL;

    /* Translate the MMC response type into SDHCI command flags. */
    if (!(cmd->resp_type & MMC_RSP_PRESENT))
        flags = SDHCI_CMD_RESP_NONE;
    else if (cmd->resp_type & MMC_RSP_136)
        flags = SDHCI_CMD_RESP_LONG;
    else if (cmd->resp_type & MMC_RSP_BUSY) {
        flags = SDHCI_CMD_RESP_SHORT_BUSY;
        if (data)
            mask |= SDHCI_INT_DATA_END;
    } else
        flags = SDHCI_CMD_RESP_SHORT;

    if (cmd->resp_type & MMC_RSP_CRC)
        flags |= SDHCI_CMD_CRC;
    if (cmd->resp_type & MMC_RSP_OPCODE)
        flags |= SDHCI_CMD_INDEX;
    if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
        cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
        flags |= SDHCI_CMD_DATA;

    /* Set Transfer mode regarding to data flag */
    if (data) {
        sdhci_writeb(host, SDHCI_REG_TIMEOUT_CTRL, 0xe);

        mode = SDHCI_TRNS_BLK_CNT_EN;
        trans_bytes = data->blocks * data->blocksize;

        if (data->blocks > 1) {
            mode |= SDHCI_TRNS_MULTI;
        }

        if (data->flags == MMC_DATA_READ) {
            mode |= SDHCI_TRNS_READ;
        }

        if (host->flags & SDHCI_USE_DMA) {
            mode |= SDHCI_TRNS_DMA;
            qe_debug("prepare dma");
            sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
        }

        sdhci_writew(host, SDHCI_REG_BLOCK_SIZE,
            SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, data->blocksize));
        sdhci_writew(host, SDHCI_REG_BLOCK_COUNT, data->blocks);
        sdhci_writew(host, SDHCI_REG_TRANSFER_MODE, mode);
    } else if (cmd->resp_type & MMC_RSP_BUSY) {
        sdhci_writeb(host, SDHCI_REG_TIMEOUT_CTRL, 0xe);
    }

    sdhci_writel(host, SDHCI_REG_ARGUMENT, cmd->cmdarg);
    sdhci_writew(host, SDHCI_REG_COMMAND, SDHCI_MAKE_CMD(cmd->cmdidx, flags));

    /* Here `time` really is a wall-clock timestamp in ms. */
    time = qe_time_ms();

    do {
        stat = sdhci_readl(host, SDHCI_REG_INT_STATUS);
        if (stat & SDHCI_INT_ERROR)
            break;

        if ((qe_time_ms() - time) >= SDHCI_READ_STATUS_TIMEOUT) {
            if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
                return qe_ok;
            } else {
                qe_warning("%s timeout for status update", host->mmc->dev.name);
                return qe_err_common;
            }
        }
    } while ((stat & mask) != mask);
    qe_debug("stat:%x", stat);

    if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
        sdhci_cmd_done(host, cmd);
        sdhci_writel(host, SDHCI_REG_INT_STATUS, mask);
    } else {
        qe_error("cmd:%x error", cmd->cmdidx);
        ret = qe_err_common;
    }

    if (!ret && data) {
        ret = sdhci_transfer_data(host, data);
    }

    if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
        qe_usleep(1000);

    stat = sdhci_readl(host, SDHCI_REG_INT_STATUS);
    sdhci_writel(host, SDHCI_REG_INT_STATUS, SDHCI_INT_ALL_MASK);

    if (!ret) {
        /* A read that went through the SDMA bounce buffer must be
         * copied back to the caller's buffer. */
        if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
            !is_aligned && (data->flags == MMC_DATA_READ))
            qe_memcpy(data->dest, host->aligned_buffer, trans_bytes);
        return qe_ok;
    }

    sdhci_reset(host, SDHCI_RESET_CMD);
    sdhci_reset(host, SDHCI_RESET_DATA);
    if (stat & SDHCI_INT_TIMEOUT) {
        qe_warning("cmd %x timeout", cmd->cmdidx);
        return qe_err_timeout;
    } else {
        return qe_err_common;
    }
}

/*
 * qe_mmc_ops.set_ios callback: apply the core's requested clock, bus
 * width and high-speed setting to the controller, then give the platform
 * driver a post-hook.  Fix: returns qe_ok instead of a bare 0 for
 * consistency with the rest of the file.
 */
static qe_ret sdhci_set_ios(qe_mmc *mmc)
{
    qe_u32 ctrl;
    qe_sdhci_host *host = (qe_sdhci_host *)mmc->priv;

    qe_debug("sdhci_set_ios in");

    /* Reprogram the divider only when the core asked for a new clock. */
    if (mmc->clock != host->clock)
        sdhci_set_clock(host, mmc->clock);

    if (mmc->clk_disable)
        sdhci_set_clock(host, 0);

    /* Set bus width */
    ctrl = sdhci_readb(host, SDHCI_REG_HOST_CTRL);
    if (mmc->bus_width == 8) {
        ctrl &= ~SDHCI_CTRL_4BITBUS;
        /* The 8-bit bus bit only exists on spec >= 3.0 controllers (or
         * those with the WIDE8 quirk). */
        if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
                (host->quirks & SDHCI_QUIRK_USE_WIDE8))
            ctrl |= SDHCI_CTRL_8BITBUS;
    } else {
        if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
                (host->quirks & SDHCI_QUIRK_USE_WIDE8))
            ctrl &= ~SDHCI_CTRL_8BITBUS;
        if (mmc->bus_width == 4)
            ctrl |= SDHCI_CTRL_4BITBUS;
        else
            ctrl &= ~SDHCI_CTRL_4BITBUS;
    }

    /* High-speed bit for clocks above 26 MHz, unless a quirk forbids it. */
    if (mmc->clock > 26000000)
        ctrl |= SDHCI_CTRL_HISPD;
    else
        ctrl &= ~SDHCI_CTRL_HISPD;

    if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
        (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE))
        ctrl &= ~SDHCI_CTRL_HISPD;

    sdhci_writeb(host, SDHCI_REG_HOST_CTRL, ctrl);

    /* If available, call the driver specific "post" set_ios() function */
    if (host->ops && host->ops->set_ios_post)
        return host->ops->set_ios_post(host);

    return qe_ok;
}

/* MMC core callback table implemented by this SDHCI driver. */
static const qe_mmc_ops sdhci_ops = {
    .send_cmd = sdhci_send_cmd,
    .set_ios = sdhci_set_ios,
};

/*
 * Final controller bring-up and registration with the MMC core: reset the
 * controller, power the bus at the highest supported voltage, set up
 * interrupt enables and register the device.  Assumes host->mmc and its
 * cfg are already populated (see qe_sdhci_setup_cfg).
 * NOTE(review): the `ops` parameter is accepted but never used —
 * qe_mmc_register() is handed the file-local sdhci_ops instead; confirm
 * whether platform ops were meant to be stored on the host here.
 */
qe_ret qe_sdhci_probe(qe_sdhci_host *host, qe_const_str name, const qe_sdhci_ops *ops, qe_ptr priv)
{
	qe_mmc *mmc = host->mmc;

    sdhci_reset(host, SDHCI_RESET_ALL);

    /* generic_fls() gives the 1-based index of the top voltage bit;
     * minus one turns it into the bit index sdhci_set_power() expects. */
    sdhci_set_power(host, generic_fls(mmc->cfg->voltages) - 1);

    /* Enable only interrupts served by the SD controller */
	sdhci_writel(host, SDHCI_REG_INT_ENABLE, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK);

	/* Mask all sdhci interrupt sources */
	sdhci_writel(host, SDHCI_REG_NOR_SIG_ENABLE, 0x0);

    host->priv = priv;

    qe_mmc_register(host->mmc, name, &sdhci_ops, host);

    return qe_ok;
}
