Diffstat (limited to 'drivers/mmc/host/mmci.c')
-rw-r--r--  drivers/mmc/host/mmci.c | 1505
1 file changed, 1283 insertions(+), 222 deletions(-)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2ed435bd4b6..92a5f73854c 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2,7 +2,7 @@
* linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- * Copyright (C) 2010 ST-Ericsson AB.
+ * Copyright (C) 2010 ST-Ericsson SA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,44 +18,249 @@
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/pm.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
-#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/mmci.h>
-#include <asm/cacheflush.h>
-#include <asm/div64.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/sizes.h>
+#ifdef CONFIG_ARCH_U8500
+/* Temporary solution to find out if HW is db8500 v1 or v2. */
+#include <mach/hardware.h>
+#endif
+
#include "mmci.h"
#define DRIVER_NAME "mmci-pl18x"
static unsigned int fmax = 515633;
+static unsigned int dataread_delay_clks = 7500000;
+
+/**
+ * struct variant_data - MMCI variant-specific quirks
+ * @clkreg: default value for MMCICLOCK register
+ * @clkreg_enable: enable value for MMCICLOCK register
+ * @dmareg_enable: enable value for MMCIDATACTRL register
+ * @datalength_bits: number of bits in the MMCIDATALENGTH register
+ * @fifosize: number of bytes that can be written when MCI_TXFIFOEMPTY
+ * is asserted (likewise for RX)
+ * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
+ * is asserted (likewise for RX)
+ * @txsize_threshold: sets the DMA burst size to minimal if the transfer
+ * size is less than or equal to this threshold, specified in
+ * number of bytes. Set to 0 for no burst compensation.
+ * @broken_blockend: the MCI_DATABLOCKEND flag is broken on this hardware
+ * and cannot be used at all
+ * @sdio: variant supports SDIO
+ * @st_clkdiv: true if using a ST-specific clock divider algorithm
+ * @pwrreg_powerup: power up value for MMCIPOWER register
+ * @signal_direction: input/output direction of bus signals can be indicated;
+ * this is typically used by e.g. voltage level translators.
+ * @non_power_of_2_blksize: variant supports block sizes that are not
+ * a power of two.
+ */
+struct variant_data {
+ unsigned int clkreg;
+ unsigned int clkreg_enable;
+ unsigned int dmareg_enable;
+ unsigned int datalength_bits;
+ unsigned int fifosize;
+ unsigned int fifohalfsize;
+ unsigned int txsize_threshold;
+ bool broken_blockend;
+ bool sdio;
+ bool st_clkdiv;
+ unsigned int pwrreg_powerup;
+ bool signal_direction;
+ bool non_power_of_2_blksize;
+};
+
+static struct variant_data variant_arm = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
+};
+
+static struct variant_data variant_u300 = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg_enable = 1 << 13, /* HWFCEN */
+ .datalength_bits = 16,
+ .sdio = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
+};
+
+static struct variant_data variant_ux500 = {
+ .fifosize = 30 * 4,
+ .fifohalfsize = 8 * 4,
+ .txsize_threshold = 16,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = 1 << 14, /* HWFCEN */
+ .dmareg_enable = 1 << 12, /* DMAREQCTRL */
+ .datalength_bits = 24,
+ .broken_blockend = true,
+ .sdio = true,
+ .st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
+ .non_power_of_2_blksize = true,
+};
+/*
+ * Debugfs
+ */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static int mmci_regs_show(struct seq_file *seq, void *v)
+{
+ struct mmci_host *host = seq->private;
+ unsigned long iflags;
+ u32 pwr, clk, arg, cmd, rspcmd, r0, r1, r2, r3;
+ u32 dtimer, dlength, dctrl, dcnt;
+ u32 sta, clear, mask0, mask1, fifocnt, fifo;
+
+ mmc_host_enable(host->mmc);
+ spin_lock_irqsave(&host->lock, iflags);
+
+ pwr = readl(host->base + MMCIPOWER);
+ clk = readl(host->base + MMCICLOCK);
+ arg = readl(host->base + MMCIARGUMENT);
+ cmd = readl(host->base + MMCICOMMAND);
+ rspcmd = readl(host->base + MMCIRESPCMD);
+ r0 = readl(host->base + MMCIRESPONSE0);
+ r1 = readl(host->base + MMCIRESPONSE1);
+ r2 = readl(host->base + MMCIRESPONSE2);
+ r3 = readl(host->base + MMCIRESPONSE3);
+ dtimer = readl(host->base + MMCIDATATIMER);
+ dlength = readl(host->base + MMCIDATALENGTH);
+ dctrl = readl(host->base + MMCIDATACTRL);
+ dcnt = readl(host->base + MMCIDATACNT);
+ sta = readl(host->base + MMCISTATUS);
+ clear = readl(host->base + MMCICLEAR);
+ mask0 = readl(host->base + MMCIMASK0);
+ mask1 = readl(host->base + MMCIMASK1);
+ fifocnt = readl(host->base + MMCIFIFOCNT);
+ fifo = readl(host->base + MMCIFIFO);
+
+ spin_unlock_irqrestore(&host->lock, iflags);
+ mmc_host_disable(host->mmc);
+
+ seq_printf(seq, "\033[1;34mMMCI registers\033[0m\n");
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_power", pwr);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_clock", clk);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_arg", arg);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_cmd", cmd);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_respcmd", rspcmd);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_resp0", r0);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_resp1", r1);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_resp2", r2);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_resp3", r3);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_datatimer", dtimer);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_datalen", dlength);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_datactrl", dctrl);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_datacnt", dcnt);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_status", sta);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_iclear", clear);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_imask0", mask0);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_imask1", mask1);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_fifocnt", fifocnt);
+ seq_printf(seq, "%-20s:0x%x\n", "mmci_fifo", fifo);
+
+ return 0;
+}
+
+static int mmci_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmci_regs_show, inode->i_private);
+}
+
+static const struct file_operations mmci_fops_regs = {
+ .owner = THIS_MODULE,
+ .open = mmci_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void mmci_debugfs_create(struct mmci_host *host)
+{
+ host->debug_regs = debugfs_create_file("regs", S_IRUGO,
+ host->mmc->debugfs_root, host,
+ &mmci_fops_regs);
+
+ if (IS_ERR(host->debug_regs))
+ dev_err(mmc_dev(host->mmc),
+ "failed to create debug regs file\n");
+}
+
+static void mmci_debugfs_remove(struct mmci_host *host)
+{
+ debugfs_remove(host->debug_regs);
+}
+
+#else
+static inline void mmci_debugfs_create(struct mmci_host *host) { }
+static inline void mmci_debugfs_remove(struct mmci_host *host) { }
+#endif
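/*
 * Usage sketch for the debugfs file above (assuming debugfs is
 * mounted at /sys/kernel/debug and this host happens to register as
 * mmc0; the actual name depends on probe order): the register
 * snapshot can then be read with "cat /sys/kernel/debug/mmc0/regs".
 */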
+
+/*
+ * Ugly hack! This must be removed soon!!
+ *
+ * u8500_sdio_detect_card() - Initiates a card scan for the SDIO host.
+ * This is required to initiate a card rescan from an SDIO client
+ * device driver.
+ *
+ * sdio_host_ptr - Host pointer to save the SDIO host data structure
+ * (will only work when the SDIO device is probed as the last MMCI device).
+ */
+static struct mmci_host *sdio_host_ptr;
+void u8500_sdio_detect_card(void)
+{
+ struct mmci_host *host = sdio_host_ptr;
+ if (sdio_host_ptr && host->mmc)
+ mmc_detect_change(host->mmc, msecs_to_jiffies(10));
+
+ return;
+}
+EXPORT_SYMBOL(u8500_sdio_detect_card);
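/*
 * Caller sketch for the hack above, assuming a hypothetical SDIO
 * function driver that powers its card up out-of-band; all names
 * below are illustrative, not part of this patch:
 */
#if 0
static void my_wlan_power_up(struct my_wlan *wl)
{
	my_wlan_enable_regulators(wl);	/* hypothetical platform hook */
	/* ask the MMCI host to rescan the bus and pick up the card */
	u8500_sdio_detect_card();
}
#endif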
/*
* This must be called with host->lock held
*/
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
- u32 clk = 0;
+ struct variant_data *variant = host->variant;
+ u32 clk = variant->clkreg;
if (desired) {
if (desired >= host->mclk) {
- clk = MCI_CLK_BYPASS;
+ clk = MCI_CLK_BYPASS | MCI_NEG_EDGE;
host->cclk = host->mclk;
+ } else if (variant->st_clkdiv) {
+ clk = ((host->mclk + desired - 1) / desired) - 2;
+ if (clk >= 256)
+ clk = 255;
+ host->cclk = host->mclk / (clk + 2);
} else {
clk = host->mclk / (2 * desired) - 1;
if (clk >= 256)
clk = 255;
host->cclk = host->mclk / (2 * (clk + 1));
}
- if (host->hw_designer == AMBA_VENDOR_ST)
- clk |= MCI_ST_FCEN; /* Bug fix in ST IP block */
+
+ clk |= variant->clkreg_enable;
clk |= MCI_CLK_ENABLE;
/* This hasn't proven to be worthwhile */
/* clk |= MCI_CLK_PWRSAVE; */
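/*
 * Worked example for the two divider algorithms above (illustrative
 * numbers): with mclk = 100 MHz and desired = 25 MHz, the ST divider
 * gives clk = DIV_ROUND_UP(100, 25) - 2 = 2 and cclk =
 * 100 / (2 + 2) = 25 MHz, while the ARM divider gives
 * clk = 100 / (2 * 25) - 1 = 1 and cclk = 100 / (2 * (1 + 1)) =
 * 25 MHz. For desired = 26 MHz the ST divider still yields 25 MHz,
 * staying at or below the request, whereas the ARM divider truncates
 * to clk = 0 and overshoots to cclk = 50 MHz.
 */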
@@ -91,63 +296,62 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
spin_lock(&host->lock);
}
-static void mmci_stop_data(struct mmci_host *host)
-{
- writel(0, host->base + MMCIDATACTRL);
- writel(0, host->base + MMCIMASK1);
- host->data = NULL;
-}
-
-static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
+static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
- unsigned int datactrl, timeout, irqmask;
- unsigned long long clks;
- void __iomem *base;
- int blksz_bits;
+ void __iomem *base = host->base;
- dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
- data->blksz, data->blocks, data->flags);
+ if (host->singleirq) {
+ unsigned int mask0 = readl(base + MMCIMASK0);
- host->data = data;
- host->size = data->blksz;
- host->data_xfered = 0;
+ mask0 &= ~MCI_IRQ1MASK;
+ mask0 |= mask;
- mmci_init_sg(host, data);
+ writel(mask0, base + MMCIMASK0);
+ }
- clks = (unsigned long long)data->timeout_ns * host->cclk;
- do_div(clks, 1000000000UL);
+ writel(mask, base + MMCIMASK1);
+}
- timeout = data->timeout_clks + (unsigned int)clks;
+static void mmci_stop_data(struct mmci_host *host)
+{
+ u32 clk;
+ unsigned int datactrl = 0;
- base = host->base;
- writel(timeout, base + MMCIDATATIMER);
- writel(host->size, base + MMCIDATALENGTH);
+ /*
+ * The ST Micro variants have a special bit
+ * to enable SDIO mode. This bit must remain set even when not
+ * doing data transfers, otherwise no SDIO interrupts can be
+ * received.
+ */
+ if (host->variant->sdio &&
+ host->mmc->card &&
+ mmc_card_sdio(host->mmc->card))
+ datactrl |= MCI_ST_DPSM_SDIOEN;
- blksz_bits = ffs(data->blksz) - 1;
- BUG_ON(1 << blksz_bits != data->blksz);
+ writel(datactrl, host->base + MMCIDATACTRL);
+ mmci_set_mask1(host, 0);
- datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
- if (data->flags & MMC_DATA_READ) {
- datactrl |= MCI_DPSM_DIRECTION;
- irqmask = MCI_RXFIFOHALFFULLMASK;
+ /* Needed for DDR */
+ if (host->mmc->card && mmc_card_ddr_mode(host->mmc->card)) {
+ clk = readl(host->base + MMCICLOCK);
+ clk &= ~(MCI_NEG_EDGE);
- /*
- * If we have less than a FIFOSIZE of bytes to transfer,
- * trigger a PIO interrupt as soon as any data is available.
- */
- if (host->size < MCI_FIFOSIZE)
- irqmask |= MCI_RXDATAAVLBLMASK;
- } else {
- /*
- * We don't actually need to include "FIFO empty" here
- * since its implicit in "FIFO half empty".
- */
- irqmask = MCI_TXFIFOHALFEMPTYMASK;
+ writel(clk, (host->base + MMCICLOCK));
}
- writel(datactrl, base + MMCIDATACTRL);
- writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
- writel(irqmask, base + MMCIMASK1);
+ host->data = NULL;
+}
+
+static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
+{
+ unsigned int flags = SG_MITER_ATOMIC;
+
+ if (data->flags & MMC_DATA_READ)
+ flags |= SG_MITER_TO_SG;
+ else
+ flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
static void
@@ -179,49 +383,499 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
}
static void
-mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
- unsigned int status)
+mmci_complete_data_xfer(struct mmci_host *host)
{
- if (status & MCI_DATABLOCKEND) {
- host->data_xfered += data->blksz;
-#ifdef CONFIG_ARCH_U300
+ struct mmc_data *data = host->data;
+
+ if ((host->size == 0) || data->error) {
+
+ /*
+ * Variants with broken blockend flags, as well as DMA
+ * transfers, handle the end of the entire transfer here.
+ */
+ if (host->last_blockend && !data->error)
+ host->data_xfered = data->blksz * data->blocks;
+
+ mmci_stop_data(host);
+
+ if (!data->stop)
+ mmci_request_end(host, data->mrq);
+ else
+ mmci_start_command(host, data->stop, 0);
+ }
+}
+
+/*
+ * All the DMA operation mode stuff goes inside this ifdef.
+ * This assumes that you have a generic DMA device interface,
+ * no custom DMA interfaces are supported.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void __devinit mmci_setup_dma(struct mmci_host *host)
+{
+ struct mmci_platform_data *plat = host->plat;
+ dma_cap_mask_t mask;
+
+ if (!plat || !plat->dma_filter) {
+ dev_err(mmc_dev(host->mmc), "no DMA platform data!\n");
+ return;
+ }
+
+ /* Try to acquire a generic DMA engine slave channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ /*
+ * If only an RX channel is specified, the driver will
+ * attempt to use it bidirectionally; however, if it
+ * is specified but cannot be located, DMA will be disabled.
+ */
+ host->dma_rx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_rx_param);
+ /* E.g if no DMA hardware is present */
+ if (!host->dma_rx_channel) {
+ dev_err(mmc_dev(host->mmc), "no RX DMA channel!\n");
+ return;
+ }
+ if (plat->dma_tx_param) {
+ host->dma_tx_channel = dma_request_channel(mask,
+ plat->dma_filter,
+ plat->dma_tx_param);
+ if (!host->dma_tx_channel) {
+ dma_release_channel(host->dma_rx_channel);
+ host->dma_rx_channel = NULL;
+ return;
+ }
+ } else {
+ host->dma_tx_channel = host->dma_rx_channel;
+ }
+ host->dma_enable = true;
+ dev_info(mmc_dev(host->mmc), "use DMA channels DMA RX %s, DMA TX %s\n",
+ dma_chan_name(host->dma_rx_channel),
+ dma_chan_name(host->dma_tx_channel));
+}
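/*
 * Platform wiring sketch for the DMA hooks used above; the names are
 * hypothetical and the real filter and channel configs depend on the
 * DMA controller driver in use:
 */
#if 0
static bool my_mmci_dma_filter(struct dma_chan *chan, void *param)
{
	/*
	 * Typically delegates to the dmac driver: return true only if
	 * this channel can serve the MMCI request lines described by
	 * the controller-specific param cookie.
	 */
	return my_dmac_filter(chan, param);	/* hypothetical */
}

static struct mmci_platform_data mmci0_plat = {
	.dma_filter	= my_mmci_dma_filter,
	.dma_rx_param	= &mmci0_dma_rx_cfg,	/* hypothetical cfg */
	.dma_tx_param	= &mmci0_dma_tx_cfg,	/* may be NULL: RX reused */
};
#endif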
+
+/*
+ * This is only used from __devinit or __devexit paths, so
+ * inline it so that it can be discarded.
+ */
+static inline void mmci_disable_dma(struct mmci_host *host)
+{
+ if (host->dma_rx_channel)
+ dma_release_channel(host->dma_rx_channel);
+ if (host->dma_tx_channel)
+ dma_release_channel(host->dma_tx_channel);
+ host->dma_enable = false;
+}
+
+static void mmci_dma_data_end(struct mmci_host *host)
+{
+ struct mmc_data *data = host->data;
+
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ host->dma_on_current_xfer = false;
+}
+
+static void mmci_dma_terminate(struct mmci_host *host)
+{
+ struct mmc_data *data = host->data;
+ struct dma_chan *chan;
+
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ if (data->flags & MMC_DATA_READ)
+ chan = host->dma_rx_channel;
+ else
+ chan = host->dma_tx_channel;
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ host->dma_on_current_xfer = false;
+}
+
+static void mmci_dma_callback(void *arg)
+{
+ unsigned long flags;
+ struct mmci_host *host = arg;
+
+ dev_vdbg(mmc_dev(host->mmc), "DMA transfer done!\n");
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ mmci_dma_data_end(host);
+
+ /* Mark that the entire data is transferred for this dma transfer. */
+ host->size = 0;
+
+ /*
+ * Make sure MMCI has received MCI_DATAEND before
+ * completing the data transfer.
+ */
+ if (host->dataend)
+ mmci_complete_data_xfer(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ struct variant_data *variant = host->variant;
+ struct dma_slave_config rx_conf = {
+ .src_addr = host->phybase + MMCIFIFO,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .direction = DMA_FROM_DEVICE,
+ .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ };
+ struct dma_slave_config tx_conf = {
+ .dst_addr = host->phybase + MMCIFIFO,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .direction = DMA_TO_DEVICE,
+ .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ };
+ struct mmc_data *data = host->data;
+ enum dma_data_direction direction;
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sg;
+ dma_cookie_t cookie;
+ int i;
+ unsigned int irqmask0;
+ int sg_len;
+
+ /* If less than or equal to the fifo size, don't bother with DMA */
+ if (host->size <= variant->fifosize)
+ return -EINVAL;
+
+ datactrl |= MCI_DPSM_DMAENABLE;
+ datactrl |= variant->dmareg_enable;
+
+ if (data->flags & MMC_DATA_READ) {
+ if (host->size <= variant->txsize_threshold)
+ rx_conf.src_maxburst = 1;
+
+ direction = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ chan->device->device_control(chan, DMA_SLAVE_CONFIG,
+ (unsigned long) &rx_conf);
+ } else {
+ if (host->size <= variant->txsize_threshold)
+ tx_conf.dst_maxburst = 1;
+
+ direction = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ chan->device->device_control(chan, DMA_SLAVE_CONFIG,
+ (unsigned long) &tx_conf);
+ }
+
+ /* Check for weird stuff in the sg list */
+ for_each_sg(data->sg, sg, data->sg_len, i) {
+ dev_vdbg(mmc_dev(host->mmc),
+ "MMCI SGlist %d dir %d: length: %08x\n",
+ i, direction, sg->length);
+ if (sg->offset & 3 || sg->length & 3)
+ return -EINVAL;
+ }
+
+ sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, direction);
+ if (!sg_len)
+ goto map_err;
+
+ desc = chan->device->device_prep_slave_sg(chan,
+ data->sg, sg_len, direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+ goto unmap_exit;
+
+ desc->callback = mmci_dma_callback;
+ desc->callback_param = host;
+ host->dma_desc = desc;
+ dev_vdbg(mmc_dev(host->mmc), "Submit MMCI DMA job, sglen %d "
+ "blksz %04x blks %04x flags %08x\n",
+ data->sg_len, data->blksz, data->blocks, data->flags);
+ cookie = desc->tx_submit(desc);
+
+ /* Here overloaded DMA controllers may fail */
+ if (dma_submit_error(cookie))
+ goto unmap_exit;
+
+ host->dma_on_current_xfer = true;
+ chan->device->device_issue_pending(chan);
+
+ /*
+ * MMCI monitors both MCI_DATAEND and the DMA callback.
+ * Both events must occur before the transfer is considered
+ * to be completed. MCI_DATABLOCKEND is not used in DMA mode.
+ */
+ host->last_blockend = true;
+ irqmask0 = readl(host->base + MMCIMASK0);
+ irqmask0 &= ~MCI_DATABLOCKENDMASK;
+ writel(irqmask0, host->base + MMCIMASK0);
+
+ /* Trigger the DMA transfer */
+ writel(datactrl, host->base + MMCIDATACTRL);
+ return 0;
+
+unmap_exit:
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, direction);
+map_err:
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ return -ENOMEM;
+}
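/*
 * Burst sizing example for the slave configs above, derived from the
 * variant tables earlier in this patch: on ux500, fifohalfsize is
 * 8 * 4 = 32 bytes, so src/dst_maxburst becomes 32 >> 2 = 8 words.
 * A transfer of at most txsize_threshold (16) bytes drops maxburst to
 * a single word so the DMAC cannot burst past the end of the data.
 * Transfers that fit in the FIFO (<= fifosize) skip DMA and use PIO,
 * as does any sg element whose offset or length is not word-aligned.
 */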
+#else
+/* Blank functions if the DMA engine is not available */
+static inline void mmci_setup_dma(struct mmci_host *host)
+{
+}
+
+static inline void mmci_disable_dma(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_data_end(struct mmci_host *host)
+{
+}
+
+static inline void mmci_dma_terminate(struct mmci_host *host)
+{
+}
+
+static inline int mmci_dma_start_data(struct mmci_host *host,
+ unsigned int datactrl)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
+{
+ struct variant_data *variant = host->variant;
+ unsigned int datactrl, timeout, irqmask0, irqmask1;
+ unsigned int clkcycle_ns;
+ void __iomem *base;
+ int blksz_bits;
+ u32 clk;
+
+ dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
+ data->blksz, data->blocks, data->flags);
+
+ host->data = data;
+ host->size = data->blksz * data->blocks;
+ host->data_xfered = 0;
+ host->last_blockend = false;
+ host->dataend = false;
+ host->cache_len = 0;
+ host->cache = 0;
+
+ clkcycle_ns = 1000000000 / host->cclk;
+ timeout = data->timeout_ns / clkcycle_ns;
+ timeout += data->timeout_clks;
+
+ if (data->flags & MMC_DATA_READ) {
/*
- * On the U300 some signal or other is
- * badly routed so that a data write does
- * not properly terminate with a MCI_DATAEND
- * status flag. This quirk will make writes
- * work again.
+ * Since the read command is sent after we have set up
+ * the data transfer we must increase the data timeout.
+ * Unfortunately this is not enough since some cards
+ * do not seem to stick to what is stated in their
+ * CSD for TAAC and NSAC.
*/
- if (data->flags & MMC_DATA_WRITE)
- status |= MCI_DATAEND;
+ timeout += dataread_delay_clks;
+ }
+
+ base = host->base;
+ writel(timeout, base + MMCIDATATIMER);
+ writel(host->size, base + MMCIDATALENGTH);
+
+ blksz_bits = ffs(data->blksz) - 1;
+
+#ifdef CONFIG_ARCH_U8500
+ /* Temporary solution for db8500v2. */
+ if (cpu_is_u8500v20_or_later())
+ datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
+ else
#endif
+ datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
+
+ if (data->flags & MMC_DATA_READ)
+ datactrl |= MCI_DPSM_DIRECTION;
+
+ if (host->mmc->card && mmc_card_ddr_mode(host->mmc->card)) {
+ datactrl |= MCI_ST_DPSM_DDRMODE;
+
+ /* Needed for DDR */
+ clk = readl(base + MMCICLOCK);
+ clk |= MCI_NEG_EDGE;
+
+ writel(clk, (base + MMCICLOCK));
+ }
+
+ if (variant->sdio &&
+ host->mmc->card &&
+ mmc_card_sdio(host->mmc->card)) {
+ /*
+ * The ST Micro variants have a special bit
+ * to enable SDIO mode. This bit is set the first time
+ * an SDIO data transfer is done and must remain set
+ * after the data transfer is completed, since
+ * otherwise no SDIO interrupts can be
+ * received.
+ */
+ datactrl |= MCI_ST_DPSM_SDIOEN;
+
+ /*
+ * On the ST Micro variant, SDIO transfers of 8 bytes
+ * or less need to have clock H/W flow
+ * control disabled. Since flow control is
+ * not really needed for anything that fits in the
+ * FIFO, we can disable it for any write smaller
+ * than the FIFO size.
+ */
+ if ((host->size <= variant->fifosize) &&
+ (data->flags & MMC_DATA_WRITE))
+ writel(readl(host->base + MMCICLOCK) &
+ ~variant->clkreg_enable,
+ host->base + MMCICLOCK);
+ else
+ writel(readl(host->base + MMCICLOCK) |
+ variant->clkreg_enable,
+ host->base + MMCICLOCK);
+ }
+
+ if (host->dma_enable) {
+ int ret;
+
+ /*
+ * Attempt to use DMA operation mode; if this
+ * fails, fall back to PIO mode.
+ */
+ ret = mmci_dma_start_data(host, datactrl);
+ if (!ret)
+ return;
+ }
+
+ /* IRQ mode, map the SG list for CPU reading/writing */
+ mmci_init_sg(host, data);
+
+ if (data->flags & MMC_DATA_READ) {
+ irqmask1 = MCI_RXFIFOHALFFULLMASK;
+
+ /*
+ * If we have less than a FIFOSIZE of bytes to
+ * transfer, trigger a PIO interrupt as soon as any
+ * data is available.
+ */
+ if (host->size < variant->fifosize)
+ irqmask1 |= MCI_RXDATAAVLBLMASK;
+ } else {
+ /*
+ * We don't actually need to include "FIFO empty" here
+ * since its implicit in "FIFO half empty".
+ */
+ irqmask1 = MCI_TXFIFOHALFEMPTYMASK;
+ }
+
+ /* Setup IRQ */
+ irqmask0 = readl(base + MMCIMASK0);
+ if (variant->broken_blockend) {
+ host->last_blockend = true;
+ irqmask0 &= ~MCI_DATABLOCKENDMASK;
+ } else {
+ irqmask0 |= MCI_DATABLOCKENDMASK;
}
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
- dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
+ writel(irqmask0, base + MMCIMASK0);
+ mmci_set_mask1(host, irqmask1);
+
+ /* Start the data transfer */
+ writel(datactrl, base + MMCIDATACTRL);
+}
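/*
 * Worked example for the timeout and block size setup above
 * (illustrative numbers): with cclk = 25 MHz, clkcycle_ns =
 * 1000000000 / 25000000 = 40 ns, so a card requesting timeout_ns =
 * 100 ms gets 100000000 / 40 = 2500000 data timer clocks, plus
 * timeout_clks, plus dataread_delay_clks (7500000) for reads. For a
 * 512-byte block, blksz_bits = ffs(512) - 1 = 9 and lands in
 * datactrl bits [7:4]; db8500v2 instead takes the raw block size
 * shifted into the upper half-word (data->blksz << 16).
 */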
+
+static void
+mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
+ unsigned int status)
+{
+ struct variant_data *variant = host->variant;
+
+ /* First check for errors */
+ if (status & MCI_DATA_ERR) {
+ dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n",
+ status);
if (status & MCI_DATACRCFAIL)
data->error = -EILSEQ;
else if (status & MCI_DATATIMEOUT)
data->error = -ETIMEDOUT;
else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
data->error = -EIO;
- status |= MCI_DATAEND;
/*
* We hit an error condition. Ensure that any data
- * partially written to a page is properly coherent.
+ * partially written to a page is properly coherent,
+ * unless we're using DMA.
*/
- if (host->sg_len && data->flags & MMC_DATA_READ)
- flush_dcache_page(sg_page(host->sg_ptr));
+ if (host->dma_on_current_xfer)
+ mmci_dma_terminate(host);
+ else if (data->flags & MMC_DATA_READ) {
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (sg_miter_next(sg_miter)) {
+ flush_dcache_page(sg_miter->page);
+ sg_miter_stop(sg_miter);
+ }
+ local_irq_restore(flags);
+ }
}
- if (status & MCI_DATAEND) {
- mmci_stop_data(host);
- if (!data->stop) {
- mmci_request_end(host, data->mrq);
- } else {
- mmci_start_command(host, data->stop, 0);
+ /*
+ * On ARM variants in PIO mode, MCI_DATABLOCKEND
+ * is always sent first, and we increase the
+ * transferred number of bytes for that IRQ. Then
+ * MCI_DATAEND follows and we conclude the transaction.
+ *
+ * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
+ * doesn't seem to immediately clear from the status,
+ * so we can't use it to keep count when only one irq is
+ * used because the irq will hit for other reasons, and
+ * then the flag is still up. So we use the MCI_DATAEND
+ * IRQ at the end of the entire transfer because
+ * MCI_DATABLOCKEND is broken.
+ *
+ * In the U300, the IRQs can arrive out-of-order,
+ * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
+ * so for this case we use the flags "last_blockend" and
+ * "dataend" to make sure both IRQs have arrived before
+ * concluding the transaction. (This does not apply
+ * to the Ux500 which doesn't fire MCI_DATABLOCKEND
+ * at all.) In DMA mode it suffers from the same problem
+ * as the Ux500.
+ */
+ if (status & MCI_DATABLOCKEND) {
+ /*
+ * Just being a little over-cautious, we do not
+ * use this progressive update if the hardware blockend
+ * flag is unreliable: since it can stay high between
+ * IRQs it will corrupt the transfer counter.
+ */
+ if (!variant->broken_blockend && !host->dma_on_current_xfer) {
+ host->data_xfered += data->blksz;
+
+ if (host->data_xfered == data->blksz * data->blocks)
+ host->last_blockend = true;
}
}
+
+ if (status & MCI_DATAEND)
+ host->dataend = true;
+
+ /*
+ * On variants with broken blockend we shall only wait for dataend;
+ * on others we must sync with the blockend signal since they can
+ * appear out-of-order.
+ */
+ if ((host->dataend && host->last_blockend) || data->error)
+ mmci_complete_data_xfer(host);
}
static void
@@ -237,22 +891,24 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
cmd->resp[2] = readl(base + MMCIRESPONSE2);
cmd->resp[3] = readl(base + MMCIRESPONSE3);
- if (status & MCI_CMDTIMEOUT) {
+ if (status & MCI_CMDTIMEOUT)
cmd->error = -ETIMEDOUT;
- } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
+ else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC)
cmd->error = -EILSEQ;
- }
if (!cmd->data || cmd->error) {
- if (host->data)
+ if (host->data) {
+ if (host->dma_on_current_xfer)
+ mmci_dma_terminate(host);
mmci_stop_data(host);
+ }
mmci_request_end(host, cmd->mrq);
- } else if (!(cmd->data->flags & MMC_DATA_READ)) {
+ } else if (!(cmd->data->flags & MMC_DATA_READ))
mmci_start_data(host, cmd->data);
- }
}
-static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
+static int mmci_pio_read(struct mmci_host *host, char *buffer,
+ unsigned int remain)
{
void __iomem *base = host->base;
char *ptr = buffer;
@@ -268,7 +924,24 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
if (count <= 0)
break;
- readsl(base + MMCIFIFO, ptr, count >> 2);
+ /*
+ * SDIO especially may want to receive something that is
+ * not divisible by 4 (as opposed to card sectors
+ * etc). Therefore make sure we always read the last bytes
+ * out of the FIFO.
+ */
+ switch (count) {
+ case 1:
+ case 3:
+ readsb(base + MMCIFIFO, ptr, count);
+ break;
+ case 2:
+ readsw(base + MMCIFIFO, ptr, 1);
+ break;
+ default:
+ readsl(base + MMCIFIFO, ptr, count >> 2);
+ break;
+ }
ptr += count;
remain -= count;
@@ -283,21 +956,96 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
return ptr - buffer;
}
-static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
+static int mmci_pio_write(struct mmci_host *host, char *buffer,
+ unsigned int remain, u32 status)
{
+ struct variant_data *variant = host->variant;
void __iomem *base = host->base;
char *ptr = buffer;
+ unsigned int data_left = host->size;
+ unsigned int count, maxcnt;
+ char *cache_ptr;
+ int i;
+
do {
- unsigned int count, maxcnt;
+ maxcnt = status & MCI_TXFIFOEMPTY ?
+ variant->fifosize : variant->fifohalfsize;
+
+ /*
+ * A write to the FIFO must always be done with 4-byte-aligned
+ * data. If the buffer is not 4-byte aligned we must pad the
+ * data, but this must only be done for the final write of the
+ * entire data transfer, otherwise we would corrupt the data.
+ * Thus a four-byte cache buffer is needed to temporarily
+ * store data.
+ */
+
+ if (host->cache_len) {
+ cache_ptr = (char *)&host->cache;
+ cache_ptr = cache_ptr + host->cache_len;
+ data_left += host->cache_len;
+
+ while ((host->cache_len < 4) && (remain > 0)) {
+ *cache_ptr = *ptr;
+ cache_ptr++;
+ ptr++;
+ host->cache_len++;
+ remain--;
+ }
+
+ if ((host->cache_len == 4) ||
+ (data_left == host->cache_len)) {
+
+ writesl(base + MMCIFIFO, &host->cache, 1);
+ if (data_left == host->cache_len)
+ break;
+
+ host->cache = 0;
+ host->cache_len = 0;
+ maxcnt -= 4;
+ data_left -= 4;
+ }
+
+ if (remain == 0)
+ break;
+ }
- maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
count = min(remain, maxcnt);
- writesl(base + MMCIFIFO, ptr, count >> 2);
+ if (!(count % 4) || (data_left == count)) {
+ /*
+ * The data is either 4-byte aligned or it is the
+ * last data to write. It is thus fine to potentially
+ * pad the data if needed.
+ */
+ writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
+ ptr += count;
+ remain -= count;
+ data_left -= count;
- ptr += count;
- remain -= count;
+ } else {
+
+ host->cache_len = count % 4;
+ count = (count >> 2) << 2;
+
+ if (count)
+ writesl(base + MMCIFIFO, ptr, count >> 2);
+
+ ptr += count;
+ remain -= count;
+ data_left -= count;
+
+ i = 0;
+ cache_ptr = (char *)&host->cache;
+ while (i < host->cache_len) {
+ *cache_ptr = *ptr;
+ cache_ptr++;
+ ptr++;
+ remain--;
+ i++;
+ }
+ }
if (remain == 0)
break;
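/*
 * Worked example for the cache logic above: a 10-byte SDIO write
 * split across a 6-byte and a 4-byte sg element. The first call
 * pushes one whole word with writesl() and stashes the trailing two
 * bytes in host->cache (cache_len = 2). The next call tops the cache
 * up to a full word from the new buffer and flushes it, and the
 * final two bytes go out as one padded word since data_left == count.
 * Only that very last word of the transfer is ever padded; the card
 * ignores the pad bytes because MMCIDATALENGTH was programmed with
 * the real size.
 */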
@@ -314,15 +1062,19 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ struct variant_data *variant = host->variant;
void __iomem *base = host->base;
+ unsigned long flags;
u32 status;
status = readl(base + MMCISTATUS);
dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
+ local_irq_save(flags);
+
do {
- unsigned long flags;
unsigned int remain, len;
char *buffer;
@@ -336,11 +1088,11 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
break;
- /*
- * Map the current scatter buffer.
- */
- buffer = mmci_kmap_atomic(host, &flags) + host->sg_off;
- remain = host->sg_ptr->length - host->sg_off;
+ if (!sg_miter_next(sg_miter))
+ break;
+
+ buffer = sg_miter->addr;
+ remain = sg_miter->length;
len = 0;
if (status & MCI_RXACTIVE)
@@ -348,47 +1100,42 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
if (status & MCI_TXACTIVE)
len = mmci_pio_write(host, buffer, remain, status);
- /*
- * Unmap the buffer.
- */
- mmci_kunmap_atomic(host, buffer, &flags);
+ sg_miter->consumed = len;
- host->sg_off += len;
host->size -= len;
remain -= len;
if (remain)
break;
- /*
- * If we were reading, and we have completed this
- * page, ensure that the data cache is coherent.
- */
if (status & MCI_RXACTIVE)
- flush_dcache_page(sg_page(host->sg_ptr));
-
- if (!mmci_next_sg(host))
- break;
+ flush_dcache_page(sg_miter->page);
status = readl(base + MMCISTATUS);
} while (1);
+ sg_miter_stop(sg_miter);
+
+ local_irq_restore(flags);
+
/*
* If we're nearing the end of the read, switch to
* "any data available" mode.
*/
- if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE)
- writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+ if (status & MCI_RXACTIVE && host->size < variant->fifosize)
+ mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
- /*
- * If we run out of data, disable the data IRQs; this
- * prevents a race where the FIFO becomes empty before
- * the chip itself has disabled the data path, and
- * stops us racing with our data end IRQ.
- */
+ /* If we run out of data, disable the data IRQs. */
if (host->size == 0) {
- writel(0, base + MMCIMASK1);
- writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
+ mmci_set_mask1(host, 0);
+
+ /*
+ * If we already received MCI_DATAEND and the last
+ * MCI_DATABLOCKEND, the entire data transfer shall
+ * be completed.
+ */
+ if (host->dataend && host->last_blockend)
+ mmci_complete_data_xfer(host);
}
return IRQ_HANDLED;
@@ -401,6 +1148,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
{
struct mmci_host *host = dev_id;
u32 status;
+ int sdio_irq = 0;
int ret = 0;
spin_lock(&host->lock);
@@ -410,18 +1158,28 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
struct mmc_data *data;
status = readl(host->base + MMCISTATUS);
+
+ if (host->singleirq) {
+ if (status & readl(host->base + MMCIMASK1))
+ mmci_pio_irq(irq, dev_id);
+
+ status &= ~MCI_IRQ1MASK;
+ }
+
status &= readl(host->base + MMCIMASK0);
writel(status, host->base + MMCICLEAR);
dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
+ if (status & MCI_SDIOIT)
+ sdio_irq = 1;
+
data = host->data;
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
- MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
+ if (status & MCI_DATA_IRQ && data)
mmci_data_irq(host, data, status);
cmd = host->cmd;
- if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
+ if (status & MCI_CMD_IRQ && cmd)
mmci_cmd_irq(host, cmd, status);
ret = 1;
@@ -429,17 +1187,27 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
spin_unlock(&host->lock);
+ if (sdio_irq)
+ mmc_signal_sdio_irq(host->mmc);
+
return IRQ_RETVAL(ret);
}
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
unsigned long flags;
WARN_ON(host->mrq != NULL);
- if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
+ if (mrq->data &&
+ (!variant->non_power_of_2_blksize ||
+#ifdef CONFIG_ARCH_U8500
+ !cpu_is_u8500v20_or_later() ||
+#endif
+ (mmc->card && mmc_card_ddr_mode(mmc->card))) &&
+ !is_power_of_2(mrq->data->blksz)) {
dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
mrq->data->blksz);
mrq->cmd->error = -EINVAL;
@@ -462,41 +1230,81 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
u32 pwr = 0;
unsigned long flags;
+ int ret;
+
+ if (host->plat->vdd_handler)
+ host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
+ ios->power_mode);
switch (ios->power_mode) {
case MMC_POWER_OFF:
- if(host->vcc &&
- regulator_is_enabled(host->vcc))
- regulator_disable(host->vcc);
+ if (host->vcard)
+ ret = mmc_regulator_set_ocr(mmc, host->vcard, 0);
break;
case MMC_POWER_UP:
-#ifdef CONFIG_REGULATOR
- if (host->vcc)
- /* This implicitly enables the regulator */
- mmc_regulator_set_ocr(host->vcc, ios->vdd);
-#endif
+ if (host->vcard) {
+ ret = mmc_regulator_set_ocr(mmc, host->vcard, ios->vdd);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "unable to set OCR\n");
+ /*
+ * The .set_ios() function in the mmc_host_ops
+ * struct returns void, and failing to set the
+ * power should be rare so we print an error
+ * and return here.
+ */
+ return;
+ }
+ }
+
/*
- * The translate_vdd function is not used if you have
- * an external regulator, or your design is really weird.
- * Using it would mean sending in power control BOTH using
- * a regulator AND the 4 MMCIPWR bits. If we don't have
- * a regulator, we might have some other platform specific
- * power control behind this translate function.
+ * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
+ * and instead uses MCI_PWR_ON so apply whatever value is
+ * configured in the variant data.
*/
- if (!host->vcc && host->plat->translate_vdd)
- pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
- /* The ST version does not have this, fall through to POWER_ON */
- if (host->hw_designer != AMBA_VENDOR_ST) {
- pwr |= MCI_PWR_UP;
- break;
- }
+ pwr |= variant->pwrreg_powerup;
+
+ break;
case MMC_POWER_ON:
pwr |= MCI_PWR_ON;
break;
}
+ if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
+ /*
+ * The ST Micro variant has some additional bits
+ * indicating signal direction for the signals in
+ * the SD/MMC bus and feedback-clock usage. The
+ * fall-throughs in the code below are intentional.
+ */
+
+ if (host->plat->sigdir & MMCI_ST_DIRFBCLK)
+ pwr |= MCI_ST_FBCLKEN;
+ if (host->plat->sigdir & MMCI_ST_DIRCMD)
+ pwr |= MCI_ST_CMDDIREN;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ if (host->plat->sigdir & MMCI_ST_DIRDAT74)
+ pwr |= MCI_ST_DATA74DIREN;
+ case MMC_BUS_WIDTH_4:
+ if (host->plat->sigdir & MMCI_ST_DIRDAT31)
+ pwr |= MCI_ST_DATA31DIREN;
+ if (host->plat->sigdir & MMCI_ST_DIRDAT2)
+ pwr |= MCI_ST_DATA2DIREN;
+ case MMC_BUS_WIDTH_1:
+ if (host->plat->sigdir & MMCI_ST_DIRDAT0)
+ pwr |= MCI_ST_DATA0DIREN;
+ break;
+ default:
+ dev_err(mmc_dev(mmc), "unsupported MMC bus width %d\n",
+ ios->bus_width);
+ break;
+ }
+ }
+
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
if (host->hw_designer != AMBA_VENDOR_ST)
pwr |= MCI_ROD;
@@ -505,7 +1313,8 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
* The ST Micro variant use the ROD bit for something
* else and only has OD (Open Drain).
*/
- pwr |= MCI_OD;
+ if (mmc->card && (mmc->card->type == MMC_TYPE_MMC))
+ pwr |= MCI_OD;
}
}
@@ -528,18 +1337,23 @@ static int mmci_get_ro(struct mmc_host *mmc)
if (host->gpio_wp == -ENOSYS)
return -ENOSYS;
- return gpio_get_value(host->gpio_wp);
+ return gpio_get_value_cansleep(host->gpio_wp);
}
static int mmci_get_cd(struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
+ struct mmci_platform_data *plat = host->plat;
unsigned int status;
- if (host->gpio_cd == -ENOSYS)
- status = host->plat->status(mmc_dev(host->mmc));
- else
- status = !gpio_get_value(host->gpio_cd);
+ if (host->gpio_cd == -ENOSYS) {
+ if (!plat->status)
+ return 1; /* Assume always present */
+
+ status = plat->status(mmc_dev(host->mmc));
+ } else
+ status = !!gpio_get_value_cansleep(host->gpio_cd)
+ ^ plat->cd_invert;
/*
* Use positive logic throughout - status is zero for no card,
@@ -548,28 +1362,156 @@ static int mmci_get_cd(struct mmc_host *mmc)
return status;
}
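/*
 * Polarity example for the status evaluation above: with an
 * active-low card-detect line (the GPIO reads 0 when a card is
 * inserted) the platform sets cd_invert, so "card present" evaluates
 * to !!0 ^ 1 = 1 and "no card" to !!1 ^ 1 = 0, keeping the positive
 * logic noted above.
 */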
+#ifdef CONFIG_PM
+static int mmci_enable(struct mmc_host *mmc)
+{
+ unsigned long flags;
+ struct mmci_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ clk_enable(host->clk);
+ if (host->vcc) {
+ ret = regulator_enable(host->vcc);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to enable regulator %s\n",
+ host->plat->vcc);
+ return ret;
+ }
+ }
+
+ if (pm_runtime_get_sync(mmc->parent) < 0)
+ dev_err(mmc_dev(mmc), "failed pm_runtime_get_sync\n");
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Restore registers for POWER, CLOCK and IRQMASK0 */
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ writel(host->irqmask0_reg, host->base + MMCIMASK0);
+
+ if (host->variant->sdio &&
+ host->mmc->card &&
+ mmc_card_sdio(host->mmc->card)) {
+ /*
+ * The ST Micro variants have a special bit in the DATACTRL
+ * register to enable SDIO mode. This bit must be set otherwise
+ * no SDIO interrupts can be received.
+ */
+ writel(MCI_ST_DPSM_SDIOEN, host->base + MMCIDATACTRL);
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* Restore settings done by the vdd_handler. */
+ if (host->plat->vdd_handler)
+ host->plat->vdd_handler(mmc_dev(mmc),
+ mmc->ios.vdd,
+ mmc->ios.power_mode);
+
+ /*
+ * To be able to handle specific wake-up scenarios for each host,
+ * the following hook can be implemented.
+ */
+ if (host->plat->wakeup_handler)
+ host->plat->wakeup_handler(host->mmc, true);
+
+ return ret;
+}
+
+static int mmci_disable(struct mmc_host *mmc, int lazy)
+{
+ unsigned long flags;
+ struct mmci_host *host = mmc_priv(mmc);
+
+ /*
+ * To be able to handle specific shutdown scenarios for each host,
+ * the following hook can be implemented.
+ */
+ if (host->plat->wakeup_handler)
+ host->plat->wakeup_handler(host->mmc, false);
+
+ /*
+ * Let the vdd_handler act on a POWER_OFF to potentially do some
+ * power save actions.
+ */
+ if (host->plat->vdd_handler)
+ host->plat->vdd_handler(mmc_dev(mmc), 0, MMC_POWER_OFF);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Save registers for POWER, CLOCK and IRQMASK0 */
+ host->irqmask0_reg = readl(host->base + MMCIMASK0);
+ host->pwr_reg = readl(host->base + MMCIPOWER);
+ host->clk_reg = readl(host->base + MMCICLOCK);
+
+ /*
+ * Make sure we do not get any interrupts when we have disabled
+ * the clock and the regulator, and also make sure to clear the
+ * registers for clock and power.
+ */
+ writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIPOWER);
+ writel(0, host->base + MMCICLOCK);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (pm_runtime_put_sync(mmc->parent) < 0)
+ dev_err(mmc_dev(mmc), "failed pm_runtime_put_sync\n");
+
+ clk_disable(host->clk);
+ if (host->vcc) {
+ regulator_disable(host->vcc);
+ }
+
+ return 0;
+}
+#else
+#define mmci_enable NULL
+#define mmci_disable NULL
+#endif
+
+static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
+{
+ struct mmci_host *host = dev_id;
+
+ mmc_detect_change(host->mmc, msecs_to_jiffies(500));
+
+ return IRQ_HANDLED;
+}
+
+static void mmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ unsigned long flags;
+ unsigned int mask0;
+ struct mmci_host *host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ mask0 = readl(host->base + MMCIMASK0);
+ if (enable)
+ mask0 |= MCI_SDIOIT;
+ else
+ mask0 &= ~MCI_SDIOIT;
+ writel(mask0, host->base + MMCIMASK0);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
static const struct mmc_host_ops mmci_ops = {
.request = mmci_request,
.set_ios = mmci_set_ios,
.get_ro = mmci_get_ro,
.get_cd = mmci_get_cd,
+ .enable = mmci_enable,
+ .disable = mmci_disable,
+ .enable_sdio_irq = mmci_enable_sdio_irq,
};
-static void mmci_check_status(unsigned long data)
-{
- struct mmci_host *host = (struct mmci_host *)data;
- unsigned int status = mmci_get_cd(host->mmc);
-
- if (status ^ host->oldstat)
- mmc_detect_change(host->mmc, 0);
-
- host->oldstat = status;
- mod_timer(&host->timer, jiffies + HZ);
-}
-
static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
{
struct mmci_platform_data *plat = dev->dev.platform_data;
+ struct variant_data *variant = id->data;
struct mmci_host *host;
struct mmc_host *mmc;
int ret;
@@ -592,9 +1534,16 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
host = mmc_priv(mmc);
host->mmc = mmc;
+ host->plat = plat;
+ host->variant = variant;
host->gpio_wp = -ENOSYS;
host->gpio_cd = -ENOSYS;
+ host->gpio_cd_irq = -1;
+
+ host->irqmask0_reg = 0;
+ host->pwr_reg = 0;
+ host->clk_reg = 0;
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
@@ -608,11 +1557,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
goto host_free;
}
- ret = clk_enable(host->clk);
- if (ret)
- goto clk_free;
-
- host->plat = plat;
host->mclk = clk_get_rate(host->clk);
/*
* According to the spec, mclk is max 100 MHz,
@@ -622,15 +1566,16 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
if (host->mclk > 100000000) {
ret = clk_set_rate(host->clk, 100000000);
if (ret < 0)
- goto clk_disable;
+ goto clk_free;
host->mclk = clk_get_rate(host->clk);
dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
host->mclk);
}
+ host->phybase = dev->res.start;
host->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!host->base) {
ret = -ENOMEM;
- goto clk_disable;
+ goto clk_free;
}
mmc->ops = &mmci_ops;
@@ -649,14 +1594,31 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
mmc->f_max = min(host->mclk, fmax);
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
-#ifdef CONFIG_REGULATOR
- /* If we're using the regulator framework, try to fetch a regulator */
- host->vcc = regulator_get(&dev->dev, "vmmc");
- if (IS_ERR(host->vcc))
- host->vcc = NULL;
- else {
- int mask = mmc_regulator_get_ocrmask(host->vcc);
+ /* Host regulator */
+ if (plat->vcc) {
+ host->vcc = regulator_get(&dev->dev, plat->vcc);
+ ret = IS_ERR(host->vcc);
+ if (ret) {
+ host->vcc = NULL;
+ goto clk_free;
+ }
+ dev_dbg(mmc_dev(host->mmc), "fetched regulator %s\n",
+ plat->vcc);
+ }
+ /* Card regulator (could be a level shifter) */
+ if (plat->vcard) {
+ int mask;
+ host->vcard = regulator_get(&dev->dev, plat->vcard);
+ ret = IS_ERR(host->vcard);
+ if (ret) {
+ host->vcard = NULL;
+ goto vcc_free;
+ }
+ dev_dbg(mmc_dev(host->mmc), "fetched regulator %s\n",
+ plat->vcard);
+
+ mask = mmc_regulator_get_ocrmask(host->vcard);
if (mask < 0)
dev_err(&dev->dev, "error getting OCR mask (%d)\n",
mask);
@@ -667,30 +1629,42 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
"Provided ocr_mask/setpower will not be used "
"(using regulator instead)\n");
}
- }
-#endif
- /* Fall back to platform data if no regulator is found */
- if (host->vcc == NULL)
+
+ } else {
+ /* Use platform data if no regulator for vcard is used */
mmc->ocr_avail = plat->ocr_mask;
+ }
+
+ /* Use platform capabilities */
mmc->caps = plat->capabilities;
+ /* Set disable timeout if supported */
+ if (mmc->caps & MMC_CAP_DISABLE)
+ mmc_set_disable_delay(mmc, plat->disable);
+
+ /* We support these PM capabilities. */
+ mmc->pm_caps = MMC_PM_KEEP_POWER;
+
/*
* We can do SGIO
*/
- mmc->max_hw_segs = 16;
- mmc->max_phys_segs = NR_SG;
+ mmc->max_segs = NR_SG;
/*
- * Since we only have a 16-bit data length register, we must
- * ensure that we don't exceed 2^16-1 bytes in a single request.
+ * Since only a certain number of bits are valid in the data length
+ * register, we must ensure that we don't exceed 2^num-1 bytes in a
+ * single request.
*/
- mmc->max_req_size = 65535;
+ mmc->max_req_size = (1 << variant->datalength_bits) - 1;
/*
- * Set the maximum segment size. Since we aren't doing DMA
- * (yet) we are only limited by the data length register.
+ * Set the maximum segment size. Right now DMA sets the
+ * limit and not the data length register. Thus until the DMA
+ * driver handles this, the segment size is limited by DMA.
+ * DMA limit: src_addr_width x (64 KB - 1), where src_addr_width
+ * can be as small as 1.
*/
- mmc->max_seg_size = mmc->max_req_size;
+ mmc->max_seg_size = 65535;
/*
* Block size can be up to 2048 bytes, but must be a power of two.
@@ -704,10 +1678,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
spin_lock_init(&host->lock);
- writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
- writel(0xfff, host->base + MMCICLEAR);
-
if (gpio_is_valid(plat->gpio_cd)) {
ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
if (ret == 0)
@@ -716,6 +1686,13 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
host->gpio_cd = plat->gpio_cd;
else if (ret != -ENOSYS)
goto err_gpio_cd;
+
+ ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
+ mmci_cd_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ DRIVER_NAME " (cd)", host);
+ if (ret >= 0)
+ host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
}
if (gpio_is_valid(plat->gpio_wp)) {
ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
@@ -727,45 +1704,70 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
goto err_gpio_wp;
}
- ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
+ if ((host->plat->status || host->gpio_cd != -ENOSYS)
+ && host->gpio_cd_irq < 0)
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+
+ mmci_setup_dma(host);
+
+ ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED,
+ DRIVER_NAME " (cmd)", host);
if (ret)
goto unmap;
- ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host);
- if (ret)
- goto irq0_free;
+ if (dev->irq[1] == NO_IRQ)
+ host->singleirq = true;
+ else {
+ ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
+ DRIVER_NAME " (pio)", host);
+ if (ret)
+ goto irq0_free;
+ }
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ /* Prepare IRQMASK0 */
+ host->irqmask0_reg = MCI_IRQENABLE;
+ if (host->variant->broken_blockend)
+ host->irqmask0_reg &= ~MCI_DATABLOCKEND;
amba_set_drvdata(dev, mmc);
- host->oldstat = mmci_get_cd(host->mmc);
+
+ pm_runtime_enable(mmc->parent);
+ if (pm_runtime_get_sync(mmc->parent) < 0)
+ dev_err(mmc_dev(mmc), "failed pm_runtime_get_sync\n");
+ if (pm_runtime_put_sync(mmc->parent) < 0)
+ dev_err(mmc_dev(mmc), "failed pm_runtime_put_sync\n");
mmc_add_host(mmc);
- dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n",
- mmc_hostname(mmc), amba_rev(dev), amba_config(dev),
- (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
+ dev_info(&dev->dev,
+ "%s: MMCI/PL180 manf %x rev %x cfg %02x at 0x%016llx\n",
+ mmc_hostname(mmc), amba_manf(dev), amba_rev(dev),
+ amba_config(dev), (unsigned long long)dev->res.start);
+ dev_info(&dev->dev, "IRQ %d, %d (pio)\n", dev->irq[0], dev->irq[1]);
- init_timer(&host->timer);
- host->timer.data = (unsigned long)host;
- host->timer.function = mmci_check_status;
- host->timer.expires = jiffies + HZ;
- add_timer(&host->timer);
+ /* Ugly hack for u8500_sdio_detect_card, to be removed soon. */
+ sdio_host_ptr = host;
+
+ mmci_debugfs_create(host);
return 0;
irq0_free:
free_irq(dev->irq[0], host);
unmap:
+ mmci_disable_dma(host);
if (host->gpio_wp != -ENOSYS)
gpio_free(host->gpio_wp);
err_gpio_wp:
+ if (host->gpio_cd_irq >= 0)
+ free_irq(host->gpio_cd_irq, host);
if (host->gpio_cd != -ENOSYS)
gpio_free(host->gpio_cd);
err_gpio_cd:
iounmap(host->base);
- clk_disable:
- clk_disable(host->clk);
+ vcc_free:
+ if (host->vcc)
+ regulator_put(host->vcc);
clk_free:
clk_put(host->clk);
host_free:
@@ -785,8 +1787,12 @@ static int __devexit mmci_remove(struct amba_device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
- del_timer_sync(&host->timer);
+ pm_runtime_disable(mmc->parent);
+ if (host->vcc)
+ regulator_enable(host->vcc);
+
+ mmci_debugfs_remove(host);
mmc_remove_host(mmc);
writel(0, host->base + MMCIMASK0);
@@ -795,11 +1801,15 @@ static int __devexit mmci_remove(struct amba_device *dev)
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
+ mmci_disable_dma(host);
free_irq(dev->irq[0], host);
- free_irq(dev->irq[1], host);
+ if (!host->singleirq)
+ free_irq(dev->irq[1], host);
if (host->gpio_wp != -ENOSYS)
gpio_free(host->gpio_wp);
+ if (host->gpio_cd_irq >= 0)
+ free_irq(host->gpio_cd_irq, host);
if (host->gpio_cd != -ENOSYS)
gpio_free(host->gpio_cd);
@@ -807,9 +1817,15 @@ static int __devexit mmci_remove(struct amba_device *dev)
clk_disable(host->clk);
clk_put(host->clk);
- if (regulator_is_enabled(host->vcc))
+ if (host->vcc) {
regulator_disable(host->vcc);
- regulator_put(host->vcc);
+ regulator_put(host->vcc);
+ }
+
+ if (host->vcard) {
+ mmc_regulator_set_ocr(mmc, host->vcard, 0);
+ regulator_put(host->vcard);
+ }
mmc_free_host(mmc);
@@ -826,11 +1842,38 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state)
int ret = 0;
if (mmc) {
- struct mmci_host *host = mmc_priv(mmc);
+ /*
+ * The host must be claimed to prevent request handling etc.
+ * Also make sure not to sleep, since we must return with a
+ * response quite quickly.
+ */
+ if (mmc_try_claim_host(mmc)) {
+ struct mmci_host *host = mmc_priv(mmc);
+
+ if (mmc->enabled) {
+ /*
+ * Do not suspend if the host has not been
+ * disabled.
+ */
+ mmc_do_release_host(mmc);
+ ret = -EBUSY;
+ } else if ((!(host->plat->pm_flags & MMC_PM_KEEP_POWER))
+ && (!(mmc->pm_flags & MMC_PM_KEEP_POWER))) {
+ /* Cut the power to the card to save current. */
+ mmc_host_enable(mmc);
+ mmc_power_save_host(mmc);
+ mmc_host_disable(mmc);
+ host->pwr_reg = 0;
+ host->clk_reg = 0;
+ }
- ret = mmc_suspend_host(mmc);
- if (ret == 0)
- writel(0, host->base + MMCIMASK0);
+ } else {
+ /*
+ * We did not manage to claim the host, thus someone
+ * is still using it. Do not suspend.
+ */
+ ret = -EBUSY;
+ }
}
return ret;
@@ -839,17 +1882,26 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state)
static int mmci_resume(struct amba_device *dev)
{
struct mmc_host *mmc = amba_get_drvdata(dev);
- int ret = 0;
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ /* We expect the host to be claimed. */
+ WARN_ON(!mmc->claimed);
- ret = mmc_resume_host(mmc);
+ /* Restore power and re-initialize the card. */
+ if ((!(host->plat->pm_flags & MMC_PM_KEEP_POWER)) &&
+ (!(mmc->pm_flags & MMC_PM_KEEP_POWER))) {
+ mmc_host_enable(mmc);
+ mmc_power_restore_host(mmc);
+ mmc_host_disable(mmc);
+ }
+
+ /* Release the host to provide access to it again. */
+ mmc_do_release_host(mmc);
}
- return ret;
+ return 0;
}
#else
#define mmci_suspend NULL
@@ -860,19 +1912,28 @@ static struct amba_id mmci_ids[] = {
{
.id = 0x00041180,
.mask = 0x000fffff,
+ .data = &variant_arm,
},
{
.id = 0x00041181,
.mask = 0x000fffff,
+ .data = &variant_arm,
},
/* ST Micro variants */
{
.id = 0x00180180,
.mask = 0x00ffffff,
+ .data = &variant_u300,
},
{
.id = 0x00280180,
.mask = 0x00ffffff,
+ .data = &variant_u300,
+ },
+ {
+ .id = 0x00480180,
+ .mask = 0x00ffffff,
+ .data = &variant_ux500,
},
{ 0, 0 },
};