From 4330971889149f064e9af8e020fe098c628b627e Mon Sep 17 00:00:00 2001 From: Carlos Chinea Date: Tue, 14 Dec 2010 10:09:38 +0000 Subject: HSI: omap_ssi: Introducing OMAP SSI driver Introduces the OMAP SSI driver in the kernel. The Synchronous Serial Interface (SSI) is a legacy version of HSI. As in the case of HSI, it is mainly used to connect Application engines (APE) with cellular modem engines (CMT) in cellular handsets. It provides multichannel, full-duplex, multi-core communication with no reference clock. The OMAP SSI block is capable of reaching speeds of 110 Mbit/s. Change-Id: Iea002a8f321cf66ab439552620af001206626ed6 Signed-off-by: Carlos Chinea Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/20576 Reviewed-by: Pawel SZYSZUK Tested-by: Pawel SZYSZUK Reviewed-by: Jonas ABERG --- arch/arm/mach-omap2/ssi.c | 134 +++ arch/arm/plat-omap/include/plat/ssi.h | 204 ++++ drivers/hsi/controllers/omap_ssi.c | 1853 +++++++++++++++++++++++++++++++++ 3 files changed, 2191 insertions(+) create mode 100644 arch/arm/mach-omap2/ssi.c create mode 100644 arch/arm/plat-omap/include/plat/ssi.h create mode 100644 drivers/hsi/controllers/omap_ssi.c diff --git a/arch/arm/mach-omap2/ssi.c b/arch/arm/mach-omap2/ssi.c new file mode 100644 index 00000000000..e822a77f5ca --- /dev/null +++ b/arch/arm/mach-omap2/ssi.c @@ -0,0 +1,134 @@ +/* + * linux/arch/arm/mach-omap2/ssi.c + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <plat/omap-pm.h> +#include <plat/ssi.h> + +static struct omap_ssi_platform_data ssi_pdata = { + .num_ports = SSI_NUM_PORTS, + .get_dev_context_loss_count = omap_pm_get_dev_context_loss_count, +}; + +static struct resource ssi_resources[] = { + /* SSI controller */ + [0] = { + .start = 0x48058000, + .end = 0x48058fff, + .name = "omap_ssi_sys", + .flags = IORESOURCE_MEM, + }, + /* GDD */ + [1] = { + .start = 0x48059000, + .end = 0x48059fff, + .name = "omap_ssi_gdd", + .flags = IORESOURCE_MEM, + }, + [2] = { + .start = 71, + .end = 71, + .name = "ssi_gdd", + .flags = IORESOURCE_IRQ, + }, + /* SSI port 1 */ + [3] = { + .start = 0x4805a000, + .end = 0x4805a7ff, + .name = "omap_ssi_sst1", + .flags = IORESOURCE_MEM, + }, + [4] = { + .start = 0x4805a800, + .end = 0x4805afff, + .name = "omap_ssi_ssr1", + .flags = IORESOURCE_MEM, + }, + [5] = { + .start = 67, + .end = 67, + .name = "ssi_p1_mpu_irq0", + .flags = IORESOURCE_IRQ, + }, + [6] = { + .start = 68, + .end = 68, + .name = "ssi_p1_mpu_irq1", + .flags = IORESOURCE_IRQ, + }, + [7] = { + .start = 0, + .end = 0, + .name = "ssi_p1_cawake", + .flags = IORESOURCE_IRQ | IORESOURCE_UNSET, + }, +}; + +static struct platform_device ssi_pdev = { + .name = "omap_ssi", + .id = 0, + .num_resources = ARRAY_SIZE(ssi_resources), + .resource = ssi_resources, + .dev = { + .platform_data = &ssi_pdata, + }, +}; + +int __init omap_ssi_config(struct omap_ssi_board_config *ssi_config) +{ + unsigned int port, offset, cawake_gpio; + int err; + + ssi_pdata.num_ports = ssi_config->num_ports; + for (port = 0, offset = 7; port < ssi_config->num_ports; + port++, offset += 5) { + cawake_gpio = ssi_config->cawake_gpio[port]; + if (!cawake_gpio) + continue; /* Nothing to do */ + err = gpio_request(cawake_gpio, "cawake"); + if (err < 0) + goto rback; + gpio_direction_input(cawake_gpio); + ssi_resources[offset].start = gpio_to_irq(cawake_gpio); + ssi_resources[offset].flags &= ~IORESOURCE_UNSET; + ssi_resources[offset].flags |= IORESOURCE_IRQ_HIGHEDGE | + IORESOURCE_IRQ_LOWEDGE; + } + + return 0; +rback: + dev_err(&ssi_pdev.dev, "Request cawake (gpio%d) failed\n", cawake_gpio); + while (port > 0) + gpio_free(ssi_config->cawake_gpio[--port]); + + return err; +} + +static int __init ssi_init(void) +{ + return platform_device_register(&ssi_pdev); +} +subsys_initcall(ssi_init); diff --git a/arch/arm/plat-omap/include/plat/ssi.h b/arch/arm/plat-omap/include/plat/ssi.h new file mode 100644 index 00000000000..eb84c3a69f7 --- /dev/null +++ b/arch/arm/plat-omap/include/plat/ssi.h @@ -0,0 +1,204 @@ +/* + * plat/ssi.h + * + * Hardware definitions for SSI. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __OMAP_SSI_REGS_H__ +#define __OMAP_SSI_REGS_H__ + +#define SSI_NUM_PORTS 1 +/* + * SSI SYS registers + */ +#define SSI_REVISION_REG 0 +# define SSI_REV_MAJOR 0xf0 +# define SSI_REV_MINOR 0xf +#define SSI_SYSCONFIG_REG 0x10 +# define SSI_AUTOIDLE (1 << 0) +# define SSI_SOFTRESET (1 << 1) +# define SSI_SIDLEMODE_FORCE 0 +# define SSI_SIDLEMODE_NO (1 << 3) +# define SSI_SIDLEMODE_SMART (1 << 4) +# define SSI_SIDLEMODE_MASK 0x18 +# define SSI_MIDLEMODE_FORCE 0 +# define SSI_MIDLEMODE_NO (1 << 12) +# define SSI_MIDLEMODE_SMART (1 << 13) +# define SSI_MIDLEMODE_MASK 0x3000 +#define SSI_SYSSTATUS_REG 0x14 +# define SSI_RESETDONE 1 +#define SSI_MPU_STATUS_REG(port, irq) (0x808 + ((port) * 0x10) + ((irq) * 2)) +#define SSI_MPU_ENABLE_REG(port, irq) (0x80c + ((port) * 0x10) + ((irq) * 8)) +# define SSI_DATAACCEPT(channel) (1 << (channel)) +# define SSI_DATAAVAILABLE(channel) (1 << ((channel) + 8)) +# define SSI_DATAOVERRUN(channel) (1 << ((channel) + 16)) +# define SSI_ERROROCCURED (1 << 24) +# define SSI_BREAKDETECTED (1 << 25) +#define SSI_GDD_MPU_IRQ_STATUS_REG 0x0800 +#define SSI_GDD_MPU_IRQ_ENABLE_REG 0x0804 +# define SSI_GDD_LCH(channel) (1 << (channel)) +#define SSI_WAKE_REG(port) (0xc00 + ((port) * 0x10)) +#define SSI_CLEAR_WAKE_REG(port) (0xc04 + ((port) * 0x10)) +#define SSI_SET_WAKE_REG(port) (0xc08 + ((port) * 0x10)) +# define SSI_WAKE(channel) (1 << (channel)) +# define SSI_WAKE_MASK 0xff + +/* + * SSI SST registers + */ +#define SSI_SST_ID_REG 0 +#define SSI_SST_MODE_REG 4 +# define SSI_MODE_VAL_MASK 3 +# define SSI_MODE_SLEEP 0 +# define SSI_MODE_STREAM 1 +# define SSI_MODE_FRAME 2 +# define SSI_MODE_MULTIPOINTS 3 +#define SSI_SST_FRAMESIZE_REG 8 +# define SSI_FRAMESIZE_DEFAULT 31 +#define SSI_SST_TXSTATE_REG 0xc +# define SSI_TXSTATE_IDLE 0 +#define SSI_SST_BUFSTATE_REG 0x10 +# define SSI_FULL(channel) (1 << (channel)) +#define SSI_SST_DIVISOR_REG 0x18 +# define SSI_MAX_DIVISOR 127 +#define SSI_SST_BREAK_REG 0x20 +#define SSI_SST_CHANNELS_REG 0x24 +# define SSI_CHANNELS_DEFAULT 4 +#define SSI_SST_ARBMODE_REG 0x28 +# define SSI_ARBMODE_ROUNDROBIN 0 +# define SSI_ARBMODE_PRIORITY 1 +#define SSI_SST_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4)) +#define SSI_SST_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4)) + +/* + * SSI SSR registers + */ +#define SSI_SSR_ID_REG 0 +#define SSI_SSR_MODE_REG 4 +#define SSI_SSR_FRAMESIZE_REG 8 +#define SSI_SSR_RXSTATE_REG 0xc +#define SSI_SSR_BUFSTATE_REG 0x10 +# define SSI_NOTEMPTY(channel) (1 << (channel)) +#define SSI_SSR_BREAK_REG 0x1c +#define SSI_SSR_ERROR_REG 0x20 +#define SSI_SSR_ERRORACK_REG 0x24 +#define SSI_SSR_OVERRUN_REG 0x2c +#define SSI_SSR_OVERRUNACK_REG 0x30 +#define SSI_SSR_TIMEOUT_REG 0x34 +# define SSI_TIMEOUT_DEFAULT 0 +#define SSI_SSR_CHANNELS_REG 0x28 +#define SSI_SSR_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4)) +#define SSI_SSR_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4)) + +/* + * SSI GDD registers + */ +#define SSI_GDD_HW_ID_REG 0 +#define SSI_GDD_PPORT_ID_REG 0x10 +#define SSI_GDD_MPORT_ID_REG 0x14 +#define SSI_GDD_PPORT_SR_REG 0x20 +#define SSI_GDD_MPORT_SR_REG 0x24 +# define SSI_ACTIVE_LCH_NUM_MASK 0xff +#define SSI_GDD_TEST_REG 0x40 +# define SSI_TEST 1 +#define SSI_GDD_GCR_REG 0x100 +# define SSI_CLK_AUTOGATING_ON (1 << 3) +# define SSI_FREE (1 << 2) +# define SSI_SWITCH_OFF (1 << 
0) +#define SSI_GDD_GRST_REG 0x200 +# define SSI_SWRESET 1 +#define SSI_GDD_CSDP_REG(channel) (0x800 + ((channel) * 0x40)) +# define SSI_DST_BURST_EN_MASK 0xc000 +# define SSI_DST_SINGLE_ACCESS0 0 +# define SSI_DST_SINGLE_ACCESS (1 << 14) +# define SSI_DST_BURST_4x32_BIT (2 << 14) +# define SSI_DST_BURST_8x32_BIT (3 << 14) +# define SSI_DST_MASK 0x1e00 +# define SSI_DST_MEMORY_PORT (8 << 9) +# define SSI_DST_PERIPHERAL_PORT (9 << 9) +# define SSI_SRC_BURST_EN_MASK 0x180 +# define SSI_SRC_SINGLE_ACCESS0 0 +# define SSI_SRC_SINGLE_ACCESS (1 << 7) +# define SSI_SRC_BURST_4x32_BIT (2 << 7) +# define SSI_SRC_BURST_8x32_BIT (3 << 7) +# define SSI_SRC_MASK 0x3c +# define SSI_SRC_MEMORY_PORT (8 << 2) +# define SSI_SRC_PERIPHERAL_PORT (9 << 2) +# define SSI_DATA_TYPE_MASK 3 +# define SSI_DATA_TYPE_S32 2 +#define SSI_GDD_CCR_REG(channel) (0x802 + ((channel) * 0x40)) +# define SSI_DST_AMODE_MASK (3 << 14) +# define SSI_DST_AMODE_CONST 0 +# define SSI_DST_AMODE_POSTINC (1 << 14) +# define SSI_SRC_AMODE_MASK (3 << 12) +# define SSI_SRC_AMODE_CONST 0 +# define SSI_SRC_AMODE_POSTINC (1 << 12) +# define SSI_CCR_ENABLE (1 << 7) +# define SSI_CCR_SYNC_MASK 0x1f +#define SSI_GDD_CICR_REG(channel) (0x804 + ((channel) * 0x40)) +# define SSI_BLOCK_IE (1 << 5) +# define SSI_HALF_IE (1 << 2) +# define SSI_TOUT_IE (1 << 0) +#define SSI_GDD_CSR_REG(channel) (0x806 + ((channel) * 0x40)) +# define SSI_CSR_SYNC (1 << 6) +# define SSI_CSR_BLOCK (1 << 5) +# define SSI_CSR_HALF (1 << 2) +# define SSI_CSR_TOUR (1 << 0) +#define SSI_GDD_CSSA_REG(channel) (0x808 + ((channel) * 0x40)) +#define SSI_GDD_CDSA_REG(channel) (0x80c + ((channel) * 0x40)) +#define SSI_GDD_CEN_REG(channel) (0x810 + ((channel) * 0x40)) +#define SSI_GDD_CSAC_REG(channel) (0x818 + ((channel) * 0x40)) +#define SSI_GDD_CDAC_REG(channel) (0x81a + ((channel) * 0x40)) +#define SSI_GDD_CLNK_CTRL_REG(channel) (0x828 + ((channel) * 0x40)) +# define SSI_ENABLE_LNK (1 << 15) +# define SSI_STOP_LNK (1 << 14) +# define SSI_NEXT_CH_ID_MASK 0xf + +/** + * struct omap_ssi_platform_data - OMAP SSI platform data + * @num_ports: Number of ports on the controller + * @get_dev_context_loss_count: Pointer to omap_pm_get_dev_context_loss_count + */ +struct omap_ssi_platform_data { + unsigned int num_ports; + int (*get_dev_context_loss_count)(struct device *dev); +}; + +/** + * struct omap_ssi_board_config - SSI board configuration + * @num_ports: Number of ports in use + * @cawake_gpio: Array of cawake gpio lines + */ +struct omap_ssi_board_config { + unsigned int num_ports; + int cawake_gpio[SSI_NUM_PORTS]; +}; + +#ifdef CONFIG_OMAP_SSI_CONFIG +extern int omap_ssi_config(struct omap_ssi_board_config *ssi_config); +#else +static inline int omap_ssi_config(struct omap_ssi_board_config *ssi_config) +{ + return 0; +} +#endif /* CONFIG_OMAP_SSI_CONFIG */ + +#endif /* __OMAP_SSI_REGS_H__ */ diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c new file mode 100644 index 00000000000..a82ea0e13cc --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi.c @@ -0,0 +1,1853 @@ +/* + * omap_ssi.c + * + * Implements the OMAP SSI driver. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ +#include <linux/compiler.h> +#include <linux/err.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/seq_file.h> +#include <linux/scatterlist.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/hsi/hsi.h> +#include <plat/ssi.h> + +#define SSI_MAX_CHANNELS 8 +#define SSI_MAX_GDD_LCH 8 +#define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) + +/** + * struct ssi_clk_res - Device resource data for the SSI clocks + * @clk: Pointer to the clock + * @nb: Pointer to the clock notifier for clk, if any + */ +struct ssi_clk_res { + struct clk *clk; + struct notifier_block *nb; +}; + +/** + * struct gdd_trn - GDD transaction data + * @msg: Pointer to the HSI message being served + * @sg: Pointer to the current sg entry being served + */ +struct gdd_trn { + struct hsi_msg *msg; + struct scatterlist *sg; +}; + +/** + * struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context + * @mode: Bit transmission mode + * @channels: Number of channels + * @frame_size: Frame size in bits + * @timeout: RX frame timeout + * @divisor: TX divisor + * @arb_mode: Arbitration mode for TX frame (Round robin, priority) + */ +struct omap_ssm_ctx { + u32 mode; + u32 channels; + u32 frame_size; + union { + u32 timeout; /* Rx Only */ + struct { + u32 arb_mode; + u32 divisor; + }; /* Tx only */ + }; +}; + +/** + * struct omap_ssi_port - OMAP SSI port data + * @dev: device associated to the port (HSI port) + * @sst_dma: SSI transmitter physical base address + * @ssr_dma: SSI receiver physical base address + * @sst_base: SSI transmitter base address + * @ssr_base: SSI receiver base address + * @wk_lock: spin lock to serialize access to the wake lines + * @lock: Spin lock to serialize access to the SSI port + * @channels: Current number of channels configured (1,2,4 or 8) + * @txqueue: TX message queues + * @rxqueue: RX message queues + * @brkqueue: Queue of incoming HWBREAK requests (FRAME mode) + * @irq: IRQ number + * @wake_irq: IRQ number for incoming wake line (-1 if none) + * @pio_tasklet: Bottom half for PIO transfers and events + * @wake_tasklet: Bottom half for incoming wake events + * @wkin_cken: Keep track of clock references due to the incoming wake line + * @wk_refcount: Reference count for output wake line + * @sys_mpu_enable: Context for the interrupt enable register for irq 0 + * @sst: Context for the synchronous serial transmitter + * @ssr: Context for the synchronous serial receiver + */ +struct omap_ssi_port { + struct device *dev; + dma_addr_t sst_dma; + dma_addr_t ssr_dma; + void __iomem *sst_base; + void __iomem *ssr_base; + spinlock_t wk_lock; + spinlock_t lock; + unsigned int channels; + struct list_head txqueue[SSI_MAX_CHANNELS]; + struct list_head rxqueue[SSI_MAX_CHANNELS]; + struct list_head brkqueue; + unsigned int irq; + int wake_irq; + struct tasklet_struct pio_tasklet; + struct tasklet_struct wake_tasklet; + unsigned int wkin_cken:1; /* Workaround */ + int wk_refcount; + /* OMAP SSI port context */ + u32 sys_mpu_enable; /* We use only one irq */ + struct omap_ssm_ctx sst; + struct omap_ssm_ctx ssr; +}; + +/** + * struct omap_ssi_controller - OMAP SSI 
controller data + * @dev: device associated to the controller (HSI controller) + * @sys: SSI I/O base address + * @gdd: GDD I/O base address + * @ick: SSI interconnect clock + * @fck: SSI functional clock + * @ck_refcount: Reference count for the clocks + * @gdd_irq: IRQ line for GDD + * @gdd_tasklet: bottom half for DMA transfers + * @gdd_trn: Array of GDD transaction data for ongoing GDD transfers + * @lock: lock to serialize access to GDD + * @ck_lock: lock to serialize access to the clocks + * @fck_rate: SSI functional clock rate + * @loss_count: To follow if we need to restore context or not + * @max_speed: Maximum TX speed (Kb/s) set by the clients. + * @sysconfig: SSI controller saved context + * @gdd_gcr: SSI GDD saved context + * @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any + * @port: Array of pointers to the ports of the controller + * @dir: Debugfs SSI root directory + */ +struct omap_ssi_controller { + struct device *dev; + void __iomem *sys; + void __iomem *gdd; + struct clk *ick; + struct clk *fck; + int ck_refcount; + unsigned int gdd_irq; + struct tasklet_struct gdd_tasklet; + struct gdd_trn gdd_trn[SSI_MAX_GDD_LCH]; + spinlock_t lock; + spinlock_t ck_lock; + unsigned long fck_rate; + int loss_count; + u32 max_speed; + /* OMAP SSI Controller context */ + u32 sysconfig; + u32 gdd_gcr; + int (*get_loss)(struct device *dev); + struct omap_ssi_port **port; +#ifdef CONFIG_DEBUG_FS + struct dentry *dir; +#endif +}; + +static inline unsigned int ssi_wakein(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + + return gpio_get_value(irq_to_gpio(omap_port->wake_irq)); +} + +static int ssi_for_each_port(struct hsi_controller *ssi, void *data, + int (*fn)(struct omap_ssi_port *p, void *data)) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + unsigned int i = 0; + int err = 0; + + for (i = 0; ((i < ssi->num_ports) && !err); i++) + err = (*fn)(omap_ssi->port[i], data); + + return err; +} + +static int ssi_set_port_mode(struct omap_ssi_port *omap_port, void *data) +{ + u32 *mode = data; + + __raw_writel(*mode, omap_port->sst_base + SSI_SST_MODE_REG); + __raw_writel(*mode, omap_port->ssr_base + SSI_SSR_MODE_REG); + /* OCP barrier */ + *mode = __raw_readl(omap_port->ssr_base + SSI_SSR_MODE_REG); + + return 0; +} + +static inline void ssi_set_mode(struct hsi_controller *ssi, u32 mode) +{ + ssi_for_each_port(ssi, &mode, ssi_set_port_mode); +} + +static int ssi_restore_port_mode(struct omap_ssi_port *omap_port, + void *data __maybe_unused) +{ + u32 mode; + + __raw_writel(omap_port->sst.mode, + omap_port->sst_base + SSI_SST_MODE_REG); + __raw_writel(omap_port->ssr.mode, + omap_port->ssr_base + SSI_SSR_MODE_REG); + /* OCP barrier */ + mode = __raw_readl(omap_port->ssr_base + SSI_SSR_MODE_REG); + + return 0; +} + +static int ssi_restore_divisor(struct omap_ssi_port *omap_port, + void *data __maybe_unused) +{ + __raw_writel(omap_port->sst.divisor, + omap_port->sst_base + SSI_SST_DIVISOR_REG); + + return 0; +} + +static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port, + void *data __maybe_unused) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *base = omap_port->sst_base; + + __raw_writel(omap_port->sys_mpu_enable, + omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + /* SST context */ + __raw_writel(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG); +
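/* Mode and TX divisor are restored separately, see ssi_clk_enable() */ +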
__raw_writel(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG); + __raw_writel(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG); + /* SSR context */ + base = omap_port->ssr_base; + __raw_writel(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG); + __raw_writel(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG); + __raw_writel(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG); + + return 0; +} + +static int ssi_save_port_ctx(struct omap_ssi_port *omap_port, + void *data __maybe_unused) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + omap_port->sys_mpu_enable = __raw_readl(omap_ssi->sys + + SSI_MPU_ENABLE_REG(port->num, 0)); + + return 0; +} + +static int ssi_clk_enable(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int err = 0; + + spin_lock_bh(&omap_ssi->ck_lock); + if (omap_ssi->ck_refcount++) + goto out; + err = clk_enable(omap_ssi->fck); + if (unlikely(err < 0)) + goto out; + err = clk_enable(omap_ssi->ick); + if (unlikely(err < 0)) { + clk_disable(omap_ssi->fck); + goto out; + } + if ((omap_ssi->get_loss) && (omap_ssi->loss_count == + (*omap_ssi->get_loss)(ssi->device.parent))) + goto mode; /* We always need to restore the mode & TX divisor */ + + __raw_writel(omap_ssi->sysconfig, omap_ssi->sys + SSI_SYSCONFIG_REG); + __raw_writel(omap_ssi->gdd_gcr, omap_ssi->gdd + SSI_GDD_GCR_REG); + + ssi_for_each_port(ssi, NULL, ssi_restore_port_ctx); +mode: + ssi_for_each_port(ssi, NULL, ssi_restore_divisor); + ssi_for_each_port(ssi, NULL, ssi_restore_port_mode); +out: + spin_unlock_bh(&omap_ssi->ck_lock); + + return err; +} + +static void ssi_clk_disable(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + spin_lock_bh(&omap_ssi->ck_lock); + WARN_ON(omap_ssi->ck_refcount <= 0); + if (--omap_ssi->ck_refcount) + goto out; + + ssi_set_mode(ssi, SSI_MODE_SLEEP); + + if (omap_ssi->get_loss) + omap_ssi->loss_count = + (*omap_ssi->get_loss)(ssi->device.parent); + + ssi_for_each_port(ssi, NULL, ssi_save_port_ctx); + clk_disable(omap_ssi->ick); + clk_disable(omap_ssi->fck); +out: + spin_unlock_bh(&omap_ssi->ck_lock); +} + +#ifdef CONFIG_DEBUG_FS +static int ssi_debug_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_controller *ssi = m->private; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + + ssi_clk_enable(ssi); + seq_printf(m, "REVISION\t: 0x%08x\n", + __raw_readl(sys + SSI_REVISION_REG)); + seq_printf(m, "SYSCONFIG\t: 0x%08x\n", + __raw_readl(sys + SSI_SYSCONFIG_REG)); + seq_printf(m, "SYSSTATUS\t: 0x%08x\n", + __raw_readl(sys + SSI_SYSSTATUS_REG)); + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_port *port = m->private; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *base = omap_ssi->sys; + unsigned int ch; + + ssi_clk_enable(ssi); + if (omap_port->wake_irq > 0) + seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port)); + seq_printf(m, "WAKE\t\t: 0x%08x\n", + __raw_readl(base + SSI_WAKE_REG(port->num))); + seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0, + __raw_readl(base + 
SSI_MPU_ENABLE_REG(port->num, 0))); + seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0, + __raw_readl(base + SSI_MPU_STATUS_REG(port->num, 0))); + /* SST */ + base = omap_port->sst_base; + seq_printf(m, "\nSST\n===\n"); + seq_printf(m, "ID SST\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_ID_REG)); + seq_printf(m, "MODE\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_MODE_REG)); + seq_printf(m, "FRAMESIZE\t: 0x%08x\n", + __raw_readl(base + SSI_SST_FRAMESIZE_REG)); + seq_printf(m, "DIVISOR\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_DIVISOR_REG)); + seq_printf(m, "CHANNELS\t: 0x%08x\n", + __raw_readl(base + SSI_SST_CHANNELS_REG)); + seq_printf(m, "ARBMODE\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_ARBMODE_REG)); + seq_printf(m, "TXSTATE\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_TXSTATE_REG)); + seq_printf(m, "BUFSTATE\t: 0x%08x\n", + __raw_readl(base + SSI_SST_BUFSTATE_REG)); + seq_printf(m, "BREAK\t\t: 0x%08x\n", + __raw_readl(base + SSI_SST_BREAK_REG)); + for (ch = 0; ch < omap_port->channels; ch++) { + seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, + __raw_readl(base + SSI_SST_BUFFER_CH_REG(ch))); + } + /* SSR */ + base = omap_port->ssr_base; + seq_printf(m, "\nSSR\n===\n"); + seq_printf(m, "ID SSR\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_ID_REG)); + seq_printf(m, "MODE\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_MODE_REG)); + seq_printf(m, "FRAMESIZE\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_FRAMESIZE_REG)); + seq_printf(m, "CHANNELS\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_CHANNELS_REG)); + seq_printf(m, "TIMEOUT\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_TIMEOUT_REG)); + seq_printf(m, "RXSTATE\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_RXSTATE_REG)); + seq_printf(m, "BUFSTATE\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_BUFSTATE_REG)); + seq_printf(m, "BREAK\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_BREAK_REG)); + seq_printf(m, "ERROR\t\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_ERROR_REG)); + seq_printf(m, "ERRORACK\t: 0x%08x\n", + __raw_readl(base + SSI_SSR_ERRORACK_REG)); + for (ch = 0; ch < omap_port->channels; ch++) { + seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, + __raw_readl(base + SSI_SSR_BUFFER_CH_REG(ch))); + } + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_debug_gdd_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_controller *ssi = m->private; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *gdd = omap_ssi->gdd; + int lch; + + ssi_clk_enable(ssi); + seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n", + __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG)); + seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n", + __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG)); + seq_printf(m, "HW_ID\t\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_HW_ID_REG)); + seq_printf(m, "PPORT_ID\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_PPORT_ID_REG)); + seq_printf(m, "MPORT_ID\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_MPORT_ID_REG)); + seq_printf(m, "TEST\t\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_TEST_REG)); + seq_printf(m, "GCR\t\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_GCR_REG)); + + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { + seq_printf(m, "\nGDD LCH %d\n=========\n", lch); + seq_printf(m, "CSDP\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CSDP_REG(lch))); + seq_printf(m, "CCR\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CCR_REG(lch))); + seq_printf(m, "CICR\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CICR_REG(lch))); + seq_printf(m, "CSR\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CSR_REG(lch))); + seq_printf(m, 
"CSSA\t\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_CSSA_REG(lch))); + seq_printf(m, "CDSA\t\t: 0x%08x\n", + __raw_readl(gdd + SSI_GDD_CDSA_REG(lch))); + seq_printf(m, "CEN\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CEN_REG(lch))); + seq_printf(m, "CSAC\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CSAC_REG(lch))); + seq_printf(m, "CDAC\t\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CDAC_REG(lch))); + seq_printf(m, "CLNK_CTRL\t: 0x%04x\n", + __raw_readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch))); + } + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_div_get(void *data, u64 *val) +{ + struct hsi_port *port = data; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + + ssi_clk_enable(ssi); + *val = __raw_readl(omap_port->sst_base + SSI_SST_DIVISOR_REG); + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_div_set(void *data, u64 val) +{ + struct hsi_port *port = data; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + + if (val > 127) + return -EINVAL; + + ssi_clk_enable(ssi); + __raw_writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG); + omap_port->sst.divisor = val; + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_show, inode->i_private); +} + +static int ssi_port_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_port_show, inode->i_private); +} + +static int ssi_gdd_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_gdd_show, inode->i_private); +} + +static const struct file_operations ssi_regs_fops = { + .open = ssi_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations ssi_port_regs_fops = { + .open = ssi_port_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations ssi_gdd_regs_fops = { + .open = ssi_gdd_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n"); + +static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port, + void *data) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct dentry *dir = data; + + dir = debugfs_create_dir(dev_name(omap_port->dev), dir); + if (IS_ERR(dir)) + return PTR_ERR(dir); + debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops); + dir = debugfs_create_dir("sst", dir); + if (IS_ERR(dir)) + return PTR_ERR(dir); + debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port, + &ssi_sst_div_fops); + + return 0; +} + +static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct dentry *dir; + int err; + + /* SSI controller */ + omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL); + if (IS_ERR(omap_ssi->dir)) + return PTR_ERR(omap_ssi->dir); + + debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi, + &ssi_regs_fops); + /* SSI GDD (DMA) */ + dir = debugfs_create_dir("gdd", omap_ssi->dir); + if (IS_ERR(dir)) + goto rback; + debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops); + /* SSI ports */ + err = ssi_for_each_port(ssi, omap_ssi->dir, ssi_debug_add_port); + if (err < 0) + goto rback; + + return 0; +rback: + 
debugfs_remove_recursive(omap_ssi->dir); + + return PTR_ERR(dir); +} + +static void ssi_debug_remove_ctrl(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + debugfs_remove_recursive(omap_ssi->dir); +} +#endif /* CONFIG_DEBUG_FS */ + +static int ssi_claim_lch(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int lch; + + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) + if (!omap_ssi->gdd_trn[lch].msg) { + omap_ssi->gdd_trn[lch].msg = msg; + omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl; + return lch; + } + + return -EBUSY; +} + +static int ssi_start_pio(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + u32 val; + + ssi_clk_enable(ssi); + if (msg->ttype == HSI_MSG_WRITE) { + val = SSI_DATAACCEPT(msg->channel); + ssi_clk_enable(ssi); /* Hold clocks for pio writes */ + } else { + val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED; + } + dev_dbg(&port->device, "Single %s transfer\n", + msg->ttype ? "write" : "read"); + val |= __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + ssi_clk_disable(ssi); + msg->actual_len = 0; + msg->status = HSI_STATUS_PROCEEDING; + + return 0; +} + +static int ssi_start_dma(struct hsi_msg *msg, int lch) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *gdd = omap_ssi->gdd; + int err; + u16 csdp; + u16 ccr; + u32 s_addr; + u32 d_addr; + u32 tmp; + + if (msg->ttype == HSI_MSG_READ) { + err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, + DMA_FROM_DEVICE); + if (err < 0) { + dev_dbg(&ssi->device, "DMA map SG failed !\n"); + return err; + } + csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT | + SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT | + SSI_DATA_TYPE_S32; + ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */ + ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST | + SSI_CCR_ENABLE; + s_addr = omap_port->ssr_dma + + SSI_SSR_BUFFER_CH_REG(msg->channel); + d_addr = sg_dma_address(msg->sgt.sgl); + } else { + err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, + DMA_TO_DEVICE); + if (err < 0) { + dev_dbg(&ssi->device, "DMA map SG failed !\n"); + return err; + } + csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT | + SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT | + SSI_DATA_TYPE_S32; + ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */ + ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST | + SSI_CCR_ENABLE; + s_addr = sg_dma_address(msg->sgt.sgl); + d_addr = omap_port->sst_dma + + SSI_SST_BUFFER_CH_REG(msg->channel); + } + dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x" + " d_addr %08x\n", lch, csdp, ccr, s_addr, d_addr); + ssi_clk_enable(ssi); /* Hold clocks during the transfer */ + __raw_writew(csdp, gdd + SSI_GDD_CSDP_REG(lch)); + __raw_writew(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch)); + __raw_writel(d_addr, gdd + SSI_GDD_CDSA_REG(lch)); + __raw_writel(s_addr, gdd + SSI_GDD_CSSA_REG(lch)); +
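/* The transfer length is programmed in 32-bit frames, not bytes */ +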
__raw_writew(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length), + gdd + SSI_GDD_CEN_REG(lch)); + + spin_lock_bh(&omap_ssi->lock); + tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp |= SSI_GDD_LCH(lch); + __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + spin_unlock_bh(&omap_ssi->lock); + __raw_writew(ccr, gdd + SSI_GDD_CCR_REG(lch)); + msg->status = HSI_STATUS_PROCEEDING; + + return 0; +} + +static int ssi_start_transfer(struct list_head *queue) +{ + struct hsi_msg *msg; + int lch = -1; + + if (list_empty(queue)) + return 0; + msg = list_first_entry(queue, struct hsi_msg, link); + if (msg->status != HSI_STATUS_QUEUED) + return 0; + if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32))) + lch = ssi_claim_lch(msg); + if (lch >= 0) + return ssi_start_dma(msg, lch); + else + return ssi_start_pio(msg); +} + +static void ssi_transfer(struct omap_ssi_port *omap_port, + struct list_head *queue) +{ + struct hsi_msg *msg; + int err = -1; + + spin_lock_bh(&omap_port->lock); + while (err < 0) { + err = ssi_start_transfer(queue); + if (err < 0) { + msg = list_first_entry(queue, struct hsi_msg, link); + msg->status = HSI_STATUS_ERROR; + msg->actual_len = 0; + list_del(&msg->link); + spin_unlock_bh(&omap_port->lock); + msg->complete(msg); + spin_lock_bh(&omap_port->lock); + } + } + spin_unlock_bh(&omap_port->lock); +} + +static u32 ssi_calculate_div(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + u32 tx_fckrate = (u32) omap_ssi->fck_rate; + + /* / 2 : SSI TX clock is always half of the SSI functional clock */ + tx_fckrate >>= 1; + /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */ + tx_fckrate--; + dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n", + tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate, + omap_ssi->max_speed); + + return tx_fckrate / omap_ssi->max_speed; +} + +static void ssi_error(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + unsigned int i; + u32 err; + u32 val; + u32 tmp; + + /* ACK error */ + err = __raw_readl(omap_port->ssr_base + SSI_SSR_ERROR_REG); + dev_err(&port->device, "SSI error: 0x%02x\n", err); + if (!err) { + dev_dbg(&port->device, "spurious SSI error ignored!\n"); + return; + } + spin_lock(&omap_ssi->lock); + /* Cancel all GDD read transfers */ + for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if ((msg) && (msg->ttype == HSI_MSG_READ)) { + __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + val |= (1 << i); + omap_ssi->gdd_trn[i].msg = NULL; + } + } + tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp &= ~val; + __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + spin_unlock(&omap_ssi->lock); + /* Cancel all PIO read transfers */ + spin_lock(&omap_port->lock); + tmp = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */ + __raw_writel(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + /* ACK error */ + __raw_writel(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG); + __raw_writel(SSI_ERROROCCURED, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + /* Signal the error to all current pending read requests */ + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->rxqueue[i])) +
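/* No pending read to fail on this channel */ +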
continue; + msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, + link); + list_del(&msg->link); + msg->status = HSI_STATUS_ERROR; + spin_unlock(&omap_port->lock); + msg->complete(msg); + /* Now restart queued reads if any */ + ssi_transfer(omap_port, &omap_port->rxqueue[i]); + spin_lock(&omap_port->lock); + } + spin_unlock(&omap_port->lock); +} + +static void ssi_break_complete(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + struct hsi_msg *tmp; + u32 val; + + dev_dbg(&port->device, "HWBREAK received\n"); + + spin_lock(&omap_port->lock); + val = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + val &= ~SSI_BREAKDETECTED; + __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(0, omap_port->ssr_base + SSI_SSR_BREAK_REG); + __raw_writel(SSI_BREAKDETECTED, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) { + msg->status = HSI_STATUS_COMPLETED; + spin_lock(&omap_port->lock); + list_del(&msg->link); + spin_unlock(&omap_port->lock); + msg->complete(msg); + } + +} + +static int ssi_async_break(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int err = 0; + u32 tmp; + + ssi_clk_enable(ssi); + if (msg->ttype == HSI_MSG_WRITE) { + if (omap_port->sst.mode != SSI_MODE_FRAME) { + err = -EINVAL; + goto out; + } + __raw_writel(1, omap_port->sst_base + SSI_SST_BREAK_REG); + msg->status = HSI_STATUS_COMPLETED; + msg->complete(msg); + } else { + if (omap_port->ssr.mode != SSI_MODE_FRAME) { + err = -EINVAL; + goto out; + } + spin_lock_bh(&omap_port->lock); + tmp = __raw_readl(omap_ssi->sys + + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(tmp | SSI_BREAKDETECTED, + omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + msg->status = HSI_STATUS_PROCEEDING; + list_add_tail(&msg->link, &omap_port->brkqueue); + spin_unlock_bh(&omap_port->lock); + } +out: + ssi_clk_disable(ssi); + + return err; +} + +static int ssi_async(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct list_head *queue; + int err = 0; + + BUG_ON(!msg); + + if (msg->sgt.nents > 1) + return -ENOSYS; /* TODO: Add sg support */ + + if (msg->break_frame) + return ssi_async_break(msg); + + if (msg->ttype) { + BUG_ON(msg->channel >= omap_port->sst.channels); + queue = &omap_port->txqueue[msg->channel]; + } else { + BUG_ON(msg->channel >= omap_port->ssr.channels); + queue = &omap_port->rxqueue[msg->channel]; + } + msg->status = HSI_STATUS_QUEUED; + spin_lock_bh(&omap_port->lock); + list_add_tail(&msg->link, queue); + err = ssi_start_transfer(queue); + if (err < 0) { + list_del(&msg->link); + msg->status = HSI_STATUS_ERROR; + } + spin_unlock_bh(&omap_port->lock); + dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", + msg->status, msg->ttype, msg->channel); + + return err; +} + +static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl) +{ + struct list_head *node, *tmp; + struct hsi_msg *msg; + + list_for_each_safe(node, tmp, queue) { + msg = list_entry(node, 
struct hsi_msg, link); + if ((cl) && (cl != msg->cl)) + continue; + list_del(node); + pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n", + msg->channel, msg, msg->sgt.sgl->length, + msg->ttype, msg->context); + if (msg->destructor) + msg->destructor(msg); + else + hsi_free_msg(msg); + } +} + +static int ssi_setup(struct hsi_client *cl) +{ + struct hsi_port *port = to_hsi_port(cl->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sst = omap_port->sst_base; + void __iomem *ssr = omap_port->ssr_base; + u32 div; + u32 val; + int err = 0; + + ssi_clk_enable(ssi); + spin_lock_bh(&omap_port->lock); + if (cl->tx_cfg.speed) + omap_ssi->max_speed = cl->tx_cfg.speed; + div = ssi_calculate_div(ssi); + if (div > SSI_MAX_DIVISOR) { + dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n", + cl->tx_cfg.speed, div); + err = -EINVAL; + goto out; + } + /* Set TX/RX module to sleep to stop TX/RX during cfg update */ + __raw_writel(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG); + __raw_writel(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG); + /* Flush posted write */ + val = __raw_readl(ssr + SSI_SSR_MODE_REG); + /* TX */ + __raw_writel(31, sst + SSI_SST_FRAMESIZE_REG); + __raw_writel(div, sst + SSI_SST_DIVISOR_REG); + __raw_writel(cl->tx_cfg.channels, sst + SSI_SST_CHANNELS_REG); + __raw_writel(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG); + __raw_writel(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG); + /* RX */ + __raw_writel(31, ssr + SSI_SSR_FRAMESIZE_REG); + __raw_writel(cl->rx_cfg.channels, ssr + SSI_SSR_CHANNELS_REG); + __raw_writel(0, ssr + SSI_SSR_TIMEOUT_REG); + /* Cleanup the break queue if we leave FRAME mode */ + if ((omap_port->ssr.mode == SSI_MODE_FRAME) && + (cl->rx_cfg.mode != SSI_MODE_FRAME)) + ssi_flush_queue(&omap_port->brkqueue, cl); + __raw_writel(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG); + omap_port->channels = max(cl->rx_cfg.channels, cl->tx_cfg.channels); + /* Shadow registering for OFF mode */ + /* SST */ + omap_port->sst.divisor = div; + omap_port->sst.frame_size = 31; + omap_port->sst.channels = cl->tx_cfg.channels; + omap_port->sst.arb_mode = cl->tx_cfg.arb_mode; + omap_port->sst.mode = cl->tx_cfg.mode; + /* SSR */ + omap_port->ssr.frame_size = 31; + omap_port->ssr.timeout = 0; + omap_port->ssr.channels = cl->rx_cfg.channels; + omap_port->ssr.mode = cl->rx_cfg.mode; +out: + spin_unlock_bh(&omap_port->lock); + ssi_clk_disable(ssi); + + return err; +} + +static void ssi_cleanup_queues(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + unsigned int i; + u32 rxbufstate = 0; + u32 txbufstate = 0; + u32 status = SSI_ERROROCCURED; + u32 tmp; + + ssi_flush_queue(&omap_port->brkqueue, cl); + if (list_empty(&omap_port->brkqueue)) + status |= SSI_BREAKDETECTED; + + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->txqueue[i])) + continue; + msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg, + link); + if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { + txbufstate |= (1 << i); + status |= SSI_DATAACCEPT(i); + /* Release the clocks writes, also GDD ones */ + ssi_clk_disable(ssi); + } + ssi_flush_queue(&omap_port->txqueue[i], 
cl); + } + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->rxqueue[i])) + continue; + msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, + link); + if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { + rxbufstate |= (1 << i); + status |= SSI_DATAAVAILABLE(i); + } + ssi_flush_queue(&omap_port->rxqueue[i], cl); + /* Check if we keep the error detection interrupt armed */ + if (!list_empty(&omap_port->rxqueue[i])) + status &= ~SSI_ERROROCCURED; + } + /* Cleanup write buffers */ + tmp = __raw_readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG); + tmp &= ~txbufstate; + __raw_writel(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG); + /* Cleanup read buffers */ + tmp = __raw_readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); + tmp &= ~rxbufstate; + __raw_writel(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); + /* Disarm and ack pending interrupts */ + tmp = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + tmp &= ~status; + __raw_writel(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(status, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); +} + +static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + unsigned int i; + u32 val = 0; + u32 tmp; + + for (i = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if ((!msg) || (msg->cl != cl)) + continue; + __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + val |= (1 << i); + /* + * Clock references for write will be handled in + * ssi_cleanup_queues + */ + if (msg->ttype == HSI_MSG_READ) + ssi_clk_disable(ssi); + omap_ssi->gdd_trn[i].msg = NULL; + } + tmp = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp &= ~val; + __raw_writel(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + __raw_writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); +} + +static int ssi_release(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + spin_lock_bh(&omap_port->lock); + ssi_clk_enable(ssi); + /* Stop all the pending DMA requests for that client */ + ssi_cleanup_gdd(ssi, cl); + /* Now cleanup all the queues */ + ssi_cleanup_queues(cl); + ssi_clk_disable(ssi); + /* If it is the last client of the port, do extra checks and cleanup */ + if (port->claimed <= 1) { + /* + * Drop the clock reference for the incoming wake line + * if it is still kept high by the other side. 
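+ * This balances the reference taken in ssi_wake_tasklet().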
+ */ + if (omap_port->wkin_cken) { + ssi_clk_disable(ssi); + omap_port->wkin_cken = 0; + } + ssi_clk_enable(ssi); + /* Stop any SSI TX/RX without a client */ + ssi_set_mode(ssi, SSI_MODE_SLEEP); + omap_port->sst.mode = SSI_MODE_SLEEP; + omap_port->ssr.mode = SSI_MODE_SLEEP; + ssi_clk_disable(ssi); + WARN_ON(omap_port->wk_refcount != 0); + WARN_ON(omap_ssi->ck_refcount != 0); + } + spin_unlock_bh(&omap_port->lock); + + return 0; +} + +static int ssi_flush(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + void __iomem *sst = omap_port->sst_base; + void __iomem *ssr = omap_port->ssr_base; + unsigned int i; + u32 err; + + ssi_clk_enable(ssi); + spin_lock_bh(&omap_port->lock); + /* Stop all DMA transfers */ + for (i = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if (!msg || (port != hsi_get_port(msg->cl))) + continue; + __raw_writew(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + if (msg->ttype == HSI_MSG_READ) + ssi_clk_disable(ssi); + omap_ssi->gdd_trn[i].msg = NULL; + } + /* Flush all SST buffers */ + __raw_writel(0, sst + SSI_SST_BUFSTATE_REG); + __raw_writel(0, sst + SSI_SST_TXSTATE_REG); + /* Flush all SSR buffers */ + __raw_writel(0, ssr + SSI_SSR_RXSTATE_REG); + __raw_writel(0, ssr + SSI_SSR_BUFSTATE_REG); + /* Flush all errors */ + err = __raw_readl(ssr + SSI_SSR_ERROR_REG); + __raw_writel(err, ssr + SSI_SSR_ERRORACK_REG); + /* Flush break */ + __raw_writel(0, ssr + SSI_SSR_BREAK_REG); + /* Clear interrupts */ + __raw_writel(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(0xffffff00, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + __raw_writel(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + __raw_writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); + /* Dequeue all pending requests */ + for (i = 0; i < omap_port->channels; i++) { + /* Release write clocks */ + if (!list_empty(&omap_port->txqueue[i])) + ssi_clk_disable(ssi); + ssi_flush_queue(&omap_port->txqueue[i], NULL); + ssi_flush_queue(&omap_port->rxqueue[i], NULL); + } + ssi_flush_queue(&omap_port->brkqueue, NULL); + spin_unlock_bh(&omap_port->lock); + ssi_clk_disable(ssi); + + return 0; +} + +static int ssi_start_tx(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount); + + spin_lock_bh(&omap_port->wk_lock); + if (omap_port->wk_refcount++) { + spin_unlock_bh(&omap_port->wk_lock); + return 0; + } + ssi_clk_enable(ssi); /* Grab clocks */ + __raw_writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); + spin_unlock_bh(&omap_port->wk_lock); + + return 0; +} + +static int ssi_stop_tx(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount); + + spin_lock_bh(&omap_port->wk_lock); + BUG_ON(!omap_port->wk_refcount); + if (--omap_port->wk_refcount) { + 
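/* The wake line must stay high for other pending TX requests */ +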
spin_unlock_bh(&omap_port->wk_lock); + return 0; + } + __raw_writel(SSI_WAKE(0), + omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); + ssi_clk_disable(ssi); /* Release clocks */ + spin_unlock_bh(&omap_port->wk_lock); + + return 0; +} + +static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) +{ + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_msg *msg; + u32 *buf; + u32 reg; + u32 val; + + spin_lock(&omap_port->lock); + msg = list_first_entry(queue, struct hsi_msg, link); + if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) { + msg->actual_len = 0; + msg->status = HSI_STATUS_PENDING; + } + if (msg->ttype == HSI_MSG_WRITE) + val = SSI_DATAACCEPT(msg->channel); + else + val = SSI_DATAAVAILABLE(msg->channel); + if (msg->status == HSI_STATUS_PROCEEDING) { + buf = sg_virt(msg->sgt.sgl) + msg->actual_len; + if (msg->ttype == HSI_MSG_WRITE) + __raw_writel(*buf, omap_port->sst_base + + SSI_SST_BUFFER_CH_REG(msg->channel)); + else + *buf = __raw_readl(omap_port->ssr_base + + SSI_SSR_BUFFER_CH_REG(msg->channel)); + dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel, + msg->ttype, *buf); + msg->actual_len += sizeof(*buf); + if (msg->actual_len >= msg->sgt.sgl->length) + msg->status = HSI_STATUS_COMPLETED; + /* + * Wait for the last written frame to be really sent before + * we call the complete callback + */ + if ((msg->status == HSI_STATUS_PROCEEDING) || + ((msg->status == HSI_STATUS_COMPLETED) && + (msg->ttype == HSI_MSG_WRITE))) { + __raw_writel(val, omap_ssi->sys + + SSI_MPU_STATUS_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + return; + } + + } + /* Transfer completed at this point */ + reg = __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + if (msg->ttype == HSI_MSG_WRITE) + ssi_clk_disable(ssi); /* Release clocks for write transfer */ + reg &= ~val; + __raw_writel(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + list_del(&msg->link); + spin_unlock(&omap_port->lock); + msg->complete(msg); + ssi_transfer(omap_port, queue); +} + +static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg = omap_ssi->gdd_trn[lch].msg; + struct hsi_port *port = to_hsi_port(msg->cl->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + unsigned int dir; + u32 csr; + u32 val; + + spin_lock(&omap_ssi->lock); + + val = __raw_readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + val &= ~SSI_GDD_LCH(lch); + __raw_writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + + if (msg->ttype == HSI_MSG_READ) { + dir = DMA_FROM_DEVICE; + val = SSI_DATAAVAILABLE(msg->channel); + ssi_clk_disable(ssi); + } else { + dir = DMA_TO_DEVICE; + val = SSI_DATAACCEPT(msg->channel); + /* Keep clocks reference for write pio event */ + } + dma_unmap_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, dir); + csr = __raw_readw(omap_ssi->gdd + SSI_GDD_CSR_REG(lch)); + omap_ssi->gdd_trn[lch].msg = NULL; /* release GDD lch */ + dev_dbg(&port->device, "DMA completed ch %d ttype %d\n", + msg->channel, msg->ttype); + spin_unlock(&omap_ssi->lock); + if (csr & SSI_CSR_TOUR) { /* Timeout error */ + msg->status = HSI_STATUS_ERROR; + msg->actual_len = 0; + spin_lock(&omap_port->lock); + list_del(&msg->link); /* Dequeue msg */ + 
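/* Run the completion callback outside the port lock */ +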
spin_unlock(&omap_port->lock); + msg->complete(msg); + return; + } + spin_lock(&omap_port->lock); + val |= __raw_readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + __raw_writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + msg->status = HSI_STATUS_COMPLETED; + msg->actual_len = sg_dma_len(msg->sgt.sgl); +} + +static void ssi_gdd_tasklet(unsigned long dev) +{ + struct hsi_controller *ssi = (struct hsi_controller *)dev; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + unsigned int lch; + u32 status_reg; + + ssi_clk_enable(ssi); + + status_reg = __raw_readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { + if (status_reg & SSI_GDD_LCH(lch)) + ssi_gdd_complete(ssi, lch); + } + __raw_writel(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG); + status_reg = __raw_readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); + ssi_clk_disable(ssi); + if (status_reg) + tasklet_hi_schedule(&omap_ssi->gdd_tasklet); + else + enable_irq(omap_ssi->gdd_irq); +} + +static irqreturn_t ssi_gdd_isr(int irq, void *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + tasklet_hi_schedule(&omap_ssi->gdd_tasklet); + disable_irq_nosync(irq); + + return IRQ_HANDLED; +} + +static void ssi_pio_tasklet(unsigned long ssi_port) +{ + struct hsi_port *port = (struct hsi_port *)ssi_port; + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + unsigned int ch; + u32 status_reg; + + ssi_clk_enable(ssi); + status_reg = __raw_readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); + status_reg &= __raw_readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); + + for (ch = 0; ch < omap_port->channels; ch++) { + if (status_reg & SSI_DATAACCEPT(ch)) + ssi_pio_complete(port, &omap_port->txqueue[ch]); + if (status_reg & SSI_DATAAVAILABLE(ch)) + ssi_pio_complete(port, &omap_port->rxqueue[ch]); + } + if (status_reg & SSI_BREAKDETECTED) + ssi_break_complete(port); + if (status_reg & SSI_ERROROCCURED) + ssi_error(port); + + status_reg = __raw_readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); + status_reg &= __raw_readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); + ssi_clk_disable(ssi); + + if (status_reg) + tasklet_hi_schedule(&omap_port->pio_tasklet); + else + enable_irq(omap_port->irq); +} + +static irqreturn_t ssi_pio_isr(int irq, void *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + + tasklet_hi_schedule(&omap_port->pio_tasklet); + disable_irq_nosync(irq); + + return IRQ_HANDLED; +} + +static void ssi_wake_tasklet(unsigned long ssi_port) +{ + struct hsi_port *port = (struct hsi_port *)ssi_port; + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + + if (ssi_wakein(port)) { + /* + * We can have a quick High-Low-High transition in the line. + * In such a case, if we have long interrupt latencies, + * we can miss the low event or get the high event twice. + * This workaround avoids breaking the clock reference + * count when such a situation occurs.
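+ * The reference taken here is dropped on the wake-low event below, or in ssi_release() as a last resort.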
+ */ + spin_lock(&omap_port->lock); + if (!omap_port->wkin_cken) { + omap_port->wkin_cken = 1; + ssi_clk_enable(ssi); + } + spin_unlock(&omap_port->lock); + dev_dbg(&ssi->device, "Wake in high\n"); + hsi_event(port, HSI_EVENT_START_RX); + } else { + dev_dbg(&ssi->device, "Wake in low\n"); + hsi_event(port, HSI_EVENT_STOP_RX); + spin_lock(&omap_port->lock); + if (omap_port->wkin_cken) { + ssi_clk_disable(ssi); + omap_port->wkin_cken = 0; + } + spin_unlock(&omap_port->lock); + } +} + +static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port); + + tasklet_hi_schedule(&omap_port->wake_tasklet); + + return IRQ_HANDLED; +} + +static int __init ssi_port_irq(struct hsi_port *port, + struct platform_device *pd) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct resource *irq; + int err; + + irq = platform_get_resource(pd, IORESOURCE_IRQ, (port->num * 3) + 1); + if (!irq) { + dev_err(&port->device, "Port IRQ resource missing\n"); + return -ENXIO; + } + omap_port->irq = irq->start; + tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet, + (unsigned long)port); + err = devm_request_irq(&pd->dev, omap_port->irq, ssi_pio_isr, + IRQF_DISABLED, irq->name, port); + if (err < 0) + dev_err(&port->device, "Request IRQ %d failed (%d)\n", + omap_port->irq, err); + return err; +} + +static int __init ssi_wake_irq(struct hsi_port *port, + struct platform_device *pd) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct resource *irq; + int err; + + irq = platform_get_resource(pd, IORESOURCE_IRQ, (port->num * 3) + 3); + if (!irq) { + dev_err(&port->device, "Wake in IRQ resource missing"); + return -ENXIO; + } + if (irq->flags & IORESOURCE_UNSET) { + dev_info(&port->device, "No Wake in support\n"); + omap_port->wake_irq = -1; + return 0; + } + omap_port->wake_irq = irq->start; + tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet, + (unsigned long)port); + err = devm_request_irq(&pd->dev, omap_port->wake_irq, ssi_wake_isr, + IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + irq->name, port); + if (err < 0) + dev_err(&port->device, "Request Wake in IRQ %d failed %d\n", + omap_port->wake_irq, err); + err = enable_irq_wake(omap_port->wake_irq); + if (err < 0) + dev_err(&port->device, "Enable wake on the wakeline in irq %d" + " failed %d\n", omap_port->wake_irq, err); + + return err; +} + +static void __init ssi_queues_init(struct omap_ssi_port *omap_port) +{ + unsigned int ch; + + for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) { + INIT_LIST_HEAD(&omap_port->txqueue[ch]); + INIT_LIST_HEAD(&omap_port->rxqueue[ch]); + } + INIT_LIST_HEAD(&omap_port->brkqueue); +} + +static int __init ssi_get_iomem(struct platform_device *pd, + unsigned int num, void __iomem **pbase, dma_addr_t *phy) +{ + struct resource *mem; + struct resource *ioarea; + void __iomem *base; + + mem = platform_get_resource(pd, IORESOURCE_MEM, num); + if (!mem) { + dev_err(&pd->dev, "IO memory region missing (%d)\n", num); + return -ENXIO; + } + ioarea = devm_request_mem_region(&pd->dev, mem->start, + resource_size(mem), dev_name(&pd->dev)); + if (!ioarea) { + dev_err(&pd->dev, "%s IO memory region request failed\n", + mem->name); + return -ENXIO; + } + base = devm_ioremap(&pd->dev, mem->start, resource_size(mem)); + if (!base) { + dev_err(&pd->dev, "%s IO remap failed\n", mem->name); + return -ENXIO; + } + *pbase = base; + + if (phy) + *phy = mem->start; + + return 0; +} + +static int __init ssi_ports_init(struct hsi_controller 
*ssi, + struct platform_device *pd) +{ + struct hsi_port *port; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct omap_ssi_port *omap_port; + unsigned int i; + int err; + + omap_ssi->port = devm_kzalloc(&pd->dev, + sizeof(omap_port) * ssi->num_ports, GFP_KERNEL); + if (!omap_ssi->port) + return -ENOMEM; + + for (i = 0; i < ssi->num_ports; i++) { + port = &ssi->port[i]; + omap_port = devm_kzalloc(&pd->dev, sizeof(*omap_port), + GFP_KERNEL); + if (!omap_port) + return -ENOMEM; + port->async = ssi_async; + port->setup = ssi_setup; + port->flush = ssi_flush; + port->start_tx = ssi_start_tx; + port->stop_tx = ssi_stop_tx; + port->release = ssi_release; + hsi_port_set_drvdata(port, omap_port); + /* Get SST base addresses*/ + err = ssi_get_iomem(pd, ((i * 2) + 2), &omap_port->sst_base, + &omap_port->sst_dma); + if (err < 0) + return err; + /* Get SSR base addresses */ + err = ssi_get_iomem(pd, ((i * 2) + 3), &omap_port->ssr_base, + &omap_port->ssr_dma); + if (err < 0) + return err; + err = ssi_port_irq(port, pd); + if (err < 0) + return err; + err = ssi_wake_irq(port, pd); + if (err < 0) + return err; + ssi_queues_init(omap_port); + spin_lock_init(&omap_port->lock); + spin_lock_init(&omap_port->wk_lock); + omap_port->dev = &port->device; + omap_ssi->port[i] = omap_port; + } + + return 0; +} + +static void ssi_ports_exit(struct hsi_controller *ssi) +{ + struct omap_ssi_port *omap_port; + unsigned int i; + + for (i = 0; i < ssi->num_ports; i++) { + omap_port = hsi_port_drvdata(&ssi->port[i]); + tasklet_kill(&omap_port->wake_tasklet); + tasklet_kill(&omap_port->pio_tasklet); + } +} + +static void ssi_clk_release(struct device *dev __maybe_unused, void *res) +{ + struct ssi_clk_res *r = res; + + clk_put(r->clk); +} + +static struct clk *__init ssi_devm_clk_get(struct device *dev, const char *id) +{ + struct ssi_clk_res *pclk; + struct clk *clk; + + pclk = devres_alloc(ssi_clk_release, sizeof(*pclk), GFP_KERNEL); + if (!pclk) { + dev_err(dev, "Could not allocate the device resource entry\n"); + return ERR_PTR(-ENOMEM); + } + clk = clk_get(dev, id); + if (IS_ERR(clk)) { + dev_err(dev, "clock get %s failed %li\n", id, PTR_ERR(clk)); + devres_free(pclk); + } else { + pclk->clk = clk; + devres_add(dev, pclk); + } + + return clk; +} + +static int __init ssi_add_controller(struct hsi_controller *ssi, + struct platform_device *pd) +{ + struct omap_ssi_platform_data *omap_ssi_pdata = pd->dev.platform_data; + struct omap_ssi_controller *omap_ssi; + struct resource *irq; + int err; + + omap_ssi = devm_kzalloc(&pd->dev, sizeof(*omap_ssi), GFP_KERNEL); + if (!omap_ssi) { + dev_err(&pd->dev, "not enough memory for omap ssi\n"); + return -ENOMEM; + } + ssi->id = pd->id; + ssi->owner = THIS_MODULE; + ssi->device.parent = &pd->dev; + dev_set_name(&ssi->device, "ssi%d", ssi->id); + hsi_controller_set_drvdata(ssi, omap_ssi); + omap_ssi->dev = &ssi->device; + err = ssi_get_iomem(pd, 0, &omap_ssi->sys, NULL); + if (err < 0) + return err; + err = ssi_get_iomem(pd, 1, &omap_ssi->gdd, NULL); + if (err < 0) + return err; + irq = platform_get_resource(pd, IORESOURCE_IRQ, 0); + if (!irq) { + dev_err(&pd->dev, "GDD IRQ resource missing\n"); + return -ENXIO; + } + omap_ssi->gdd_irq = irq->start; + tasklet_init(&omap_ssi->gdd_tasklet, ssi_gdd_tasklet, + (unsigned long)ssi); + err = devm_request_irq(&pd->dev, omap_ssi->gdd_irq, ssi_gdd_isr, + IRQF_DISABLED, irq->name, ssi); + if (err < 0) { + dev_err(&ssi->device, "Request GDD IRQ %d failed (%d)", + omap_ssi->gdd_irq, err); + return err; + } + err = 
ssi_ports_init(ssi, pd);
+	if (err < 0)
+		return err;
+	omap_ssi->get_loss = omap_ssi_pdata->get_dev_context_loss_count;
+	omap_ssi->max_speed = UINT_MAX;
+	spin_lock_init(&omap_ssi->lock);
+	spin_lock_init(&omap_ssi->ck_lock);
+	omap_ssi->ick = ssi_devm_clk_get(&pd->dev, "ssi_ick");
+	if (IS_ERR(omap_ssi->ick))
+		return PTR_ERR(omap_ssi->ick);
+	omap_ssi->fck = ssi_devm_clk_get(&pd->dev, "ssi_ssr_fck");
+	if (IS_ERR(omap_ssi->fck))
+		return PTR_ERR(omap_ssi->fck);
+	err = hsi_register_controller(ssi);
+
+	return err;
+}
+
+static int __init ssi_hw_init(struct hsi_controller *ssi)
+{
+	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+	unsigned int i;
+	u32 val;
+	int err;
+
+	err = ssi_clk_enable(ssi);
+	if (err < 0) {
+		dev_err(&ssi->device, "Failed to enable the clocks %d\n", err);
+		return err;
+	}
+	/* Reset the SSI controller */
+	__raw_writel(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG);
+	val = __raw_readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
+	for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) {
+		msleep(20);
+		val = __raw_readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
+	}
+	if (!(val & SSI_RESETDONE)) {
+		dev_err(&ssi->device, "SSI HW reset failed\n");
+		ssi_clk_disable(ssi);
+		return -EIO;
+	}
+	/* Reset the GDD */
+	__raw_writel(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG);
+	/* Get FCK rate */
+	omap_ssi->fck_rate = clk_get_rate(omap_ssi->fck) / 1000; /* kHz */
+	dev_dbg(&ssi->device, "SSI fck rate %lu kHz\n", omap_ssi->fck_rate);
+	/* Set default PM settings */
+	val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART;
+	__raw_writel(val, omap_ssi->sys + SSI_SYSCONFIG_REG);
+	omap_ssi->sysconfig = val;
+	__raw_writel(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG);
+	omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON;
+	ssi_clk_disable(ssi);
+
+	return 0;
+}
+
+static void ssi_remove_controller(struct hsi_controller *ssi)
+{
+	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+	ssi_ports_exit(ssi);
+	tasklet_kill(&omap_ssi->gdd_tasklet);
+	hsi_unregister_controller(ssi);
+}
+
+static int __init ssi_probe(struct platform_device *pd)
+{
+	struct omap_ssi_platform_data *omap_ssi_pdata = pd->dev.platform_data;
+	struct hsi_controller *ssi;
+	int err;
+
+	if (!omap_ssi_pdata) {
+		dev_err(&pd->dev, "No OMAP SSI platform data\n");
+		return -EINVAL;
+	}
+	ssi = hsi_alloc_controller(omap_ssi_pdata->num_ports, GFP_KERNEL);
+	if (!ssi) {
+		dev_err(&pd->dev, "No memory for controller\n");
+		return -ENOMEM;
+	}
+	platform_set_drvdata(pd, ssi);
+	err = ssi_add_controller(ssi, pd);
+	if (err < 0)
+		goto out1;
+	err = ssi_hw_init(ssi);
+	if (err < 0)
+		goto out2;
+#ifdef CONFIG_DEBUG_FS
+	err = ssi_debug_add_ctrl(ssi);
+	if (err < 0)
+		goto out2;
+#endif
+	return err;
+out2:
+	ssi_remove_controller(ssi);
+out1:
+	platform_set_drvdata(pd, NULL);
+	hsi_free_controller(ssi);
+
+	return err;
+}
+
+static int __exit ssi_remove(struct platform_device *pd)
+{
+	struct hsi_controller *ssi = platform_get_drvdata(pd);
+
+#ifdef CONFIG_DEBUG_FS
+	ssi_debug_remove_ctrl(ssi);
+#endif
+	ssi_remove_controller(ssi);
+	platform_set_drvdata(pd, NULL);
+	hsi_free_controller(ssi);
+
+	return 0;
+}
+
+static struct platform_driver ssi_pdriver = {
+	.remove	= __exit_p(ssi_remove),
+	.driver	= {
+		.name	= "omap_ssi",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init omap_ssi_init(void)
+{
+	pr_info("OMAP SSI hw driver loaded\n");
+	return platform_driver_probe(&ssi_pdriver, ssi_probe);
+}
+module_init(omap_ssi_init);
+
+static void __exit omap_ssi_exit(void)
+{
+
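	/*
	 * Unregistering the platform driver unbinds any devices still
	 * attached to it, which runs ssi_remove() and tears the
	 * controller down before the module text goes away.
	 */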
platform_driver_unregister(&ssi_pdriver); + pr_info("OMAP SSI driver removed\n"); +} +module_exit(omap_ssi_exit); + +MODULE_ALIAS("platform:omap_ssi"); +MODULE_AUTHOR("Carlos Chinea "); +MODULE_DESCRIPTION("Synchronous Serial Interface Driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 8999e34d81e7949139cbabca57cde5429ed530fb Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Tue, 19 Apr 2011 09:39:21 +0200 Subject: HSI: omap_ssi: Add OMAP SSI to the kernel configuration Add OMAP SSI device and driver to the kernel configuration Change-Id: I7f21d4016a98db6f53efcd03f9ffd176b8845d7c Signed-off-by: Carlos Chinea Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/20577 Reviewed-by: Pawel SZYSZUK Tested-by: Pawel SZYSZUK Reviewed-by: Jonas ABERG Conflicts: arch/arm/mach-omap2/Makefile --- arch/arm/mach-omap2/Makefile | 3 +++ drivers/hsi/Kconfig | 1 + drivers/hsi/Makefile | 1 + drivers/hsi/controllers/Kconfig | 23 +++++++++++++++++++++++ drivers/hsi/controllers/Makefile | 5 +++++ 5 files changed, 33 insertions(+) create mode 100644 drivers/hsi/controllers/Kconfig create mode 100644 drivers/hsi/controllers/Makefile diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 49f92bc1c31..95578bb571d 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -187,6 +187,9 @@ ifneq ($(CONFIG_TIDSPBRIDGE),) obj-y += dsp.o endif +omap-ssi-$(CONFIG_OMAP_SSI) := ssi.o +obj-y += $(omap-ssi-m) $(omap-ssi-y) + # Specific board support obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o diff --git a/drivers/hsi/Kconfig b/drivers/hsi/Kconfig index d94e38dd80c..f053858683c 100644 --- a/drivers/hsi/Kconfig +++ b/drivers/hsi/Kconfig @@ -15,5 +15,6 @@ config HSI_BOARDINFO default y source "drivers/hsi/clients/Kconfig" +source "drivers/hsi/controllers/Kconfig" endif # HSI diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile index 9d5d33f90de..586e5e0729e 100644 --- a/drivers/hsi/Makefile +++ b/drivers/hsi/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o obj-$(CONFIG_HSI) += hsi.o obj-y += clients/ +obj-y += controllers/ diff --git a/drivers/hsi/controllers/Kconfig b/drivers/hsi/controllers/Kconfig new file mode 100644 index 00000000000..3efe0f027c7 --- /dev/null +++ b/drivers/hsi/controllers/Kconfig @@ -0,0 +1,23 @@ +# +# HSI controllers configuration +# +comment "HSI controllers" + +config OMAP_SSI + tristate "OMAP SSI hardware driver" + depends on ARCH_OMAP && HSI + default n + ---help--- + SSI is a legacy version of HSI. It is usually used to connect + an application engine with a cellular modem. + If you say Y here, you will enable the OMAP SSI hardware driver. + + If unsure, say N. 
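+
+# A board that talks to its modem over SSI would typically enable the
+# HSI core and this driver together; an illustrative (not mandatory)
+# defconfig fragment:
+#
+#   CONFIG_HSI=y
+#   CONFIG_OMAP_SSI=y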
+ +if OMAP_SSI + +config OMAP_SSI_CONFIG + boolean + default y + +endif # OMAP_SSI diff --git a/drivers/hsi/controllers/Makefile b/drivers/hsi/controllers/Makefile new file mode 100644 index 00000000000..c4ba2c2c2bd --- /dev/null +++ b/drivers/hsi/controllers/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for HSI controllers drivers +# + +obj-$(CONFIG_OMAP_SSI) += omap_ssi.o -- cgit v1.2.3 From 1bd12ef5ea3f63f6a2002a7574d022ca7d2529ae Mon Sep 17 00:00:00 2001 From: Philippe Langlais Date: Wed, 19 Oct 2011 10:22:22 +0200 Subject: arch: arm: ST-E HSI controller Change-Id: Ibe70431ede8a9707b37e3394c22beb9acc42d8cb Signed-off-by: Pawel Szyszuk --- arch/arm/mach-ux500/include/mach/hsi.h | 122 +++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 arch/arm/mach-ux500/include/mach/hsi.h diff --git a/arch/arm/mach-ux500/include/mach/hsi.h b/arch/arm/mach-ux500/include/mach/hsi.h new file mode 100644 index 00000000000..030e35e729b --- /dev/null +++ b/arch/arm/mach-ux500/include/mach/hsi.h @@ -0,0 +1,122 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + */ + +#ifndef __MACH_HSI_H +#define __MACH_HSI_H + +#include + +/* HSIT register offsets */ +#define STE_HSI_TX_ID 0x000 +#define STE_HSI_TX_MODE 0x004 +#define STE_HSI_TX_STATE 0x008 +#define STE_HSI_TX_IOSTATE 0x00C +#define STE_HSI_TX_BUFSTATE 0x010 +#define STE_HSI_TX_DIVISOR 0x014 +#define STE_HSI_TX_PARITY 0x018 +#define STE_HSI_TX_BREAK 0x01C +#define STE_HSI_TX_CHANNELS 0x020 +#define STE_HSI_TX_FLUSHBITS 0x024 +#define STE_HSI_TX_PRIORITY 0x028 +#define STE_HSI_TX_BURSTLEN 0x02C +#define STE_HSI_TX_PREAMBLE 0x030 +#define STE_HSI_TX_DATASWAP 0x034 +#define STE_HSI_TX_FRAMELENX 0x080 +#define STE_HSI_TX_BUFFERX 0x0C0 +#define STE_HSI_TX_BASEX 0x100 +#define STE_HSI_TX_SPANX 0x140 +#define STE_HSI_TX_GAUGEX 0x180 +#define STE_HSI_TX_WATERMARKX 0x1C0 +#define STE_HSI_TX_DMAEN 0x200 +#define STE_HSI_TX_WATERMARKIS 0x204 +#define STE_HSI_TX_WATERMARKIM 0x208 +#define STE_HSI_TX_WATERMARKIC 0x20C +#define STE_HSI_TX_WATERMARKID 0x210 +#define STE_HSI_TX_PERIPHID0 0xFE0 +#define STE_HSI_TX_PERIPHID1 0xFE4 +#define STE_HSI_TX_PERIPHID2 0xFE8 +#define STE_HSI_TX_PERIPHID3 0xFEC + +/* HSIR register offsets */ +#define STE_HSI_RX_ID 0x000 +#define STE_HSI_RX_MODE 0x004 +#define STE_HSI_RX_STATE 0x008 +#define STE_HSI_RX_BUFSTATE 0x00C +#define STE_HSI_RX_THRESHOLD 0x010 +#define STE_HSI_RX_PARITY 0x014 +#define STE_HSI_RX_DETECTOR 0x018 +#define STE_HSI_RX_EXCEP 0x01C +#define STE_HSI_RX_ACK 0x020 +#define STE_HSI_RX_CHANNELS 0x024 +#define STE_HSI_RX_REALTIME 0x028 +#define STE_HSI_RX_OVERRUN 0x02C +#define STE_HSI_RX_OVERRUNACK 0x030 +#define STE_HSI_RX_PREAMBLE 0x034 +#define STE_HSI_RX_PIPEGAUGE 0x038 +#define STE_HSI_RX_STATICCONFID 0x03C +#define STE_HSI_RX_BUFFERX 0x080 +#define STE_HSI_RX_FRAMELENX 0x0C0 +#define STE_HSI_RX_BASEX 0x100 +#define STE_HSI_RX_SPANX 0x140 +#define STE_HSI_RX_GAUGEX 0x180 +#define STE_HSI_RX_WATERMARKX 0x1C0 +#define STE_HSI_RX_DMAEN 0x200 +#define STE_HSI_RX_WATERMARKIS 0x204 +#define STE_HSI_RX_WATERMARKIM 0x208 +#define STE_HSI_RX_WATERMARKIC 0x20C +#define STE_HSI_RX_WATERMARKID 0x210 +#define STE_HSI_RX_OVERRUNMIS 0x214 +#define STE_HSI_RX_OVERRUNIM 0x218 +#define STE_HSI_RX_EXCEPMIS 0x21C +#define STE_HSI_RX_EXCEPIM 0x220 +#define STE_HSI_RX_PERIPHID0 0xFE0 +#define STE_HSI_RX_PERIPHID1 0xFE4 +#define STE_HSI_RX_PERIPHID2 0xFE8 +#define STE_HSI_RX_PERIPHID3 0xFEC + +/* HSI states */ +#define STE_HSI_STATE_IDLE 0x00 +#define 
STE_HSI_STATE_START 0x01 +#define STE_HSI_STATE_TRANSMIT 0x02 +#define STE_HSI_STATE_BREAK 0x03 +#define STE_HSI_STATE_FLUSH 0x04 +#define STE_HSI_STATE_HALT 0x05 + +/* HSI exceptions */ +#define STE_HSI_EXCEP_TIMEOUT 0x01 +#define STE_HSI_EXCEP_OVERRUN 0x02 +#define STE_HSI_EXCEP_BREAK 0x04 +#define STE_HSI_EXCEP_PARITY 0x08 + +/* HSI modes */ +#define STE_HSI_MODE_SLEEP 0x00 +#define STE_HSI_MODE_STREAM 0x01 +#define STE_HSI_MODE_FRAME 0x02 +#define STE_HSI_MODE_PIPELINED 0x03 +#define STE_HSI_MODE_FAILSAFE 0x04 + +#define STE_HSI_MAX_BUFFERS 32 + +/* Max channels of STE HSI controller */ +#define STE_HSI_MAX_CHANNELS 4 + +struct stedma40_chan_cfg; + +struct ste_hsi_port_cfg { +#ifdef CONFIG_STE_DMA40 + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); + struct stedma40_chan_cfg *dma_tx_cfg; + struct stedma40_chan_cfg *dma_rx_cfg; +#endif +}; + +struct ste_hsi_platform_data { + int num_ports; + int use_dma; + struct ste_hsi_port_cfg *port_cfg; +}; + +#endif -- cgit v1.2.3 From 753573ca986c76431c912cc519f0bb4e24dfd17c Mon Sep 17 00:00:00 2001 From: Pawel Szyszuk Date: Mon, 18 Apr 2011 15:09:06 +0100 Subject: drivers: hsi: Add ST-E HSI controller Change-Id: I1cbd641a712a33f7d9551e8291dd83dd51b62b79 Signed-off-by: Pawel Szyszuk Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/21036 Reviewed-by: Jonas ABERG Signed-off-by: Robert Marklund --- drivers/hsi/controllers/Kconfig | 10 + drivers/hsi/controllers/Makefile | 1 + drivers/hsi/controllers/ste_hsi.c | 1535 +++++++++++++++++++++++++++++++++++++ 3 files changed, 1546 insertions(+) create mode 100644 drivers/hsi/controllers/ste_hsi.c diff --git a/drivers/hsi/controllers/Kconfig b/drivers/hsi/controllers/Kconfig index 3efe0f027c7..76d339eaf32 100644 --- a/drivers/hsi/controllers/Kconfig +++ b/drivers/hsi/controllers/Kconfig @@ -3,6 +3,16 @@ # comment "HSI controllers" +config STE_HSI + tristate "STE HSI controller driver" + depends on (ARCH_U8500 || ARCH_NOMADIK) && HSI + default n + help + ST-Ericsson HSI controller. + If you say Y here, you will enable the U8500 HSI hardware driver. + + If unsure, say N. 
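+
+# Illustrative fragment for a U8500-based board defconfig (hypothetical):
+#
+#   CONFIG_HSI=y
+#   CONFIG_STE_HSI=y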
+ config OMAP_SSI tristate "OMAP SSI hardware driver" depends on ARCH_OMAP && HSI diff --git a/drivers/hsi/controllers/Makefile b/drivers/hsi/controllers/Makefile index c4ba2c2c2bd..475637a0f23 100644 --- a/drivers/hsi/controllers/Makefile +++ b/drivers/hsi/controllers/Makefile @@ -2,4 +2,5 @@ # Makefile for HSI controllers drivers # +obj-$(CONFIG_STE_HSI) += ste_hsi.o obj-$(CONFIG_OMAP_SSI) += omap_ssi.o diff --git a/drivers/hsi/controllers/ste_hsi.c b/drivers/hsi/controllers/ste_hsi.c new file mode 100644 index 00000000000..080a57ccade --- /dev/null +++ b/drivers/hsi/controllers/ste_hsi.c @@ -0,0 +1,1535 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Marcin Mielczarczyk for ST-Ericsson + * Author: Lukasz Baj for ST-Ericsson + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_STE_DMA40 +#include +#include +#endif + +#include + +/** + * struct ste_hsi_controller - Nomadik HSI controller data + * @dev: device associated to the controller (HSI controller) + * @rx_base: HSI receiver registers base address + * @tx_base: HSI transmitter registers base address + */ +struct ste_hsi_controller { + struct device *dev; + struct clk *tx_clk; + struct clk *rx_clk; + struct clk *ssitx_clk; + struct clk *ssirx_clk; + struct delayed_work clk_work; + unsigned char __iomem *rx_base; + unsigned char __iomem *tx_base; + int overrun_irq[STE_HSI_MAX_CHANNELS]; + int ck_refcount; + spinlock_t ck_lock; + spinlock_t lock; + unsigned int use_dma:1; + unsigned int ck_on:1; + /* physical address of rx and tx controller */ + dma_addr_t rx_dma_base; + dma_addr_t tx_dma_base; +}; + +#ifdef CONFIG_STE_DMA40 +struct ste_hsi_channel_dma { + struct dma_chan *dma_chan; + struct dma_async_tx_descriptor *desc; + dma_cookie_t cookie; +}; +#endif + +struct ste_hsi_port { + struct device *dev; + struct list_head txqueue[STE_HSI_MAX_CHANNELS]; + struct list_head rxqueue[STE_HSI_MAX_CHANNELS]; + struct list_head brkqueue; + int tx_irq; + int rx_irq; + int excep_irq; + struct tasklet_struct rx_tasklet; + struct tasklet_struct tx_tasklet; + struct tasklet_struct exception_tasklet; + struct tasklet_struct overrun_tasklet; + unsigned char channels; +#ifdef CONFIG_STE_DMA40 + struct ste_hsi_channel_dma tx_dma[STE_HSI_MAX_CHANNELS]; + struct ste_hsi_channel_dma rx_dma[STE_HSI_MAX_CHANNELS]; +#endif +}; + +#define hsi_to_ste_port(port) (hsi_port_drvdata(port)) +#define hsi_to_ste_controller(con) (hsi_controller_drvdata(con)) +#define client_to_ste_port(cl) (hsi_port_drvdata(hsi_get_port(cl))) +#define client_to_hsi(cl) \ + (to_hsi_controller(hsi_get_port(cl)->device.parent)) +#define client_to_ste_controller(cl) \ + (hsi_controller_drvdata(client_to_hsi(cl))) +#define ste_port_to_ste_controller(port) \ + ((struct ste_hsi_controller *)hsi_controller_drvdata( \ + to_hsi_controller(port->dev->parent))) + +static u32 ste_hsir_periphid[8] = { 0x2C, 0, 0x8, 0x18, 0xD, 0xF0, 0x5, 0xB1 }; +static u32 ste_hsit_periphid[8] = { 0x2B, 0, 0x8, 0x18, 0xD, 0xF0, 0x5, 0xB1 }; + +/* + * linux/amba/bus.h macros can not be used, because 8 bytes are validated: + * PERIPHID0..3 and PCELLID0..3 for HSIR and HSIT. 
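+ * Instead, the eight 32-bit ID words are compared one by one against
+ * the ste_hsir_periphid and ste_hsit_periphid tables above.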
+ */
+static inline int compare_periphid(u32 *id1, u32 *id2, int count)
+{
+	while (count && *id1++ == *id2++)
+		count--;
+
+	return count;
+}
+
+static void ste_hsi_clk_free(struct clk **pclk)
+{
+	/* Only put a clock that was actually obtained via clk_get() */
+	if (!IS_ERR(*pclk) && *pclk != NULL)
+		clk_put(*pclk);
+	*pclk = NULL;
+}
+
+static void ste_hsi_clks_free(struct ste_hsi_controller *ste_hsi)
+{
+	ste_hsi_clk_free(&ste_hsi->rx_clk);
+	ste_hsi_clk_free(&ste_hsi->tx_clk);
+	ste_hsi_clk_free(&ste_hsi->ssirx_clk);
+	ste_hsi_clk_free(&ste_hsi->ssitx_clk);
+}
+
+static int ste_hsi_clock_enable(struct hsi_controller *hsi)
+{
+	struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+	int err = 0;
+
+	spin_lock_bh(&ste_hsi->ck_lock);
+	if (ste_hsi->ck_refcount++ || ste_hsi->ck_on)
+		goto out;
+
+	err = clk_enable(ste_hsi->ssirx_clk);
+	if (unlikely(err))
+		goto out;
+
+	err = clk_enable(ste_hsi->ssitx_clk);
+	if (unlikely(err)) {
+		clk_disable(ste_hsi->ssirx_clk);
+		goto out;
+	}
+
+	err = clk_enable(ste_hsi->rx_clk);
+	if (unlikely(err)) {
+		clk_disable(ste_hsi->ssitx_clk);
+		clk_disable(ste_hsi->ssirx_clk);
+		goto out;
+	}
+
+	err = clk_enable(ste_hsi->tx_clk);
+	if (unlikely(err)) {
+		clk_disable(ste_hsi->rx_clk);
+		clk_disable(ste_hsi->ssitx_clk);
+		clk_disable(ste_hsi->ssirx_clk);
+		goto out;
+	}
+
+	ste_hsi->ck_on = 1;
+out:
+	if (err)
+		ste_hsi->ck_refcount--;
+
+	spin_unlock_bh(&ste_hsi->ck_lock);
+
+	return err;
+}
+
+static void ste_hsi_delayed_disable_clock(struct work_struct *work)
+{
+	struct ste_hsi_controller *ste_hsi;
+
+	ste_hsi = container_of(work, struct ste_hsi_controller, clk_work.work);
+
+	spin_lock_bh(&ste_hsi->ck_lock);
+
+	/*
+	 * Nothing to do if the clocks must stay on (enable was called in
+	 * the meantime) or if they are already off.
+	 */
+	if (ste_hsi->ck_refcount || !ste_hsi->ck_on)
+		goto out;
+
+	if (readl(ste_hsi->tx_base + STE_HSI_TX_STATE) != STE_HSI_STATE_IDLE ||
+	    readl(ste_hsi->rx_base + STE_HSI_RX_STATE)
+	    != STE_HSI_STATE_IDLE ||
+	    readl(ste_hsi->rx_base + STE_HSI_RX_BUFSTATE) != 0) {
+		/* Try again later */
+		int err = schedule_delayed_work(&ste_hsi->clk_work, HZ);
+		if (err < 0)
+			dev_err(ste_hsi->dev, "Error scheduling work\n");
+		goto out;
+	}
+
+	/* Actually gate the clocks off */
+	clk_disable(ste_hsi->tx_clk);
+	clk_disable(ste_hsi->rx_clk);
+	clk_disable(ste_hsi->ssitx_clk);
+	clk_disable(ste_hsi->ssirx_clk);
+	ste_hsi->ck_on = 0;
+
+out:
+	spin_unlock_bh(&ste_hsi->ck_lock);
+}
+
+static void ste_hsi_clock_disable(struct hsi_controller *hsi)
+{
+	struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+
+	spin_lock_bh(&ste_hsi->ck_lock);
+
+	/* Sanity check */
+	WARN_ON(ste_hsi->ck_refcount <= 0);
+
+	/* Do the clocks need to be disabled now?
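+	 * Only the caller that drops the last reference goes on to gate
+	 * the clocks; everyone else just decrements the count.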
*/ + if (--ste_hsi->ck_refcount) + goto out; + + /* + * If receiver or transmitter is in the middle something delay clock off + */ + if (readl(ste_hsi->tx_base + STE_HSI_TX_STATE) != STE_HSI_STATE_IDLE || + readl(ste_hsi->rx_base + STE_HSI_RX_STATE) + != STE_HSI_STATE_IDLE || + readl(ste_hsi->rx_base + STE_HSI_RX_BUFSTATE) != 0) { + int err = schedule_delayed_work(&ste_hsi->clk_work, HZ); + if (err < 0) + dev_err(&hsi->device, "Error scheduling work\n"); + + goto out; + } + + /* Actual clocks disabled */ + clk_disable(ste_hsi->tx_clk); + clk_disable(ste_hsi->rx_clk); + clk_disable(ste_hsi->ssitx_clk); + clk_disable(ste_hsi->ssirx_clk); + ste_hsi->ck_on = 0; + +out: + spin_unlock_bh(&ste_hsi->ck_lock); +} + +static int ste_hsi_start_irq(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct hsi_controller *hsi = to_hsi_controller(port->device.parent); + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + u32 val; + int err; + + err = ste_hsi_clock_enable(hsi); + if (unlikely(err)) + return err; + + msg->actual_len = 0; + msg->status = HSI_STATUS_PROCEEDING; + + if (msg->ttype == HSI_MSG_WRITE) { + val = readl(ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM) | + (1 << msg->channel); + writel(val, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM); + } else { + val = readl(ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM) | + (1 << msg->channel); + writel(val, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM); + + val = readl(ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM) | + (1 << msg->channel); + writel(val, ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM); + } + + return 0; +} + +static int ste_hsi_start_transfer(struct ste_hsi_port *ste_port, + struct list_head *queue); +#ifdef CONFIG_STE_DMA40 +static void ste_hsi_dma_callback(void *dma_async_param) +{ + struct hsi_msg *msg = dma_async_param; + struct hsi_controller *hsi = client_to_hsi(msg->cl); + struct ste_hsi_port *ste_port = client_to_ste_port(msg->cl); + struct ste_hsi_controller *ste_hsi = client_to_ste_controller(msg->cl); + struct list_head *queue; + struct dma_chan *chan; + struct ste_hsi_channel_dma *hsi_dma_chan; + char *dma_enable_address; + enum dma_data_direction direction; + u32 dma_mask; + + /* Message finished, remove from list and notify client */ + spin_lock_bh(&ste_hsi->lock); + list_del(&msg->link); + + if (msg->ttype == HSI_MSG_WRITE) { + queue = &ste_port->txqueue[msg->channel]; + direction = DMA_TO_DEVICE; + dma_enable_address = ste_hsi->tx_base + STE_HSI_TX_DMAEN; + hsi_dma_chan = &ste_port->tx_dma[msg->channel]; + } else { + queue = &ste_port->rxqueue[msg->channel]; + direction = DMA_FROM_DEVICE; + dma_enable_address = ste_hsi->rx_base + STE_HSI_RX_DMAEN; + hsi_dma_chan = &ste_port->rx_dma[msg->channel]; + } + + dma_sync_sg_for_cpu(&hsi->device, msg->sgt.sgl, + msg->sgt.nents, direction); + chan = hsi_dma_chan->dma_chan; + + /* disable DMA channel on HSI controller */ + dma_mask = readl(dma_enable_address); + writel(dma_mask & ~(1 << msg->channel), dma_enable_address); + + hsi_dma_chan->desc = NULL; + + dma_unmap_sg(&hsi->device, msg->sgt.sgl, msg->sgt.nents, direction); + + msg->status = HSI_STATUS_COMPLETED; + msg->actual_len = sg_dma_len(msg->sgt.sgl); + + spin_unlock_bh(&ste_hsi->lock); + + msg->complete(msg); + + ste_hsi_clock_disable(hsi); + + spin_lock_bh(&ste_hsi->lock); + ste_hsi_start_transfer(ste_port, queue); + spin_unlock_bh(&ste_hsi->lock); +} + +static void dma_device_control(struct ste_hsi_channel_dma *chan, + enum dma_ctrl_cmd cmd, unsigned long arg) +{ + 
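+	/*
+	 * Thin wrapper around the dmaengine device_control hook of this
+	 * kernel generation; ste_hsi_terminate_dma_chan() uses it to
+	 * issue DMA_TERMINATE_ALL for an outstanding descriptor.
+	 */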
chan->dma_chan->device->device_control(chan->dma_chan, cmd, arg); +} + +static void ste_hsi_terminate_dma_chan(struct ste_hsi_channel_dma *chan) +{ + if (chan->desc) { + dma_device_control(chan, DMA_TERMINATE_ALL, 0); + chan->desc = NULL; + } + chan->cookie = 0; +} + +static void ste_hsi_terminate_dma(struct ste_hsi_port *ste_port) +{ + int i; + + for (i = 0; i < ste_port->channels; ++i) { + ste_hsi_terminate_dma_chan(&ste_port->tx_dma[i]); + ste_hsi_terminate_dma_chan(&ste_port->rx_dma[i]); + } +} + +static int ste_hsi_start_dma(struct hsi_msg *msg) +{ + struct hsi_controller *hsi = client_to_hsi(msg->cl); + struct ste_hsi_port *ste_port = client_to_ste_port(msg->cl); + struct ste_hsi_controller *ste_hsi = client_to_ste_controller(msg->cl); + struct dma_async_tx_descriptor *desc; + struct dma_chan *chan; + struct ste_hsi_channel_dma *hsi_dma_chan; + char *dma_enable_address; + enum dma_data_direction direction; + u32 dma_mask; + int err; + + err = ste_hsi_clock_enable(hsi); + if (unlikely(err)) + return err; + + if (msg->ttype == HSI_MSG_WRITE) { + direction = DMA_TO_DEVICE; + dma_enable_address = ste_hsi->tx_base + STE_HSI_TX_DMAEN; + hsi_dma_chan = &ste_port->tx_dma[msg->channel]; + } else { + u32 val; + direction = DMA_FROM_DEVICE; + dma_enable_address = ste_hsi->rx_base + STE_HSI_RX_DMAEN; + hsi_dma_chan = &ste_port->rx_dma[msg->channel]; + + /* enable overrun for this channel */ + val = readl(ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM) | + (1 << msg->channel); + writel(val, ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM); + } + + chan = hsi_dma_chan->dma_chan; + + if (0 == dma_map_sg(&hsi->device, msg->sgt.sgl, msg->sgt.nents, + direction)) { + dev_dbg(&hsi->device, "DMA map SG failed !\n"); + err = -ENOMEM; + goto out; + } + /* Prepare the scatterlist */ + desc = chan->device->device_prep_slave_sg(chan, + msg->sgt.sgl, + msg->sgt.nents, + direction, + DMA_PREP_INTERRUPT | + DMA_CTRL_ACK); + + if (!desc) { + dma_unmap_sg(&hsi->device, msg->sgt.sgl, msg->sgt.nents, + direction); + /* "Complete" DMA (errorpath) */ + ste_hsi_terminate_dma_chan(hsi_dma_chan); + err = -EBUSY; + goto out; + } + desc->callback = ste_hsi_dma_callback; + desc->callback_param = msg; + hsi_dma_chan->cookie = desc->tx_submit(desc); + hsi_dma_chan->desc = desc; + + /* Fire the DMA transaction */ + chan->device->device_issue_pending(chan); + + /* Enable DMA channel on HSI controller */ + dma_mask = readl(dma_enable_address); + writel(dma_mask | 1 << msg->channel, dma_enable_address); + +out: + if (unlikely(err)) + ste_hsi_clock_disable(hsi); + + return err; +} + +static void __init ste_hsi_init_dma(struct ste_hsi_platform_data *data, + struct hsi_controller *hsi) +{ + struct hsi_port *port; + struct ste_hsi_port *ste_port; + struct ste_hsi_controller *ste_hsi = hsi_to_ste_controller(hsi); + dma_cap_mask_t mask; + int i, ch; + + ste_hsi->use_dma = 1; + /* Try to acquire a generic DMA engine slave channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + for (i = 0; i < hsi->num_ports; ++i) { + port = &hsi->port[i]; + ste_port = hsi_port_drvdata(port); + + for (ch = 0; ch < STE_HSI_MAX_CHANNELS; ++ch) { + ste_port->tx_dma[ch].dma_chan = + dma_request_channel(mask, + data->port_cfg[i].dma_filter, + &data->port_cfg[i]. + dma_tx_cfg[ch]); + + ste_port->rx_dma[ch].dma_chan = + dma_request_channel(mask, + data->port_cfg[i].dma_filter, + &data->port_cfg[i]. 
+ dma_rx_cfg[ch]); + } + } +} + +static int ste_hsi_setup_dma(struct hsi_client *cl) +{ + int i; + struct hsi_port *port = to_hsi_port(cl->device.parent); + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + struct hsi_controller *hsi = to_hsi_controller(port->device.parent); + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + struct dma_slave_config rx_conf = { + .src_addr = 0, /* dynamic data */ + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .direction = DMA_FROM_DEVICE, + .src_maxburst = 1, + }; + struct dma_slave_config tx_conf = { + .dst_addr = 0, /* dynamic data */ + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .direction = DMA_TO_DEVICE, + .dst_maxburst = 1, + }; + + if (!ste_hsi->use_dma) + return 0; + + for (i = 0; i < ste_port->channels; ++i) { + struct dma_chan *chan; + + chan = ste_port->tx_dma[i].dma_chan; + tx_conf.dst_addr = (dma_addr_t) ste_hsi->tx_dma_base + + STE_HSI_TX_BUFFERX + 4 * i; + chan->device->device_control(chan, + DMA_SLAVE_CONFIG, + (unsigned long)&tx_conf); + + chan = ste_port->rx_dma[i].dma_chan; + rx_conf.src_addr = (dma_addr_t) ste_hsi->rx_dma_base + + STE_HSI_RX_BUFFERX + 4 * i; + chan->device->device_control(chan, + DMA_SLAVE_CONFIG, + (unsigned long)&rx_conf); + } + + return 0; +} + +#else +#define ste_hsi_init_dma(data, hsi) do { } while (0) +#define ste_hsi_start_dma ste_hsi_start_irq +#define ste_hsi_terminate_dma(ste_port) do { } while (0) +#define ste_hsi_setup_dma(cl) do { } while (0) +#endif + +static int ste_hsi_start_transfer(struct ste_hsi_port *ste_port, + struct list_head *queue) +{ + struct hsi_msg *msg; + int err; + + if (list_empty(queue)) + return 0; + + msg = list_first_entry(queue, struct hsi_msg, link); + if (msg->status != HSI_STATUS_QUEUED) + return 0; + + msg->actual_len = 0; + msg->status = HSI_STATUS_PROCEEDING; + + if (ste_port_to_ste_controller(ste_port)->use_dma) + err = ste_hsi_start_dma(msg); + else + err = ste_hsi_start_irq(msg); + + return err; +} + +static void ste_hsi_receive_data(struct hsi_port *port, unsigned int channel) +{ + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + struct hsi_controller *hsi = to_hsi_controller(port->device.parent); + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + struct list_head *queue = &ste_port->rxqueue[channel]; + struct hsi_msg *msg; + char *bufferx; + u8 *buf; + int span; + + spin_lock_bh(&ste_hsi->lock); + + if (list_empty(queue)) + goto out; + + msg = list_first_entry(queue, struct hsi_msg, link); + if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) { + msg->actual_len = 0; + msg->status = HSI_STATUS_PENDING; + } + + if (msg->status == HSI_STATUS_PROCEEDING && msg->ttype == HSI_MSG_READ) { + unsigned char len; + bufferx = ste_hsi->rx_base + STE_HSI_RX_BUFFERX + 4 * channel; + + len = readl(ste_hsi->rx_base + STE_HSI_RX_GAUGEX + 4 * channel); + buf = sg_virt(msg->sgt.sgl); + buf += msg->actual_len; + while (len--) { + *(u32 *) buf = readl(bufferx); + buf += 4; + msg->actual_len += 4; + if (msg->actual_len >= msg->sgt.sgl->length) { + msg->status = HSI_STATUS_COMPLETED; + break; + } + } + } + + /* re-enable interrupt by watermark manipulation */ + span = readl(ste_hsi->rx_base + STE_HSI_RX_SPANX + 4 * channel); + writel(span, ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * channel); + writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * channel); + + /* + * If message was not transmitted completely enable interrupt for + * further work + */ + if (msg->status == HSI_STATUS_PROCEEDING) { + u32 val; + val = 
readl(ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM) | + (1 << channel); + writel(val, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM); + goto out; + } + + /* Message finished, remove from list and notify client */ + list_del(&msg->link); + spin_unlock_bh(&ste_hsi->lock); + msg->complete(msg); + + ste_hsi_clock_disable(hsi); + + spin_lock_bh(&ste_hsi->lock); + + ste_hsi_start_transfer(ste_port, queue); +out: + spin_unlock_bh(&ste_hsi->lock); +} + +static void ste_hsi_transmit_data(struct hsi_port *port, unsigned int channel) +{ + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + struct hsi_controller *hsi = to_hsi_controller(port->device.parent); + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + struct list_head *queue = &ste_port->txqueue[channel]; + struct hsi_msg *msg; + u8 *buf; + int span; + + if (list_empty(queue)) + return; + + spin_lock_bh(&ste_hsi->lock); + msg = list_first_entry(queue, struct hsi_msg, link); + if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) { + msg->actual_len = 0; + msg->status = HSI_STATUS_PENDING; + } + + if (msg->status == HSI_STATUS_PROCEEDING && + msg->ttype == HSI_MSG_WRITE) { + unsigned char free_space; + + free_space = readl(ste_hsi->tx_base + + STE_HSI_TX_GAUGEX + 4 * channel); + buf = sg_virt(msg->sgt.sgl); + buf += msg->actual_len; + while (free_space--) { + writel(*(u32 *) buf, ste_hsi->tx_base + + STE_HSI_TX_BUFFERX + 4 * channel); + buf += 4; + msg->actual_len += 4; + if (msg->actual_len >= msg->sgt.sgl->length) { + msg->status = HSI_STATUS_COMPLETED; + break; + } + } + } + + span = readl(ste_hsi->tx_base + STE_HSI_TX_SPANX + 4 * channel); + writel(span, ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * channel); + writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * channel); + + if (msg->status == HSI_STATUS_PROCEEDING) { + u32 val; + val = readl(ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM) | + (1 << channel); + writel(val, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM); + goto out; + } + + list_del(&msg->link); + spin_unlock_bh(&ste_hsi->lock); + msg->complete(msg); + + ste_hsi_clock_disable(hsi); + + spin_lock_bh(&ste_hsi->lock); + ste_hsi_start_transfer(ste_port, queue); +out: + spin_unlock_bh(&ste_hsi->lock); +} + +static void ste_hsi_rx_tasklet(unsigned long data) +{ + struct hsi_port *port = (struct hsi_port *)data; + struct hsi_controller *hsi = to_hsi_controller(port->device.parent); + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + u32 irq_status, irq_mask; + unsigned int i; + + irq_status = readl(ste_hsi->rx_base + STE_HSI_RX_WATERMARKIS); + if (!irq_status) + goto out; + + irq_mask = readl(ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM); + writel(irq_mask & ~irq_status, + ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM); + writel(irq_mask, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIC); + + for (i = 0; i < STE_HSI_MAX_CHANNELS; ++i) { + if (irq_status & (1 << i)) + ste_hsi_receive_data(port, i); + } +out: + enable_irq(ste_port->rx_irq); +} + +static irqreturn_t ste_hsi_rx_isr(int irq, void *data) +{ + struct hsi_port *port = data; + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + + disable_irq_nosync(irq); + tasklet_hi_schedule(&ste_port->rx_tasklet); + + return IRQ_HANDLED; +} + +static irqreturn_t ste_hsi_tx_isr(int irq, void *data) +{ + struct hsi_port *port = data; + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + + disable_irq_nosync(irq); + tasklet_hi_schedule(&ste_port->tx_tasklet); + + return IRQ_HANDLED; +} + +static void 
ste_hsi_tx_tasklet(unsigned long data)
+{
+	struct hsi_port *port = (struct hsi_port *)data;
+	struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+	struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+	struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+	u32 irq_status, irq_mask;
+	unsigned int i;
+
+	irq_status = readl(ste_hsi->tx_base + STE_HSI_TX_WATERMARKIS);
+	if (!irq_status)
+		goto out;
+
+	irq_mask = readl(ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM);
+	writel(irq_mask & ~irq_status,
+	       ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM);
+	writel(irq_mask, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIC);
+
+	for (i = 0; i < STE_HSI_MAX_CHANNELS; ++i) {
+		if (irq_status & (1 << i))
+			ste_hsi_transmit_data(port, i);
+	}
+out:
+	enable_irq(ste_port->tx_irq);
+}
+
+static void ste_hsi_break_complete(struct hsi_port *port,
+				   struct ste_hsi_controller *ste_hsi)
+{
+	struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+	struct hsi_msg *msg, *tmp;
+	u32 mask;
+
+	dev_dbg(port->device.parent, "HWBREAK received\n");
+
+	spin_lock_bh(&ste_hsi->lock);
+
+	mask = readl(ste_hsi->rx_base + STE_HSI_RX_EXCEPIM);
+	writel(mask & ~STE_HSI_EXCEP_BREAK,
+	       ste_hsi->rx_base + STE_HSI_RX_EXCEPIM);
+
+	spin_unlock_bh(&ste_hsi->lock);
+
+	list_for_each_entry_safe(msg, tmp, &ste_port->brkqueue, link) {
+		msg->status = HSI_STATUS_COMPLETED;
+		list_del(&msg->link);
+		msg->complete(msg);
+	}
+}
+
+static void ste_hsi_error(struct hsi_port *port)
+{
+	struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+	struct hsi_msg *msg;
+	unsigned int i;
+
+	for (i = 0; i < ste_port->channels; i++) {
+		if (list_empty(&ste_port->rxqueue[i]))
+			continue;
+		msg = list_first_entry(&ste_port->rxqueue[i], struct hsi_msg,
+				       link);
+		list_del(&msg->link);
+		msg->status = HSI_STATUS_ERROR;
+		msg->complete(msg);
+		/* Now restart queued reads if any */
+		ste_hsi_start_transfer(ste_port, &ste_port->rxqueue[i]);
+	}
+}
+
+static void ste_hsi_exception_tasklet(unsigned long data)
+{
+	struct hsi_port *port = (struct hsi_port *)data;
+	struct hsi_controller *hsi = to_hsi_controller(port->device.parent);
+	struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+	struct ste_hsi_port *ste_port = hsi_port_drvdata(port);
+	u32 error_status;
+	u32 error_interrupts;
+
+	error_status = readl(ste_hsi->rx_base + STE_HSI_RX_EXCEP);
+	/*
+	 * The interrupt that scheduled this tasklet may already be
+	 * inactive, so base the exception handling on the masked
+	 * interrupt status rather than on the exception state register.
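+	 * (STE_HSI_RX_EXCEPMIS reports only exceptions whose interrupt
+	 * is unmasked, while STE_HSI_RX_EXCEP holds the raw state.)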
+ */ + error_interrupts = readl(ste_hsi->rx_base + STE_HSI_RX_EXCEPMIS); + + if (error_interrupts & STE_HSI_EXCEP_BREAK) + ste_hsi_break_complete(port, ste_hsi); + + if (error_interrupts & STE_HSI_EXCEP_TIMEOUT) + dev_err(&hsi->device, "timeout exception occurred\n"); + if (error_interrupts & STE_HSI_EXCEP_OVERRUN) + dev_err(&hsi->device, "overrun exception occurred\n"); + if (error_interrupts & STE_HSI_EXCEP_PARITY) + dev_err(&hsi->device, "parity exception occurred\n"); + + if (error_interrupts & ~STE_HSI_EXCEP_BREAK) + ste_hsi_error(port); + + /* Acknowledge exception interrupts */ + writel(error_status, ste_hsi->rx_base + STE_HSI_RX_ACK); + + enable_irq(ste_port->excep_irq); +} + +static irqreturn_t ste_hsi_exception_isr(int irq, void *data) +{ + struct hsi_port *port = data; + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + + disable_irq_nosync(irq); + tasklet_hi_schedule(&ste_port->exception_tasklet); + + return IRQ_HANDLED; +} + +static void ste_hsi_overrun_tasklet(unsigned long data) +{ + struct hsi_controller *hsi = (struct hsi_controller *)data; + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + struct hsi_port *hsi_port = &hsi->port[0]; + struct ste_hsi_port *ste_port = hsi_port_drvdata(hsi_port); + struct hsi_msg *msg; + + unsigned int channel; + u8 rised_overrun; + u8 mask; + u8 blocked = 0; + + rised_overrun = (u8) readl(ste_hsi->rx_base + STE_HSI_RX_OVERRUNMIS); + mask = rised_overrun; + for (channel = 0; mask; ++channel, mask >>= 1) { + if (!(mask & 1)) + continue; + + do { + /* + * No more messages, block interrupt + */ + if (list_empty(&ste_port->rxqueue[channel])) { + blocked |= 1 << channel; + break; + } + /* + * Complete with error + */ + msg = list_first_entry(&ste_port->rxqueue[channel], + struct hsi_msg, link); + list_del(&msg->link); + msg->status = HSI_STATUS_ERROR; + msg->complete(msg); + + /* + * Now restart queued reads if any + * If start_transfer fails, try with next message + */ + if (ste_hsi_start_transfer(ste_port, + &ste_port->rxqueue[channel])) + continue; + } while (0); + } + + /* Overrun acknowledge */ + writel(rised_overrun, ste_hsi->rx_base + STE_HSI_RX_OVERRUNACK); + writel(~blocked & readl(ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM), + ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM); + + /* + * Enable all that should not be blocked + */ + mask = rised_overrun & ~blocked; + for (channel = 0; mask; ++channel, mask >>= 1) + enable_irq(ste_hsi->overrun_irq[channel]); +} + +static irqreturn_t ste_hsi_overrun_isr(int irq, void *data) +{ + struct hsi_port *port = data; + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + + disable_irq_nosync(irq); + tasklet_hi_schedule(&ste_port->overrun_tasklet); + + return IRQ_HANDLED; +} + +static void __init ste_hsi_queues_init(struct ste_hsi_port *ste_port) +{ + unsigned int ch; + + for (ch = 0; ch < STE_HSI_MAX_CHANNELS; ch++) { + INIT_LIST_HEAD(&ste_port->txqueue[ch]); + INIT_LIST_HEAD(&ste_port->rxqueue[ch]); + } + INIT_LIST_HEAD(&ste_port->brkqueue); +} + +static int __init ste_hsi_get_iomem(struct platform_device *pdev, + const char *res_name, + unsigned char __iomem **base, + dma_addr_t *phy) +{ + struct resource *mem; + struct resource *ioarea; + + mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); + if (!mem) { + dev_err(&pdev->dev, "IO memory region missing!\n"); + return -ENXIO; + } + + ioarea = devm_request_mem_region(&pdev->dev, mem->start, + resource_size(mem), + dev_name(&pdev->dev)); + if (!ioarea) { + dev_err(&pdev->dev, "Can't request IO memory region!\n"); 
+ return -ENXIO; + } + + *base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); + if (!base) { + dev_err(&pdev->dev, "%s IO remap failed!\n", mem->name); + return -ENXIO; + } + if (phy) + *phy = (dma_addr_t) mem->start; + + return 0; +} + +static int __init ste_hsi_get_irq(struct platform_device *pdev, + const char *res_name, + irqreturn_t(*isr) (int, void *), void *data, + int *irq_number) +{ + struct resource *irq; + int err; + + irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name); + if (!irq) { + dev_err(&pdev->dev, "IO memory region missing!\n"); + return -ENXIO; + } + + err = devm_request_irq(&pdev->dev, irq->start, isr, + IRQF_DISABLED, irq->name, data); + if (err) + dev_err(&pdev->dev, "%s IRQ request failed!\n", irq->name); + + if (irq_number) + *irq_number = irq->start; + + return err; +} + +static void ste_hsi_flush_queue(struct list_head *queue, struct hsi_client *cl) +{ + struct list_head *node, *tmp; + struct hsi_msg *msg; + + list_for_each_safe(node, tmp, queue) { + msg = list_entry(node, struct hsi_msg, link); + if ((cl) && (cl != msg->cl)) + continue; + list_del(node); + + if (msg->destructor) + msg->destructor(msg); + else + hsi_free_msg(msg); + } +} + +static int ste_hsi_async_break(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct ste_hsi_port *ste_port = hsi_to_ste_port(port); + struct hsi_controller *hsi = to_hsi_controller(port->device.parent); + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + int err; + + err = ste_hsi_clock_enable(hsi); + if (unlikely(err)) + return err; + + if (msg->ttype == HSI_MSG_WRITE) { + if (port->tx_cfg.mode != HSI_MODE_FRAME) { + err = -EINVAL; + goto out; + } + writel(1, ste_hsi->tx_base + STE_HSI_TX_BREAK); + msg->status = HSI_STATUS_COMPLETED; + msg->complete(msg); + } else { + u32 mask; + if (port->rx_cfg.mode != HSI_MODE_FRAME) { + err = -EINVAL; + goto out; + } + spin_lock_bh(&ste_hsi->lock); + msg->status = HSI_STATUS_PROCEEDING; + mask = readl(ste_hsi->rx_base + STE_HSI_RX_EXCEPIM); + /* Enable break exception on controller */ + if (!(mask & STE_HSI_EXCEP_BREAK)) + writel(mask | STE_HSI_EXCEP_BREAK, + ste_hsi->rx_base + STE_HSI_RX_EXCEPIM); + + list_add_tail(&msg->link, &ste_port->brkqueue); + spin_unlock_bh(&ste_hsi->lock); + } + +out: + ste_hsi_clock_disable(hsi); + return err; +} + +static int ste_hsi_async(struct hsi_msg *msg) +{ + struct ste_hsi_controller *ste_hsi; + struct ste_hsi_port *ste_port; + struct list_head *queue; + int err = 0; + + if (unlikely(!msg)) + return -ENOSYS; + + if (msg->sgt.nents > 1) + return -ENOSYS; + + if (unlikely(msg->break_frame)) + return ste_hsi_async_break(msg); + + ste_port = client_to_ste_port(msg->cl); + ste_hsi = client_to_ste_controller(msg->cl); + + if (msg->ttype == HSI_MSG_WRITE) { + /* TX transfer */ + BUG_ON(msg->channel >= ste_port->channels); + queue = &ste_port->txqueue[msg->channel]; + } else { + /* RX transfer */ + queue = &ste_port->rxqueue[msg->channel]; + } + + spin_lock_bh(&ste_hsi->lock); + list_add_tail(&msg->link, queue); + msg->status = HSI_STATUS_QUEUED; + + err = ste_hsi_start_transfer(ste_port, queue); + if (err) + list_del(&msg->link); + + spin_unlock_bh(&ste_hsi->lock); + + return err; +} + +static int ste_hsi_setup(struct hsi_client *cl) +{ + struct hsi_port *port = to_hsi_port(cl->device.parent); + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + struct hsi_controller *hsi = to_hsi_controller(port->device.parent); + struct ste_hsi_controller *ste_hsi = 
hsi_controller_drvdata(hsi); + int err, i, buffers; + u32 div = 0; + + err = ste_hsi_clock_enable(hsi); + if (unlikely(err)) + return err; + + if (cl->tx_cfg.speed) { + div = clk_get_rate(ste_hsi->tx_clk) / 1000 / cl->tx_cfg.speed; + if (div) + --div; + } + + port->tx_cfg = cl->tx_cfg; + port->rx_cfg = cl->rx_cfg; + /* Configure TX */ + writel(cl->tx_cfg.mode, ste_hsi->tx_base + STE_HSI_TX_MODE); + writel(div, ste_hsi->tx_base + STE_HSI_TX_DIVISOR); + writel(0, ste_hsi->tx_base + STE_HSI_TX_PARITY); + /* TODO: Wait for idle here */ + writel(cl->tx_cfg.channels, ste_hsi->tx_base + STE_HSI_TX_CHANNELS); + /* Calculate buffers number per channel */ + buffers = STE_HSI_MAX_BUFFERS / cl->tx_cfg.channels; + for (i = 0; i < cl->tx_cfg.channels; i++) { + /* Set 32 bit long frames */ + writel(31, ste_hsi->tx_base + STE_HSI_TX_FRAMELENX + 4 * i); + writel(buffers * i, + ste_hsi->tx_base + STE_HSI_TX_BASEX + 4 * i); + writel(buffers - 1, + ste_hsi->tx_base + STE_HSI_TX_SPANX + 4 * i); + writel(buffers - 1, + ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * i); + writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * i); + } + + /* Configure RX */ + writel(cl->rx_cfg.mode, ste_hsi->rx_base + STE_HSI_RX_MODE); + writel(0, ste_hsi->rx_base + STE_HSI_RX_PARITY); + writel(cl->rx_cfg.channels, ste_hsi->rx_base + STE_HSI_RX_CHANNELS); + /* Calculate buffers number per channel */ + buffers = STE_HSI_MAX_BUFFERS / cl->rx_cfg.channels; + for (i = 0; i < cl->rx_cfg.channels; i++) { + /* Set 32 bit long frames */ + writel(31, ste_hsi->rx_base + STE_HSI_RX_FRAMELENX + 4 * i); + writel(buffers * i, + ste_hsi->rx_base + STE_HSI_RX_BASEX + 4 * i); + writel(buffers - 1, + ste_hsi->rx_base + STE_HSI_RX_SPANX + 4 * i); + writel(buffers - 1, + ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * i); + writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * i); + } + + ste_port->channels = max(cl->tx_cfg.channels, cl->rx_cfg.channels); + + ste_hsi_setup_dma(cl); + + ste_hsi_clock_disable(hsi); + return err; +} + +static int ste_hsi_flush(struct hsi_client *cl) +{ + struct hsi_port *port = to_hsi_port(cl->device.parent); + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + struct hsi_controller *hsi = to_hsi_controller(port->device.parent); + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + int i; + + ste_hsi_clock_enable(hsi); + + /* Enter sleep mode */ + writel(STE_HSI_MODE_SLEEP, ste_hsi->rx_base + STE_HSI_RX_MODE); + + /* Disable DMA, and terminate all outstanding jobs */ + writel(0, ste_hsi->tx_base + STE_HSI_TX_DMAEN); + writel(0, ste_hsi->rx_base + STE_HSI_RX_DMAEN); + ste_hsi_terminate_dma(ste_port); + + /* Flush all HSIR and HSIT buffers */ + writel(0, ste_hsi->tx_base + STE_HSI_TX_STATE); + writel(0, ste_hsi->tx_base + STE_HSI_TX_BUFSTATE); + writel(0, ste_hsi->rx_base + STE_HSI_RX_STATE); + /* + * BUFSTATE is cleared twice on purpose: + * first time all fifos are cleared + * second time to clear data that was in pipline buffer + * and was transfered to fifos + */ + writel(0, ste_hsi->rx_base + STE_HSI_RX_BUFSTATE); + writel(0, ste_hsi->rx_base + STE_HSI_RX_BUFSTATE); + + /* Flush all errors */ + writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEP); + + /* Clear interrupts */ + writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM); + writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIC); + writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM); + writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIC); + writel(0xFF, ste_hsi->rx_base + STE_HSI_RX_OVERRUNACK); + writel(0, ste_hsi->rx_base + 
STE_HSI_RX_OVERRUNIM); + writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEPIM); + writel(0x0F, ste_hsi->rx_base + STE_HSI_RX_ACK); + + /* Dequeue all pending requests */ + for (i = 0; i < ste_port->channels; i++) { + /* Release write clocks */ + if (!list_empty(&ste_port->txqueue[i])) + ste_hsi_clock_disable(hsi); + if (!list_empty(&ste_port->rxqueue[i])) + ste_hsi_clock_disable(hsi); + ste_hsi_flush_queue(&ste_port->txqueue[i], NULL); + ste_hsi_flush_queue(&ste_port->rxqueue[i], NULL); + } + ste_hsi_flush_queue(&ste_port->brkqueue, NULL); + + ste_hsi_clock_disable(hsi); + + return 0; +} + +static int ste_hsi_start_tx(struct hsi_client *cl) +{ + return 0; +} + +static int ste_hsi_stop_tx(struct hsi_client *cl) +{ + return 0; +} + +static int ste_hsi_release(struct hsi_client *cl) +{ + int err; + struct ste_hsi_controller *ste_hsi = client_to_ste_controller(cl); + + err = ste_hsi_flush(cl); + cancel_delayed_work(&ste_hsi->clk_work); + + return 0; +} + +static int ste_hsi_ports_init(struct hsi_controller *hsi, + struct platform_device *pdev) +{ + struct hsi_port *port; + struct ste_hsi_port *ste_port; + unsigned int i; + char irq_name[20]; + int err; + + for (i = 0; i < hsi->num_ports; i++) { + ste_port = devm_kzalloc(&pdev->dev, sizeof *ste_port, + GFP_KERNEL); + if (!ste_port) + return -ENOMEM; + + port = &hsi->port[i]; + port->async = ste_hsi_async; + port->setup = ste_hsi_setup; + port->flush = ste_hsi_flush; + port->start_tx = ste_hsi_start_tx; + port->stop_tx = ste_hsi_stop_tx; + port->release = ste_hsi_release; + hsi_port_set_drvdata(port, ste_port); + ste_port->dev = &port->device; + + sprintf(irq_name, "hsi_rx_irq%d", i); + err = ste_hsi_get_irq(pdev, irq_name, ste_hsi_rx_isr, port, + &ste_port->rx_irq); + if (err) + return err; + + sprintf(irq_name, "hsi_tx_irq%d", i); + err = ste_hsi_get_irq(pdev, irq_name, ste_hsi_tx_isr, port, + &ste_port->tx_irq); + if (err) + return err; + + tasklet_init(&ste_port->rx_tasklet, ste_hsi_rx_tasklet, + (unsigned long)port); + + tasklet_init(&ste_port->tx_tasklet, ste_hsi_tx_tasklet, + (unsigned long)port); + + tasklet_init(&ste_port->exception_tasklet, + ste_hsi_exception_tasklet, (unsigned long)port); + + tasklet_init(&ste_port->overrun_tasklet, + ste_hsi_overrun_tasklet, (unsigned long)port); + + sprintf(irq_name, "hsi_rx_excep%d", i); + err = ste_hsi_get_irq(pdev, irq_name, ste_hsi_exception_isr, + port, &ste_port->excep_irq); + if (err) + return err; + + ste_hsi_queues_init(ste_port); + } + return 0; +} + +static int __init ste_hsi_hw_init(struct hsi_controller *hsi) +{ + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + int err; + + err = ste_hsi_clock_enable(hsi); + if (unlikely(err)) + return err; + + writel(0, ste_hsi->tx_base + STE_HSI_TX_BUFSTATE); + writel(0, ste_hsi->tx_base + STE_HSI_TX_FLUSHBITS); + writel(0, ste_hsi->tx_base + STE_HSI_TX_PRIORITY); + writel(0, ste_hsi->tx_base + STE_HSI_TX_BURSTLEN); + writel(0, ste_hsi->tx_base + STE_HSI_TX_PREAMBLE); + writel(0, ste_hsi->tx_base + STE_HSI_TX_DATASWAP); + writel(0, ste_hsi->tx_base + STE_HSI_TX_DMAEN); + writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKID); + writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIC); + writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM); + + /* 0x23 is reset value per DB8500 Design Spec */ + writel(0x23, ste_hsi->rx_base + STE_HSI_RX_THRESHOLD); + + writel(0, ste_hsi->rx_base + STE_HSI_RX_BUFSTATE); + + /* Bits 0,1,2 set to 1 to clear exception flags */ + writel(0x07, ste_hsi->rx_base + STE_HSI_RX_ACK); + + /* Bits 0..7 set to 1 to clear 
OVERRUN IRQ */ + writel(0xFF, ste_hsi->rx_base + STE_HSI_RX_OVERRUNACK); + + writel(0, ste_hsi->rx_base + STE_HSI_RX_DMAEN); + writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIC); + writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM); + writel(0, ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM); + + /* Flush all errors */ + writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEP); + + /* 2 is Flush state, no RX exception generated afterwards */ + writel(2, ste_hsi->rx_base + STE_HSI_RX_STATE); + + writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEPIM); + + ste_hsi_clock_disable(hsi); + + return err; +} + +static int __init ste_hsi_add_controller(struct hsi_controller *hsi, + struct platform_device *pdev) +{ + struct ste_hsi_controller *ste_hsi; + char overrun_name[] = "hsi_rx_overrun_chxxx"; + unsigned char i; + int err; + + ste_hsi = kzalloc(sizeof(struct ste_hsi_controller), GFP_KERNEL); + if (!ste_hsi) { + dev_err(&pdev->dev, "Not enough memory for ste_hsi!\n"); + return -ENOMEM; + } + + spin_lock_init(&ste_hsi->lock); + spin_lock_init(&ste_hsi->ck_lock); + INIT_DELAYED_WORK(&ste_hsi->clk_work, ste_hsi_delayed_disable_clock); + + hsi->id = pdev->id; + hsi->device.parent = &pdev->dev; + dev_set_name(&hsi->device, "ste-hsi.%d", hsi->id); + ste_hsi->dev = &hsi->device; + hsi_controller_set_drvdata(hsi, ste_hsi); + + /* Get and reserve resources for receiver */ + err = ste_hsi_get_iomem(pdev, "hsi_rx_base", &ste_hsi->rx_base, + &ste_hsi->rx_dma_base); + if (err) + goto err_free_mem; + dev_info(&pdev->dev, "hsi_rx_base = %p\n", ste_hsi->rx_base); + + /* Get and reserve resources for transmitter */ + err = ste_hsi_get_iomem(pdev, "hsi_tx_base", &ste_hsi->tx_base, + &ste_hsi->tx_dma_base); + if (err) + goto err_free_mem; + dev_info(&pdev->dev, "hsi_tx_base = %p\n", ste_hsi->tx_base); + + /* Get HSIT HSITXCLK clock */ + ste_hsi->tx_clk = clk_get(&pdev->dev, "hsit_hsitxclk"); + if (IS_ERR(ste_hsi->tx_clk)) { + dev_err(&hsi->device, "Couldn't get HSIT HSITXCLK clock\n"); + err = PTR_ERR(ste_hsi->tx_clk); + goto err_free_mem; + } + + /* Get HSIR HSIRXCLK clock */ + ste_hsi->rx_clk = clk_get(&pdev->dev, "hsir_hsirxclk"); + if (IS_ERR(ste_hsi->rx_clk)) { + dev_err(&hsi->device, "Couldn't get HSIR HSIRXCLK clock\n"); + err = PTR_ERR(ste_hsi->rx_clk); + goto err_clk_free; + } + + /* Get HSIT HCLK clock */ + ste_hsi->ssitx_clk = clk_get(&pdev->dev, "hsit_hclk"); + if (IS_ERR(ste_hsi->ssitx_clk)) { + dev_err(&hsi->device, "Couldn't get HSIT HCLK clock\n"); + err = PTR_ERR(ste_hsi->ssitx_clk); + goto err_clk_free; + } + + /* Get HSIR HCLK clock */ + ste_hsi->ssirx_clk = clk_get(&pdev->dev, "hsir_hclk"); + if (IS_ERR(ste_hsi->ssirx_clk)) { + dev_err(&hsi->device, "Couldn't get HSIR HCLK clock\n"); + err = PTR_ERR(ste_hsi->ssirx_clk); + goto err_clk_free; + } + + err = ste_hsi_clock_enable(hsi); + if (unlikely(err)) + goto err_clk_free; + + /* Check if controller is at specified address */ + if (compare_periphid(ste_hsir_periphid, + (u32 *) (ste_hsi->rx_base + 0xFE0), 8)) { + dev_err(&pdev->dev, "No hsir controller at = %p\n", + ste_hsi->rx_base); + err = -ENXIO; + goto err_clk_free; + } + + /* Check if controller is at specified address */ + if (compare_periphid(ste_hsit_periphid, + (u32 *) (ste_hsi->tx_base + 0xFE0), 8)) { + dev_err(&pdev->dev, "No hsit controller at = %p\n", + ste_hsi->tx_base); + err = -ENXIO; + goto err_clk_free; + } + ste_hsi_clock_disable(hsi); + + err = ste_hsi_hw_init(hsi); + if (err) { + dev_err(&pdev->dev, "Failed to init HSI controller!\n"); + goto err_clk_free; + } + + for (i = 0; i < 
STE_HSI_MAX_CHANNELS; i++) {
+		sprintf(overrun_name, "hsi_rx_overrun_ch%d", i);
+		err = ste_hsi_get_irq(pdev, overrun_name, ste_hsi_overrun_isr,
+				      hsi, &ste_hsi->overrun_irq[i]);
+		if (err)
+			goto err_clk_free;
+	}
+
+	err = ste_hsi_ports_init(hsi, pdev);
+	if (err)
+		goto err_clk_free;
+
+	err = hsi_register_controller(hsi);
+	if (err)
+		goto err_clk_free;
+
+	return 0;
+
+err_clk_free:
+	ste_hsi_clks_free(ste_hsi);
+err_free_mem:
+	kfree(ste_hsi);
+	return err;
+}
+
+static int ste_hsi_remove_controller(struct hsi_controller *hsi,
+				     struct platform_device *pdev)
+{
+	struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi);
+
+	ste_hsi_clks_free(ste_hsi);
+	hsi_unregister_controller(hsi);
+
+	return 0;
+}
+
+static int __init ste_hsi_probe(struct platform_device *pdev)
+{
+	struct hsi_controller *hsi;
+	struct ste_hsi_platform_data *pdata = pdev->dev.platform_data;
+	int err;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "No HSI platform data!\n");
+		return -EINVAL;
+	}
+
+	hsi = hsi_alloc_controller(pdata->num_ports, GFP_KERNEL);
+	if (!hsi) {
+		dev_err(&pdev->dev, "No memory to allocate HSI controller!\n");
+		return -ENOMEM;
+	}
+	platform_set_drvdata(pdev, hsi);
+
+	err = ste_hsi_add_controller(hsi, pdev);
+	if (err < 0) {
+		dev_err(&pdev->dev, "Can't add HSI controller!\n");
+		goto err_free_controller;
+	}
+
+	if (pdata->use_dma)
+		ste_hsi_init_dma(pdata, hsi);
+
+	return 0;
+
+err_free_controller:
+	platform_set_drvdata(pdev, NULL);
+	hsi_free_controller(hsi);
+
+	return err;
+}
+
+static int __exit ste_hsi_remove(struct platform_device *pdev)
+{
+	struct hsi_controller *hsi = platform_get_drvdata(pdev);
+
+	ste_hsi_remove_controller(hsi, pdev);
+	hsi_free_controller(hsi);
+
+	return 0;
+}
+
+static struct platform_driver ste_hsi_driver __refdata = {
+	.driver = {
+		.name = "ste_hsi",
+		.owner = THIS_MODULE,
+	},
+	.remove = __exit_p(ste_hsi_remove),
+};
+
+static int __init ste_hsi_init(void)
+{
+	return platform_driver_probe(&ste_hsi_driver, ste_hsi_probe);
+}
+module_init(ste_hsi_init);
+
+static void __exit ste_hsi_exit(void)
+{
+	platform_driver_unregister(&ste_hsi_driver);
+}
+module_exit(ste_hsi_exit);
+
+MODULE_AUTHOR("Lukasz Baj");
-- 
cgit v1.2.3

Date: Mon, 18 Apr 2011 15:15:20 +0100
Subject: drivers: hsi: Add CAIF-HSI glue layer

Change-Id: Id0b4672be6a34cf7fca2494ebb6728b679ad9a63
Signed-off-by: Pawel Szyszuk
Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/21042
Reviewed-by: Jonas ABERG
Reviewed-by: Henrik CARLING
Tested-by: Henrik CARLING
---
 drivers/hsi/clients/Kconfig  |   6 +
 drivers/hsi/clients/Makefile |   1 +
 drivers/hsi/clients/cfhsi.c  | 318 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 325 insertions(+)
 create mode 100644 drivers/hsi/clients/cfhsi.c

diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig
index 3bacd275f47..46eef1f77fd 100644
--- a/drivers/hsi/clients/Kconfig
+++ b/drivers/hsi/clients/Kconfig
@@ -11,3 +11,9 @@ config HSI_CHAR
 	  If you say Y here, you will enable the HSI/SSI character driver.
 	  This driver provides a simple character device interface for
 	  serial communication with the cellular modem over HSI/SSI bus.
+config HSI_CAIF + tristate "CAIF HSI driver" + depends on HSI + default n + ---help--- + Provides HSI-CAIF glue layer diff --git a/drivers/hsi/clients/Makefile b/drivers/hsi/clients/Makefile index 327c0e27c8b..dfe33584975 100644 --- a/drivers/hsi/clients/Makefile +++ b/drivers/hsi/clients/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_HSI_CHAR) += hsi_char.o +obj-$(CONFIG_HSI_CAIF) += cfhsi.o diff --git a/drivers/hsi/clients/cfhsi.c b/drivers/hsi/clients/cfhsi.c new file mode 100644 index 00000000000..cf7ce0cb1cb --- /dev/null +++ b/drivers/hsi/clients/cfhsi.c @@ -0,0 +1,318 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Daniel Martensson + * License terms: GNU General Public License (GPL) version 2. + */ + +#include +#include +#include +#include +#include + +#include + +#include + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Daniel Martensson"); +MODULE_DESCRIPTION("CAIF HSI V3 glue"); + +#define NR_OF_CAIF_HSI_CHANNELS 2 + +struct cfhsi_v3 { + struct list_head list; + struct cfhsi_dev dev; + struct platform_device pdev; + struct hsi_msg *tx_msg; + struct hsi_msg *rx_msg; +}; + +/* TODO: Lists are not protected with regards to device removal. */ +static LIST_HEAD(cfhsi_dev_list); + +static struct hsi_client *cfhsi_client; + +static int cfhsi_tx(u8 *ptr, int len, struct cfhsi_dev *dev) +{ + int res; + struct cfhsi_v3 *cfhsi = NULL; + + /* Check length and alignment. */ + BUG_ON(((int)ptr)%4); + BUG_ON(len%4); + + cfhsi = container_of(dev, struct cfhsi_v3, dev); + + sg_init_one(cfhsi->tx_msg->sgt.sgl, (const void *)ptr, + (unsigned int)len); + + /* Write on HSI device. */ + res = hsi_async_write(cfhsi_client, cfhsi->tx_msg); + + return res; +} + +static int cfhsi_rx(u8 *ptr, int len, struct cfhsi_dev *dev) +{ + int res; + struct cfhsi_v3 *cfhsi = NULL; + + /* Check length and alignment. */ + BUG_ON(((int)ptr)%4); + BUG_ON(len%4); + + cfhsi = container_of(dev, struct cfhsi_v3, dev); + + sg_init_one(cfhsi->rx_msg->sgt.sgl, (const void *)ptr, + (unsigned int)len); + + /* Read from HSI device. */ + res = hsi_async_read(cfhsi_client, cfhsi->rx_msg); + + return res; +} + +void cfhsi_v3_release(struct device *dev) +{ + pr_warning("%s:%d cfhsi_v3_release called\n", __FILE__, __LINE__); +} + +static inline void cfhsi_v3_destructor(struct hsi_msg *msg) +{ + pr_warning("%s:%d cfhsi_v3_destructor called\n", __FILE__, __LINE__); +} + +static inline void cfhsi_v3_read_cb(struct hsi_msg *msg) +{ + struct cfhsi_v3 *cfhsi = (struct cfhsi_v3 *)msg->context; + + /* TODO: Error checking. */ + BUG_ON(!cfhsi->dev.drv); + BUG_ON(!cfhsi->dev.drv->rx_done_cb); + + cfhsi->dev.drv->rx_done_cb(cfhsi->dev.drv); +} + +static inline void cfhsi_v3_write_cb(struct hsi_msg *msg) +{ + struct cfhsi_v3 *cfhsi = (struct cfhsi_v3 *)msg->context; + + /* TODO: Error checking. */ + BUG_ON(!cfhsi->dev.drv); + BUG_ON(!cfhsi->dev.drv->tx_done_cb); + + cfhsi->dev.drv->tx_done_cb(cfhsi->dev.drv); +} + +static int hsi_proto_probe(struct device *dev) +{ + int res; + int i; + struct cfhsi_v3 *cfhsi = NULL; + + if (cfhsi_client) + return -ENODEV; /* TODO: Not correct return. */ + + cfhsi_client = to_hsi_client(dev); + + res = hsi_claim_port(cfhsi_client, 0); + if (res) { + pr_warning("hsi_proto_probe: hsi_claim_port:%d.\n", res); + goto err_hsi_claim; + } + + /* Right now we don't care about AC_WAKE (No power management). */ + cfhsi_client->hsi_start_rx = NULL; + cfhsi_client->hsi_stop_rx = NULL; + + /* CAIF HSI TX configuration. 
*/
+	cfhsi_client->tx_cfg.mode = HSI_MODE_STREAM;
+	cfhsi_client->tx_cfg.flow = HSI_FLOW_SYNC;
+	cfhsi_client->tx_cfg.channels = NR_OF_CAIF_HSI_CHANNELS;
+	cfhsi_client->tx_cfg.speed = 100000; /* TODO: What speed should be used? */
+	cfhsi_client->tx_cfg.arb_mode = HSI_ARB_RR;
+
+	/* CAIF HSI RX configuration. */
+	cfhsi_client->rx_cfg.mode = HSI_MODE_STREAM;
+	cfhsi_client->rx_cfg.flow = HSI_FLOW_SYNC;
+	cfhsi_client->rx_cfg.channels = NR_OF_CAIF_HSI_CHANNELS;
+	cfhsi_client->rx_cfg.speed = 200000; /* TODO: What speed should be used? */
+	cfhsi_client->rx_cfg.arb_mode = HSI_ARB_RR;
+
+	res = hsi_setup(cfhsi_client);
+	if (res) {
+		pr_warning("hsi_proto_probe: hsi_setup:%d.\n", res);
+		goto err_hsi_setup;
+	}
+
+	/* Make sure that AC_WAKE is high (No power management). */
+	res = hsi_start_tx(cfhsi_client);
+	if (res) {
+		pr_warning("hsi_proto_probe: hsi_start_tx:%d.\n", res);
+		goto err_hsi_start_tx;
+	}
+
+	/* Connect channels to CAIF HSI devices. */
+	for (i = 0; i < NR_OF_CAIF_HSI_CHANNELS; i++) {
+		cfhsi = kzalloc(sizeof(struct cfhsi_v3), GFP_KERNEL);
+		if (!cfhsi) {
+			res = -ENOMEM;
+			break; /* TODO: Unwind channels set up so far. */
+		}
+
+		/* Assign HSI client to this CAIF HSI device. */
+		cfhsi->dev.cfhsi_tx = cfhsi_tx;
+		cfhsi->dev.cfhsi_rx = cfhsi_rx;
+
+		/* Allocate HSI messages. */
+		cfhsi->tx_msg = hsi_alloc_msg(1, GFP_KERNEL);
+		cfhsi->rx_msg = hsi_alloc_msg(1, GFP_KERNEL);
+		if (!cfhsi->tx_msg || !cfhsi->rx_msg) {
+			hsi_free_msg(cfhsi->tx_msg);
+			hsi_free_msg(cfhsi->rx_msg);
+			kfree(cfhsi);
+			res = -ENOMEM;
+			break; /* TODO: Unwind channels set up so far. */
+		}
+
+		/* Set up TX message. */
+		cfhsi->tx_msg->cl = cfhsi_client;
+		cfhsi->tx_msg->context = (void *)cfhsi;
+		cfhsi->tx_msg->complete = cfhsi_v3_write_cb;
+		cfhsi->tx_msg->destructor = cfhsi_v3_destructor;
+		cfhsi->tx_msg->channel = i;
+		cfhsi->tx_msg->ttype = HSI_MSG_WRITE;
+		cfhsi->tx_msg->break_frame = 0; /* No break frame. */
+
+		/* Set up RX message. */
+		cfhsi->rx_msg->cl = cfhsi_client;
+		cfhsi->rx_msg->context = (void *)cfhsi;
+		cfhsi->rx_msg->complete = cfhsi_v3_read_cb;
+		cfhsi->rx_msg->destructor = cfhsi_v3_destructor;
+		cfhsi->rx_msg->channel = i;
+		cfhsi->rx_msg->ttype = HSI_MSG_READ;
+		cfhsi->rx_msg->break_frame = 0; /* No break frame. */
+
+		/* Initialize CAIF HSI platform device. */
+		cfhsi->pdev.name = "cfhsi";
+		cfhsi->pdev.dev.platform_data = &cfhsi->dev;
+		cfhsi->pdev.dev.release = cfhsi_v3_release;
+		/* Use channel number as id. */
+		cfhsi->pdev.id = i;
+		/* Register platform device. */
+		res = platform_device_register(&cfhsi->pdev);
+		if (res) {
+			pr_warning("hsi_proto_probe: plat_dev_reg:%d.\n", res);
+			res = -ENODEV;
+			/* TODO: Error handling. */
+		}
+
+		/* Add HSI device to device list. */
+		list_add_tail(&cfhsi->list, &cfhsi_dev_list);
+	}
+
+	return res;
+
+ err_hsi_start_tx:
+ err_hsi_setup:
+	hsi_release_port(cfhsi_client);
+ err_hsi_claim:
+	cfhsi_client = NULL;
+
+	return res;
+}
+
+static int hsi_proto_remove(struct device *dev)
+{
+	struct cfhsi_v3 *cfhsi = NULL;
+	struct list_head *list_node;
+	struct list_head *n;
+
+	if (!cfhsi_client)
+		return -ENODEV;
+
+	list_for_each_safe(list_node, n, &cfhsi_dev_list) {
+		cfhsi = list_entry(list_node, struct cfhsi_v3, list);
+		/* Remove from list. */
+		list_del(list_node);
+		/* Our HSI device is gone, unregister CAIF HSI device. */
+		platform_device_del(&cfhsi->pdev);
+		hsi_free_msg(cfhsi->tx_msg);
+		hsi_free_msg(cfhsi->rx_msg);
+		/* Free memory.
*/ + kfree(cfhsi); + } + + hsi_stop_tx(cfhsi_client); + hsi_release_port(cfhsi_client); + + cfhsi_client = NULL; + + return 0; +} + +static int hsi_proto_suspend(struct device *dev, pm_message_t mesg) +{ + /* Not handled. */ + pr_info("hsi_proto_suspend.\n"); + + return 0; +} + +static int hsi_proto_resume(struct device *dev) +{ + /* Not handled. */ + pr_info("hsi_proto_resume.\n"); + + return 0; +} + +static struct hsi_client_driver cfhsi_v3_driver = { + .driver = { + .name = "cfhsi_v3_driver", + .owner = THIS_MODULE, + .probe = hsi_proto_probe, + .remove = __devexit_p(hsi_proto_remove), + .suspend = hsi_proto_suspend, + .resume = hsi_proto_resume, + }, +}; + +static int __init cfhsi_v3_init(void) +{ + int res; + + /* Register protocol driver for HSI interface. */ + res = hsi_register_client_driver(&cfhsi_v3_driver); + if (res) + pr_warning("Failed to register CAIF HSI V3 driver.\n"); + + return res; +} + +static void __exit cfhsi_v3_exit(void) +{ + struct cfhsi_v3 *cfhsi = NULL; + struct list_head *list_node; + struct list_head *n; + + /* Unregister driver. */ + hsi_unregister_client_driver(&cfhsi_v3_driver); + + if (!cfhsi_client) + return; + + list_for_each_safe(list_node, n, &cfhsi_dev_list) { + cfhsi = list_entry(list_node, struct cfhsi_v3, list); + platform_device_del(&cfhsi->pdev); + hsi_free_msg(cfhsi->tx_msg); + hsi_free_msg(cfhsi->rx_msg); + kfree(cfhsi); + } + + hsi_stop_tx(cfhsi_client); + hsi_release_port(cfhsi_client); + + cfhsi_client = NULL; +} + +module_init(cfhsi_v3_init); +module_exit(cfhsi_v3_exit); -- cgit v1.2.3 From 5be556d75c33d54f45a6766e7a4b8c1926e55a47 Mon Sep 17 00:00:00 2001 From: repo sync Date: Tue, 10 May 2011 16:36:58 +0100 Subject: drivers: hsi: context save/restore for STE HSI ST-Ericsson ID: ER338229 Change-Id: I860d98414ee603ab8b992d6688ff02de7cc6fd98 Signed-off-by: Pawel Szyszuk Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/22821 Reviewed-by: Alex MACRO Reviewed-by: QATEST Reviewed-by: Chris KIMBER Reviewed-by: Andrew LYNN --- drivers/hsi/controllers/ste_hsi.c | 325 ++++++++++++++++++++++++++++---------- 1 file changed, 244 insertions(+), 81 deletions(-) diff --git a/drivers/hsi/controllers/ste_hsi.c b/drivers/hsi/controllers/ste_hsi.c index 080a57ccade..8ac1363c14c 100644 --- a/drivers/hsi/controllers/ste_hsi.c +++ b/drivers/hsi/controllers/ste_hsi.c @@ -13,6 +13,7 @@ #include #include #include +#include #ifdef CONFIG_STE_DMA40 #include @@ -21,30 +22,57 @@ #include +/* + * Copy of HSIR/HSIT context for restoring after HW reset (Vape power off). 
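+ * Editorial note: as the hunks further down show, ste_hsi_context() is
+ * called at the start of both the IRQ and the DMA transfer paths, so a
+ * lost context is detected and the registers are reprogrammed before
+ * the next transfer touches the hardware.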
+ */ +struct ste_hsi_hw_context { + unsigned int tx_mode; + unsigned int tx_divisor; + unsigned int tx_channels; + unsigned int rx_mode; + unsigned int rx_channels; +}; + /** - * struct ste_hsi_controller - Nomadik HSI controller data - * @dev: device associated to the controller (HSI controller) - * @rx_base: HSI receiver registers base address - * @tx_base: HSI transmitter registers base address + * struct ste_hsi_controller - STE HSI controller data + * @dev: device associated to STE HSI controller + * @tx_dma_base: HSI TX peripheral physical address + * @rx_dma_base: HSI RX peripheral physical address + * @rx_base: HSI RX peripheral virtual address + * @tx_base: HSI TX peripheral virtual address + * @regulator: STE HSI Vape consumer regulator + * @context: copy of client-configured HSI TX / HSI RX registers + * @tx_clk: HSI TX core clock (HSITXCLK) + * @rx_clk: HSI RX core clock (HSIRXCLK) + * @ssitx_clk: HSI TX host clock (HCLK) + * @ssirx_clk: HSI RX host clock (HCLK) + * @clk_work: structure for delayed HSI clock disabling + * @overrun_irq: HSI channels overrun IRQ table + * @ck_refcount: reference count for clock enable operation + * @ck_lock: locking primitive for HSI clocks + * @lock: locking primitive for HSI controller + * @use_dma: flag for DMA enabled + * @ck_on: flag for HSI clocks enabled */ struct ste_hsi_controller { struct device *dev; + dma_addr_t tx_dma_base; + dma_addr_t rx_dma_base; + unsigned char __iomem *rx_base; + unsigned char __iomem *tx_base; + struct regulator *regulator; + struct ste_hsi_hw_context *context; struct clk *tx_clk; struct clk *rx_clk; struct clk *ssitx_clk; struct clk *ssirx_clk; struct delayed_work clk_work; - unsigned char __iomem *rx_base; - unsigned char __iomem *tx_base; int overrun_irq[STE_HSI_MAX_CHANNELS]; int ck_refcount; spinlock_t ck_lock; spinlock_t lock; unsigned int use_dma:1; unsigned int ck_on:1; - /* physical address of rx and tx controller */ - dma_addr_t rx_dma_base; - dma_addr_t tx_dma_base; }; #ifdef CONFIG_STE_DMA40 @@ -107,6 +135,146 @@ static void ste_hsi_clk_free(struct clk **pclk) *pclk = NULL; } +static void ste_hsi_init_registers(struct ste_hsi_controller *ste_hsi) +{ + writel(0, ste_hsi->tx_base + STE_HSI_TX_BUFSTATE); + writel(0, ste_hsi->tx_base + STE_HSI_TX_FLUSHBITS); + /* TO DO: TX channel priorities will be implemented later */ + writel(0, ste_hsi->tx_base + STE_HSI_TX_PRIORITY); + writel(0, ste_hsi->tx_base + STE_HSI_TX_BURSTLEN); + writel(0, ste_hsi->tx_base + STE_HSI_TX_PREAMBLE); + writel(0, ste_hsi->tx_base + STE_HSI_TX_DATASWAP); + writel(0, ste_hsi->tx_base + STE_HSI_TX_DMAEN); + writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKID); + writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIC); + writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM); + + /* 0x23 is reset value per DB8500 Design Spec */ + writel(0x23, ste_hsi->rx_base + STE_HSI_RX_THRESHOLD); + + writel(0, ste_hsi->rx_base + STE_HSI_RX_BUFSTATE); + + /* HSIR clock recovery mode */ + writel(0, ste_hsi->rx_base + STE_HSI_RX_DETECTOR); + + /* Bits 0,1,2 set to 1 to clear exception flags */ + writel(0x07, ste_hsi->rx_base + STE_HSI_RX_ACK); + + /* Bits 0..7 set to 1 to clear OVERRUN IRQ */ + writel(0xFF, ste_hsi->rx_base + STE_HSI_RX_OVERRUNACK); + + writel(0, ste_hsi->rx_base + STE_HSI_RX_DMAEN); + writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIC); + writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM); + writel(0, ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM); + + /* Flush all errors */ + writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEP); + + /* 2 is 
Flush state, no RX exception generated afterwards */
+	writel(2, ste_hsi->rx_base + STE_HSI_RX_STATE);
+
+	writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEPIM);
+}
+
+static void ste_hsi_setup_registers(struct ste_hsi_controller *ste_hsi)
+{
+	unsigned int buffers, i;
+	struct ste_hsi_hw_context *pcontext = ste_hsi->context;
+
+	/*
+	 * Configure TX
+	 */
+	writel(pcontext->tx_mode, ste_hsi->tx_base + STE_HSI_TX_MODE);
+	writel(pcontext->tx_divisor, ste_hsi->tx_base + STE_HSI_TX_DIVISOR);
+	writel(0, ste_hsi->tx_base + STE_HSI_TX_PARITY);
+	writel(pcontext->tx_channels, ste_hsi->tx_base + STE_HSI_TX_CHANNELS);
+	/* Calculate buffers number per channel */
+	buffers = STE_HSI_MAX_BUFFERS / pcontext->tx_channels;
+	for (i = 0; i < pcontext->tx_channels; i++) {
+		/* Set 32 bit long frames */
+		writel(31, ste_hsi->tx_base + STE_HSI_TX_FRAMELENX + 4 * i);
+		writel(buffers * i,
+		       ste_hsi->tx_base + STE_HSI_TX_BASEX + 4 * i);
+		writel(buffers - 1,
+		       ste_hsi->tx_base + STE_HSI_TX_SPANX + 4 * i);
+		writel(buffers - 1,
+		       ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * i);
+		writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * i);
+	}
+
+	/*
+	 * The value read from this register gives the synchronized status
+	 * of the transmitter state and this synchronization takes 2 HSITCLK
+	 * cycles plus 3 HCLK cycles.
+	 */
+	while (STE_HSI_STATE_IDLE != readl(ste_hsi->tx_base + STE_HSI_TX_STATE))
+		cpu_relax();
+
+	/*
+	 * Configure RX
+	 */
+	writel(pcontext->rx_mode, ste_hsi->rx_base + STE_HSI_RX_MODE);
+	writel(0, ste_hsi->rx_base + STE_HSI_RX_PARITY);
+	writel(pcontext->rx_channels, ste_hsi->rx_base + STE_HSI_RX_CHANNELS);
+	/* Calculate buffers number per channel */
+	buffers = STE_HSI_MAX_BUFFERS / pcontext->rx_channels;
+	for (i = 0; i < pcontext->rx_channels; i++) {
+		/* Set 32 bit long frames */
+		writel(31, ste_hsi->rx_base + STE_HSI_RX_FRAMELENX + 4 * i);
+		writel(buffers * i,
+		       ste_hsi->rx_base + STE_HSI_RX_BASEX + 4 * i);
+		writel(buffers - 1,
+		       ste_hsi->rx_base + STE_HSI_RX_SPANX + 4 * i);
+		writel(buffers - 1,
+		       ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * i);
+		writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * i);
+	}
+
+	/*
+	 * The value read from this register gives the synchronized status
+	 * of the receiver state and this synchronization takes 2 HSIRCLK
+	 * cycles plus 3 HCLK cycles.
+	 */
+	while (STE_HSI_STATE_IDLE != readl(ste_hsi->rx_base + STE_HSI_RX_STATE))
+		cpu_relax();
+}
+
+/*
+ * When the cpuidle framework enters sleep or deep sleep state, Vape is
+ * powered off. This resets the HSIT/HSIR registers to their default
+ * (idle) values.
+ * ste_hsi_context() checks the HSI registers and restores them to the
+ * values set by the HSI client via ste_hsi_setup().
+ */
+static void ste_hsi_context(struct ste_hsi_controller *ste_hsi)
+{
+	unsigned int tx_channels;
+	unsigned int rx_channels;
+
+	tx_channels = readl(ste_hsi->tx_base + STE_HSI_TX_CHANNELS);
+	rx_channels = readl(ste_hsi->rx_base + STE_HSI_RX_CHANNELS);
+
+	/*
+	 * Check if the context was lost.
+	 * The target config is at least 2 channels for both TX and RX.
+	 * TX and RX channels are set to 1 after HW reset.
+	 */
+	if ((ste_hsi->context->tx_channels != tx_channels) ||
+	    (ste_hsi->context->rx_channels != rx_channels)) {
+		/*
+		 * TODO: remove "dev_info" after thorough testing.
+		 * Debug output is left in to gather statistics on how
+		 * frequently the context is lost during regular HSI operation.
+ */ + dev_info(ste_hsi->dev, "context\n"); + + ste_hsi_init_registers(ste_hsi); + ste_hsi_setup_registers(ste_hsi); + } +} + static void ste_hsi_clks_free(struct ste_hsi_controller *ste_hsi) { ste_hsi_clk_free(&ste_hsi->rx_clk); @@ -129,13 +297,16 @@ static int ste_hsi_clock_enable(struct hsi_controller *hsi) goto out; err = clk_enable(ste_hsi->ssitx_clk); - if (unlikely(err)) + if (unlikely(err)) { clk_disable(ste_hsi->ssirx_clk); + goto out; + } err = clk_enable(ste_hsi->rx_clk); if (unlikely(err)) { clk_disable(ste_hsi->ssitx_clk); clk_disable(ste_hsi->ssirx_clk); + goto out; } err = clk_enable(ste_hsi->tx_clk); @@ -143,6 +314,7 @@ static int ste_hsi_clock_enable(struct hsi_controller *hsi) clk_disable(ste_hsi->rx_clk); clk_disable(ste_hsi->ssitx_clk); clk_disable(ste_hsi->ssirx_clk); + goto out; } ste_hsi->ck_on = 1; @@ -242,6 +414,8 @@ static int ste_hsi_start_irq(struct hsi_msg *msg) if (unlikely(err)) return err; + ste_hsi_context(ste_hsi); + msg->actual_len = 0; msg->status = HSI_STATUS_PROCEEDING; @@ -362,6 +536,8 @@ static int ste_hsi_start_dma(struct hsi_msg *msg) if (unlikely(err)) return err; + ste_hsi_context(ste_hsi); + if (msg->ttype == HSI_MSG_WRITE) { direction = DMA_TO_DEVICE; dma_enable_address = ste_hsi->tx_base + STE_HSI_TX_DMAEN; @@ -1076,9 +1252,12 @@ static int ste_hsi_setup(struct hsi_client *cl) struct ste_hsi_port *ste_port = hsi_port_drvdata(port); struct hsi_controller *hsi = to_hsi_controller(port->device.parent); struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); - int err, i, buffers; + int err; u32 div = 0; + if (ste_hsi->regulator) + regulator_enable(ste_hsi->regulator); + err = ste_hsi_clock_enable(hsi); if (unlikely(err)) return err; @@ -1089,51 +1268,35 @@ static int ste_hsi_setup(struct hsi_client *cl) --div; } + if (!ste_hsi->context) + ste_hsi->context = kzalloc(sizeof(struct ste_hsi_hw_context), GFP_KERNEL); + + if (!ste_hsi->context) { + dev_err(ste_hsi->dev, "Not enough memory for context!\n"); + return -ENOMEM; + } else { + /* Save HSI context */ + ste_hsi->context->tx_mode = cl->tx_cfg.mode; + ste_hsi->context->tx_divisor = div; + ste_hsi->context->tx_channels = cl->tx_cfg.channels; + ste_hsi->context->rx_mode = cl->rx_cfg.mode; + ste_hsi->context->rx_channels = cl->rx_cfg.channels; + } + port->tx_cfg = cl->tx_cfg; port->rx_cfg = cl->rx_cfg; - /* Configure TX */ - writel(cl->tx_cfg.mode, ste_hsi->tx_base + STE_HSI_TX_MODE); - writel(div, ste_hsi->tx_base + STE_HSI_TX_DIVISOR); - writel(0, ste_hsi->tx_base + STE_HSI_TX_PARITY); - /* TODO: Wait for idle here */ - writel(cl->tx_cfg.channels, ste_hsi->tx_base + STE_HSI_TX_CHANNELS); - /* Calculate buffers number per channel */ - buffers = STE_HSI_MAX_BUFFERS / cl->tx_cfg.channels; - for (i = 0; i < cl->tx_cfg.channels; i++) { - /* Set 32 bit long frames */ - writel(31, ste_hsi->tx_base + STE_HSI_TX_FRAMELENX + 4 * i); - writel(buffers * i, - ste_hsi->tx_base + STE_HSI_TX_BASEX + 4 * i); - writel(buffers - 1, - ste_hsi->tx_base + STE_HSI_TX_SPANX + 4 * i); - writel(buffers - 1, - ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * i); - writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * i); - } - /* Configure RX */ - writel(cl->rx_cfg.mode, ste_hsi->rx_base + STE_HSI_RX_MODE); - writel(0, ste_hsi->rx_base + STE_HSI_RX_PARITY); - writel(cl->rx_cfg.channels, ste_hsi->rx_base + STE_HSI_RX_CHANNELS); - /* Calculate buffers number per channel */ - buffers = STE_HSI_MAX_BUFFERS / cl->rx_cfg.channels; - for (i = 0; i < cl->rx_cfg.channels; i++) { - /* Set 32 bit long frames */ - writel(31, 
ste_hsi->rx_base + STE_HSI_RX_FRAMELENX + 4 * i); - writel(buffers * i, - ste_hsi->rx_base + STE_HSI_RX_BASEX + 4 * i); - writel(buffers - 1, - ste_hsi->rx_base + STE_HSI_RX_SPANX + 4 * i); - writel(buffers - 1, - ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * i); - writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * i); - } + ste_hsi_setup_registers(ste_hsi); ste_port->channels = max(cl->tx_cfg.channels, cl->rx_cfg.channels); ste_hsi_setup_dma(cl); ste_hsi_clock_disable(hsi); + + if (ste_hsi->regulator) + regulator_disable(ste_hsi->regulator); + return err; } @@ -1200,11 +1363,25 @@ static int ste_hsi_flush(struct hsi_client *cl) static int ste_hsi_start_tx(struct hsi_client *cl) { + struct hsi_port *port = to_hsi_port(cl->device.parent); + struct hsi_controller *hsi = to_hsi_controller(port->device.parent); + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + + if (ste_hsi->regulator) + regulator_enable(ste_hsi->regulator); + return 0; } static int ste_hsi_stop_tx(struct hsi_client *cl) { + struct hsi_port *port = to_hsi_port(cl->device.parent); + struct hsi_controller *hsi = to_hsi_controller(port->device.parent); + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + + if (ste_hsi->regulator) + regulator_disable(ste_hsi->regulator); + return 0; } @@ -1288,40 +1465,7 @@ static int __init ste_hsi_hw_init(struct hsi_controller *hsi) if (unlikely(err)) return err; - writel(0, ste_hsi->tx_base + STE_HSI_TX_BUFSTATE); - writel(0, ste_hsi->tx_base + STE_HSI_TX_FLUSHBITS); - writel(0, ste_hsi->tx_base + STE_HSI_TX_PRIORITY); - writel(0, ste_hsi->tx_base + STE_HSI_TX_BURSTLEN); - writel(0, ste_hsi->tx_base + STE_HSI_TX_PREAMBLE); - writel(0, ste_hsi->tx_base + STE_HSI_TX_DATASWAP); - writel(0, ste_hsi->tx_base + STE_HSI_TX_DMAEN); - writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKID); - writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIC); - writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKIM); - - /* 0x23 is reset value per DB8500 Design Spec */ - writel(0x23, ste_hsi->rx_base + STE_HSI_RX_THRESHOLD); - - writel(0, ste_hsi->rx_base + STE_HSI_RX_BUFSTATE); - - /* Bits 0,1,2 set to 1 to clear exception flags */ - writel(0x07, ste_hsi->rx_base + STE_HSI_RX_ACK); - - /* Bits 0..7 set to 1 to clear OVERRUN IRQ */ - writel(0xFF, ste_hsi->rx_base + STE_HSI_RX_OVERRUNACK); - - writel(0, ste_hsi->rx_base + STE_HSI_RX_DMAEN); - writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIC); - writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKIM); - writel(0, ste_hsi->rx_base + STE_HSI_RX_OVERRUNIM); - - /* Flush all errors */ - writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEP); - - /* 2 is Flush state, no RX exception generated afterwards */ - writel(2, ste_hsi->rx_base + STE_HSI_RX_STATE); - - writel(0, ste_hsi->rx_base + STE_HSI_RX_EXCEPIM); + ste_hsi_init_registers(ste_hsi); ste_hsi_clock_disable(hsi); @@ -1352,6 +1496,15 @@ static int __init ste_hsi_add_controller(struct hsi_controller *hsi, ste_hsi->dev = &hsi->device; hsi_controller_set_drvdata(hsi, ste_hsi); + /* Get and enable regulator */ + ste_hsi->regulator = regulator_get(&pdev->dev, "v-hsi"); + if (IS_ERR(ste_hsi->regulator)) { + dev_err(&pdev->dev, "could not get v-hsi regulator\n"); + ste_hsi->regulator = NULL; + } else { + regulator_enable(ste_hsi->regulator); + } + /* Get and reserve resources for receiver */ err = ste_hsi_get_iomem(pdev, "hsi_rx_base", &ste_hsi->rx_base, &ste_hsi->rx_dma_base); @@ -1440,6 +1593,10 @@ static int __init ste_hsi_add_controller(struct hsi_controller *hsi, goto err_clk_free; err = 
hsi_register_controller(hsi); + + if (ste_hsi->regulator) + regulator_disable(ste_hsi->regulator); + if (err) goto err_clk_free; @@ -1457,9 +1614,15 @@ static int ste_hsi_remove_controller(struct hsi_controller *hsi, { struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + if (ste_hsi->regulator) + regulator_put(ste_hsi->regulator); + ste_hsi_clks_free(ste_hsi); hsi_unregister_controller(hsi); + kfree(ste_hsi->context); + kfree(ste_hsi); + return 0; } -- cgit v1.2.3 From 34b45698157f41b89227c4043e97f05aeb520bb7 Mon Sep 17 00:00:00 2001 From: Pawel Szyszuk Date: Thu, 7 Jul 2011 15:16:33 +0100 Subject: ARM: U8500: ST-E HSI: DMA burst and PIPE support Added support for HSIR PIPELINED mode (also in drivers/hsi) Added DMA max burst (in DMA words) as a parameter in hsi.h DMA channel high priority set to TRUE for HSI ST-Ericsson ID: 356625 ST-Ericsson Linux next: NA ST-Ericsson FOSS-OUT ID: Trivial Change-Id: Id842e61343ce5013992337db085fcbe91dd5b9f7 Signed-off-by: Pawel Szyszuk Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/32180 Reviewed-by: Yann GAUTIER Reviewed-by: Andrew LYNN --- arch/arm/mach-ux500/include/mach/hsi.h | 5 +++- drivers/hsi/controllers/ste_hsi.c | 51 ++++++++++++++++++++++++++++------ 2 files changed, 47 insertions(+), 9 deletions(-) diff --git a/arch/arm/mach-ux500/include/mach/hsi.h b/arch/arm/mach-ux500/include/mach/hsi.h index 030e35e729b..1d2ab206e27 100644 --- a/arch/arm/mach-ux500/include/mach/hsi.h +++ b/arch/arm/mach-ux500/include/mach/hsi.h @@ -63,6 +63,7 @@ #define STE_HSI_RX_SPANX 0x140 #define STE_HSI_RX_GAUGEX 0x180 #define STE_HSI_RX_WATERMARKX 0x1C0 +#define STE_HSI_RX_FRAMEBURSTCNT 0x1E0 #define STE_HSI_RX_DMAEN 0x200 #define STE_HSI_RX_WATERMARKIS 0x204 #define STE_HSI_RX_WATERMARKIM 0x208 @@ -101,7 +102,9 @@ #define STE_HSI_MAX_BUFFERS 32 /* Max channels of STE HSI controller */ -#define STE_HSI_MAX_CHANNELS 4 +#define STE_HSI_MAX_CHANNELS 2 + +#define STE_HSI_DMA_MAX_BURST 1 struct stedma40_chan_cfg; diff --git a/drivers/hsi/controllers/ste_hsi.c b/drivers/hsi/controllers/ste_hsi.c index 8ac1363c14c..a9cd343fd53 100644 --- a/drivers/hsi/controllers/ste_hsi.c +++ b/drivers/hsi/controllers/ste_hsi.c @@ -198,9 +198,19 @@ static void ste_hsi_setup_registers(struct ste_hsi_controller *ste_hsi) ste_hsi->tx_base + STE_HSI_TX_BASEX + 4 * i); writel(buffers - 1, ste_hsi->tx_base + STE_HSI_TX_SPANX + 4 * i); - writel(buffers - 1, - ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * i); + + /* + * The DMA burst request and the buffer occupation interrupt are + * asserted when the free space in the corresponding channel buffer + * is greater than the value programmed in TX_WATERMARKX field. + * The field value must be less than the corresponding SPAN value. + */ +#ifdef CONFIG_STE_DMA40 + writel(STE_HSI_DMA_MAX_BURST-1, + ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * i); +#else /* IRQ mode */ writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKX + 4 * i); +#endif } /* @@ -215,7 +225,16 @@ static void ste_hsi_setup_registers(struct ste_hsi_controller *ste_hsi) * Configure RX */ writel(pcontext->rx_mode, ste_hsi->rx_base + STE_HSI_RX_MODE); - writel(0, ste_hsi->rx_base + STE_HSI_RX_PARITY); + + if (STE_HSI_MODE_PIPELINED == pcontext->rx_mode) + /* + * 0x0F: The READY line is negated after the start of the + * 16th frame reception in PIPELINED mode. 
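+	 * (Editor's note: the count appears to be zero-based, i.e. a
+	 * programmed value of N negates READY after frame N + 1; the
+	 * follow-up patch below raises the value to 0xFF for 256 frames.)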
+ */ + writel(0x0F, ste_hsi->rx_base + STE_HSI_RX_FRAMEBURSTCNT); + else + writel(0, ste_hsi->rx_base + STE_HSI_RX_FRAMEBURSTCNT); + writel(pcontext->rx_channels, ste_hsi->rx_base + STE_HSI_RX_CHANNELS); /* Calculate buffers number per channel */ buffers = STE_HSI_MAX_BUFFERS / pcontext->rx_channels; @@ -226,9 +245,19 @@ static void ste_hsi_setup_registers(struct ste_hsi_controller *ste_hsi) ste_hsi->rx_base + STE_HSI_RX_BASEX + 4 * i); writel(buffers - 1, ste_hsi->rx_base + STE_HSI_RX_SPANX + 4 * i); - writel(buffers - 1, - ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * i); + + /* + * The DMA burst request and the buffer occupation interrupt are + * asserted when the busy space in the corresponding channel buffer + * is greater than the value programmed in RX_WATERMARKX field. + * The field value must be less than the corresponding SPAN value. + */ +#ifdef CONFIG_STE_DMA40 + writel(STE_HSI_DMA_MAX_BURST-1, + ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * i); +#else /* IRQ mode */ writel(0, ste_hsi->rx_base + STE_HSI_RX_WATERMARKX + 4 * i); +#endif } /* @@ -642,13 +671,13 @@ static int ste_hsi_setup_dma(struct hsi_client *cl) .src_addr = 0, /* dynamic data */ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, .direction = DMA_FROM_DEVICE, - .src_maxburst = 1, + .src_maxburst = STE_HSI_DMA_MAX_BURST, }; struct dma_slave_config tx_conf = { .dst_addr = 0, /* dynamic data */ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, .direction = DMA_TO_DEVICE, - .dst_maxburst = 1, + .dst_maxburst = STE_HSI_DMA_MAX_BURST, }; if (!ste_hsi->use_dma) @@ -1279,7 +1308,13 @@ static int ste_hsi_setup(struct hsi_client *cl) ste_hsi->context->tx_mode = cl->tx_cfg.mode; ste_hsi->context->tx_divisor = div; ste_hsi->context->tx_channels = cl->tx_cfg.channels; - ste_hsi->context->rx_mode = cl->rx_cfg.mode; + + if ((HSI_FLOW_PIPE == cl->rx_cfg.flow) && + (HSI_MODE_FRAME == cl->rx_cfg.mode)) + ste_hsi->context->rx_mode = STE_HSI_MODE_PIPELINED; + else + ste_hsi->context->rx_mode = cl->rx_cfg.mode; + ste_hsi->context->rx_channels = cl->rx_cfg.channels; } -- cgit v1.2.3 From f5d2a69ae3283d349c1e931a224822d13fc03006 Mon Sep 17 00:00:00 2001 From: Pawel Szyszuk Date: Thu, 22 Sep 2011 15:58:20 +0100 Subject: ARM: U8500: ST-E HSI: RX FRAMEBURSTCNT set to 0xFF HSIR FRAMEBURSTCNT register set to the max value 0xFF (255). ST-Ericsson Linux next: NA ST-Ericsson ID: 361371 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: I095f113633d873a60a5622b1d195c28a2b01ceab Signed-off-by: Pawel Szyszuk Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/32216 Reviewed-by: Christopher BLAIR Reviewed-by: Derek MORTON Reviewed-by: Andrew LYNN --- drivers/hsi/controllers/ste_hsi.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/hsi/controllers/ste_hsi.c b/drivers/hsi/controllers/ste_hsi.c index a9cd343fd53..07b39bb2868 100644 --- a/drivers/hsi/controllers/ste_hsi.c +++ b/drivers/hsi/controllers/ste_hsi.c @@ -228,10 +228,10 @@ static void ste_hsi_setup_registers(struct ste_hsi_controller *ste_hsi) if (STE_HSI_MODE_PIPELINED == pcontext->rx_mode) /* - * 0x0F: The READY line is negated after the start of the - * 16th frame reception in PIPELINED mode. + * 0xFF: The READY line is negated after the start of the + * 256th frame reception in PIPELINED mode. 
*/ - writel(0x0F, ste_hsi->rx_base + STE_HSI_RX_FRAMEBURSTCNT); + writel(0xFF, ste_hsi->rx_base + STE_HSI_RX_FRAMEBURSTCNT); else writel(0, ste_hsi->rx_base + STE_HSI_RX_FRAMEBURSTCNT); -- cgit v1.2.3 From 5174d0306e48e9ddd0d5cc663c7b9dc910fe5db3 Mon Sep 17 00:00:00 2001 From: Pawel Szyszuk Date: Mon, 3 Oct 2011 10:00:43 +0100 Subject: ARM: U9500: HSI core clocks initialization ST-Ericsson Linux next: NA ST-Ericsson ID: 343481 ST-Ericsson FOSS-OUT ID: Trivial Change-Id: Ic59f08e1bb5d858344d95b5d34ae8a5d85d9aa1d Signed-off-by: Pawel Szyszuk Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/32518 Reviewed-by: Andrew LYNN --- drivers/hsi/controllers/ste_hsi.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/hsi/controllers/ste_hsi.c b/drivers/hsi/controllers/ste_hsi.c index 07b39bb2868..a2cbb2108f7 100644 --- a/drivers/hsi/controllers/ste_hsi.c +++ b/drivers/hsi/controllers/ste_hsi.c @@ -1514,6 +1514,7 @@ static int __init ste_hsi_add_controller(struct hsi_controller *hsi, char overrun_name[] = "hsi_rx_overrun_chxxx"; unsigned char i; int err; + unsigned long rate; ste_hsi = kzalloc(sizeof(struct ste_hsi_controller), GFP_KERNEL); if (!ste_hsi) { @@ -1586,6 +1587,22 @@ static int __init ste_hsi_add_controller(struct hsi_controller *hsi, goto err_clk_free; } + /* Set HSITXCLK rate to 100 MHz */ + rate = clk_round_rate(ste_hsi->tx_clk, 100000000); + err = clk_set_rate(ste_hsi->tx_clk, rate); + if (unlikely(err)) { + dev_err(&hsi->device, "Couldn't set HSIT clock rate\n"); + goto err_clk_free; + } + + /* Set HSIRXCLK rate to 200 MHz */ + rate = clk_round_rate(ste_hsi->rx_clk, 200000000); + err = clk_set_rate(ste_hsi->rx_clk, rate); + if (unlikely(err)) { + dev_err(&hsi->device, "Couldn't set HSIR clock rate\n"); + goto err_clk_free; + } + err = ste_hsi_clock_enable(hsi); if (unlikely(err)) goto err_clk_free; -- cgit v1.2.3 From 3eb3e1775ead27876884f5119342cdd8ffb091b9 Mon Sep 17 00:00:00 2001 From: Pawel Szyszuk Date: Thu, 6 Oct 2011 11:04:12 +0100 Subject: ARM: U9500: HSI RX PIPELINE buffer flush ST-Ericsson Linux next: NA ST-Ericsson ID: 365683 ST-Ericsson FOSS-OUT ID: NA Change-Id: I25b92d815447d2746ba25a9db132b242b3da9d9c Signed-off-by: Pawel Szyszuk Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/33223 Reviewed-by: Christopher BLAIR Reviewed-by: Derek MORTON Reviewed-by: Andrew LYNN --- drivers/hsi/controllers/ste_hsi.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/hsi/controllers/ste_hsi.c b/drivers/hsi/controllers/ste_hsi.c index a2cbb2108f7..fe43d7b2133 100644 --- a/drivers/hsi/controllers/ste_hsi.c +++ b/drivers/hsi/controllers/ste_hsi.c @@ -1353,17 +1353,13 @@ static int ste_hsi_flush(struct hsi_client *cl) writel(0, ste_hsi->rx_base + STE_HSI_RX_DMAEN); ste_hsi_terminate_dma(ste_port); - /* Flush all HSIR and HSIT buffers */ + /* Flush HSIT buffers */ writel(0, ste_hsi->tx_base + STE_HSI_TX_STATE); writel(0, ste_hsi->tx_base + STE_HSI_TX_BUFSTATE); + + /* Flush HSIR pipeline and channel buffers */ writel(0, ste_hsi->rx_base + STE_HSI_RX_STATE); - /* - * BUFSTATE is cleared twice on purpose: - * first time all fifos are cleared - * second time to clear data that was in pipline buffer - * and was transfered to fifos - */ - writel(0, ste_hsi->rx_base + STE_HSI_RX_BUFSTATE); + writel(0, ste_hsi->rx_base + STE_HSI_RX_PIPEGAUGE); writel(0, ste_hsi->rx_base + STE_HSI_RX_BUFSTATE); /* Flush all errors */ -- cgit v1.2.3 From ecc25025b1584b32fd0d51895dde1e9265803d24 Mon Sep 17 00:00:00 2001 From: Pawel Szyszuk Date: 
Thu, 6 Oct 2011 12:35:01 +0100 Subject: ARM: U9500: HSI registers update to DB8500 V2 spec ST-Ericsson Linux next: NA ST-Ericsson ID: 365681 ST-Ericsson FOSS-OUT ID: NA Change-Id: I396c2a4d44ded438308e358aa0ad976a397d4dd6 Signed-off-by: Pawel Szyszuk Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/33224 Reviewed-by: Christopher BLAIR Reviewed-by: Derek MORTON Reviewed-by: Andrew LYNN --- arch/arm/mach-ux500/include/mach/hsi.h | 11 +++++------ drivers/hsi/controllers/ste_hsi.c | 7 ++----- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/arch/arm/mach-ux500/include/mach/hsi.h b/arch/arm/mach-ux500/include/mach/hsi.h index 1d2ab206e27..58d33249cae 100644 --- a/arch/arm/mach-ux500/include/mach/hsi.h +++ b/arch/arm/mach-ux500/include/mach/hsi.h @@ -16,13 +16,11 @@ #define STE_HSI_TX_IOSTATE 0x00C #define STE_HSI_TX_BUFSTATE 0x010 #define STE_HSI_TX_DIVISOR 0x014 -#define STE_HSI_TX_PARITY 0x018 #define STE_HSI_TX_BREAK 0x01C #define STE_HSI_TX_CHANNELS 0x020 #define STE_HSI_TX_FLUSHBITS 0x024 #define STE_HSI_TX_PRIORITY 0x028 -#define STE_HSI_TX_BURSTLEN 0x02C -#define STE_HSI_TX_PREAMBLE 0x030 +#define STE_HSI_TX_STATICCONFID 0x02C #define STE_HSI_TX_DATASWAP 0x034 #define STE_HSI_TX_FRAMELENX 0x080 #define STE_HSI_TX_BUFFERX 0x0C0 @@ -31,10 +29,11 @@ #define STE_HSI_TX_GAUGEX 0x180 #define STE_HSI_TX_WATERMARKX 0x1C0 #define STE_HSI_TX_DMAEN 0x200 -#define STE_HSI_TX_WATERMARKIS 0x204 +#define STE_HSI_TX_WATERMARKMIS 0x204 #define STE_HSI_TX_WATERMARKIM 0x208 #define STE_HSI_TX_WATERMARKIC 0x20C #define STE_HSI_TX_WATERMARKID 0x210 +#define STE_HSI_TX_WATERMARKIS 0x214 #define STE_HSI_TX_PERIPHID0 0xFE0 #define STE_HSI_TX_PERIPHID1 0xFE4 #define STE_HSI_TX_PERIPHID2 0xFE8 @@ -46,7 +45,6 @@ #define STE_HSI_RX_STATE 0x008 #define STE_HSI_RX_BUFSTATE 0x00C #define STE_HSI_RX_THRESHOLD 0x010 -#define STE_HSI_RX_PARITY 0x014 #define STE_HSI_RX_DETECTOR 0x018 #define STE_HSI_RX_EXCEP 0x01C #define STE_HSI_RX_ACK 0x020 @@ -65,7 +63,7 @@ #define STE_HSI_RX_WATERMARKX 0x1C0 #define STE_HSI_RX_FRAMEBURSTCNT 0x1E0 #define STE_HSI_RX_DMAEN 0x200 -#define STE_HSI_RX_WATERMARKIS 0x204 +#define STE_HSI_RX_WATERMARKMIS 0x204 #define STE_HSI_RX_WATERMARKIM 0x208 #define STE_HSI_RX_WATERMARKIC 0x20C #define STE_HSI_RX_WATERMARKID 0x210 @@ -73,6 +71,7 @@ #define STE_HSI_RX_OVERRUNIM 0x218 #define STE_HSI_RX_EXCEPMIS 0x21C #define STE_HSI_RX_EXCEPIM 0x220 +#define STE_HSI_RX_WATERMARKIS 0x224 #define STE_HSI_RX_PERIPHID0 0xFE0 #define STE_HSI_RX_PERIPHID1 0xFE4 #define STE_HSI_RX_PERIPHID2 0xFE8 diff --git a/drivers/hsi/controllers/ste_hsi.c b/drivers/hsi/controllers/ste_hsi.c index fe43d7b2133..8348a66697d 100644 --- a/drivers/hsi/controllers/ste_hsi.c +++ b/drivers/hsi/controllers/ste_hsi.c @@ -141,8 +141,6 @@ static void ste_hsi_init_registers(struct ste_hsi_controller *ste_hsi) writel(0, ste_hsi->tx_base + STE_HSI_TX_FLUSHBITS); /* TO DO: TX channel priorities will be implemented later */ writel(0, ste_hsi->tx_base + STE_HSI_TX_PRIORITY); - writel(0, ste_hsi->tx_base + STE_HSI_TX_BURSTLEN); - writel(0, ste_hsi->tx_base + STE_HSI_TX_PREAMBLE); writel(0, ste_hsi->tx_base + STE_HSI_TX_DATASWAP); writel(0, ste_hsi->tx_base + STE_HSI_TX_DMAEN); writel(0, ste_hsi->tx_base + STE_HSI_TX_WATERMARKID); @@ -187,7 +185,6 @@ static void ste_hsi_setup_registers(struct ste_hsi_controller *ste_hsi) */ writel(pcontext->tx_mode, ste_hsi->tx_base + STE_HSI_TX_MODE); writel(pcontext->tx_divisor, ste_hsi->tx_base + STE_HSI_TX_DIVISOR); - writel(0, ste_hsi->tx_base + STE_HSI_TX_PARITY); 
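	/*
	 * Editor's note: with STE_HSI_MAX_BUFFERS = 32 and the two-channel
	 * target configuration, the calculation below yields buffers = 16
	 * per channel, so each channel's BASEX is programmed with 0 or 16
	 * and its SPANX with 15 (buffers - 1).
	 */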
writel(pcontext->tx_channels, ste_hsi->tx_base + STE_HSI_TX_CHANNELS); /* Calculate buffers number per channel */ buffers = STE_HSI_MAX_BUFFERS / pcontext->tx_channels; @@ -879,7 +876,7 @@ static void ste_hsi_rx_tasklet(unsigned long data) u32 irq_status, irq_mask; unsigned int i; - irq_status = readl(ste_hsi->rx_base + STE_HSI_RX_WATERMARKIS); + irq_status = readl(ste_hsi->rx_base + STE_HSI_RX_WATERMARKMIS); if (!irq_status) goto out; @@ -927,7 +924,7 @@ static void ste_hsi_tx_tasklet(unsigned long data) u32 irq_status, irq_mask; unsigned int i; - irq_status = readl(ste_hsi->tx_base + STE_HSI_TX_WATERMARKIS); + irq_status = readl(ste_hsi->tx_base + STE_HSI_TX_WATERMARKMIS); if (!irq_status) goto out; -- cgit v1.2.3 From ba0ed34551a37770402cc841ab8e4d0c14947ee4 Mon Sep 17 00:00:00 2001 From: Pawel Szyszuk Date: Wed, 10 Aug 2011 09:12:30 +0100 Subject: drivers: U9500: Add WAKE support in ST-E HSI Added support for IRQ_PRCMU_HSI0 (HSI0_CAWAKE event). GPIO pin is used to control ACWAKE output line. ST-Ericsson ID: 365660 ST-Ericsson Linux next: NA ST-Ericsson FOSS-OUT ID: Trivial Change-Id: Iefa11ef3466c599af20025ee71f879b9e41c06d9 Signed-off-by: Pawel Szyszuk Reviewed-on: http://gerrit.lud.stericsson.com/gerrit/36089 Reviewed-by: Andrew LYNN --- drivers/hsi/controllers/ste_hsi.c | 90 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/drivers/hsi/controllers/ste_hsi.c b/drivers/hsi/controllers/ste_hsi.c index 8348a66697d..34e249f0a91 100644 --- a/drivers/hsi/controllers/ste_hsi.c +++ b/drivers/hsi/controllers/ste_hsi.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #ifdef CONFIG_STE_DMA40 #include @@ -88,9 +90,12 @@ struct ste_hsi_port { struct list_head txqueue[STE_HSI_MAX_CHANNELS]; struct list_head rxqueue[STE_HSI_MAX_CHANNELS]; struct list_head brkqueue; + int cawake_irq; + int acwake_gpio; int tx_irq; int rx_irq; int excep_irq; + struct tasklet_struct cawake_tasklet; struct tasklet_struct rx_tasklet; struct tasklet_struct tx_tasklet; struct tasklet_struct exception_tasklet; @@ -867,6 +872,38 @@ out: spin_unlock_bh(&ste_hsi->lock); } +static void ste_hsi_cawake_tasklet(unsigned long data) +{ + struct hsi_port *port = (struct hsi_port *)data; + struct hsi_controller *hsi = to_hsi_controller(port->device.parent); + struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + u32 prcm_line_value; + int level; + + prcm_line_value = prcmu_read(DB8500_PRCM_LINE_VALUE); + level = (prcm_line_value & DB8500_PRCM_LINE_VALUE_HSI_CAWAKE0) ? 1 : 0; + + dev_info(ste_hsi->dev, "cawake %s\n", level ? "HIGH" : "LOW"); + hsi_event(hsi->port, level ? 
HSI_EVENT_START_RX : HSI_EVENT_STOP_RX); + enable_irq(ste_port->cawake_irq); +} + +static irqreturn_t ste_hsi_cawake_isr(int irq, void *data) +{ + struct hsi_port *port = data; + + /* IRQ processed only if device initialized */ + if ((port->device.parent) && (data)) { + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); + + disable_irq_nosync(irq); + tasklet_hi_schedule(&ste_port->cawake_tasklet); + } + + return IRQ_HANDLED; +} + static void ste_hsi_rx_tasklet(unsigned long data) { struct hsi_port *port = (struct hsi_port *)data; @@ -1145,6 +1182,36 @@ static int __init ste_hsi_get_iomem(struct platform_device *pdev, return 0; } +static int __init ste_hsi_acwake_gpio_init(struct platform_device *pdev, + int *gpio) +{ + int err = 0; + const char *gpio_name = "hsi0_acwake"; + struct resource *resource; + + resource = platform_get_resource_byname(pdev, IORESOURCE_IO, gpio_name); + if (unlikely(!resource)) { + dev_err(&pdev->dev, "hsi0_acwake does not exist\n"); + return -EINVAL; + } + + *gpio = resource->start; + err = gpio_request(*gpio, gpio_name); + if (err < 0) { + dev_err(&pdev->dev, "Can't request GPIO %d\n", *gpio); + return err; + } + + /* Initial level set to 0 (LOW) */ + err = gpio_direction_output(*gpio, 0); + if (err < 0) { + dev_err(&pdev->dev, "Can't init GPIO %d\n", *gpio); + gpio_free(*gpio); + } + + return err; +} + static int __init ste_hsi_get_irq(struct platform_device *pdev, const char *res_name, irqreturn_t(*isr) (int, void *), void *data, @@ -1392,21 +1459,27 @@ static int ste_hsi_flush(struct hsi_client *cl) static int ste_hsi_start_tx(struct hsi_client *cl) { struct hsi_port *port = to_hsi_port(cl->device.parent); + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); struct hsi_controller *hsi = to_hsi_controller(port->device.parent); struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); if (ste_hsi->regulator) regulator_enable(ste_hsi->regulator); + gpio_set_value(ste_port->acwake_gpio, 1); /* HIGH */ + return 0; } static int ste_hsi_stop_tx(struct hsi_client *cl) { struct hsi_port *port = to_hsi_port(cl->device.parent); + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); struct hsi_controller *hsi = to_hsi_controller(port->device.parent); struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + gpio_set_value(ste_port->acwake_gpio, 0); /* LOW */ + if (ste_hsi->regulator) regulator_disable(ste_hsi->regulator); @@ -1449,6 +1522,16 @@ static int ste_hsi_ports_init(struct hsi_controller *hsi, hsi_port_set_drvdata(port, ste_port); ste_port->dev = &port->device; + err = ste_hsi_acwake_gpio_init(pdev, &ste_port->acwake_gpio); + if (err) + return err; + + sprintf(irq_name, "hsi0_cawake"); + err = ste_hsi_get_irq(pdev, irq_name, ste_hsi_cawake_isr, port, + &ste_port->cawake_irq); + if (err) + return err; + sprintf(irq_name, "hsi_rx_irq%d", i); err = ste_hsi_get_irq(pdev, irq_name, ste_hsi_rx_isr, port, &ste_port->rx_irq); @@ -1461,6 +1544,9 @@ static int ste_hsi_ports_init(struct hsi_controller *hsi, if (err) return err; + tasklet_init(&ste_port->cawake_tasklet, ste_hsi_cawake_tasklet, + (unsigned long)port); + tasklet_init(&ste_port->rx_tasklet, ste_hsi_rx_tasklet, (unsigned long)port); @@ -1658,10 +1744,14 @@ static int ste_hsi_remove_controller(struct hsi_controller *hsi, struct platform_device *pdev) { struct ste_hsi_controller *ste_hsi = hsi_controller_drvdata(hsi); + struct hsi_port *port = to_hsi_port(&pdev->dev); + struct ste_hsi_port *ste_port = hsi_port_drvdata(port); if (ste_hsi->regulator) regulator_put(ste_hsi->regulator); + 
gpio_free(ste_port->acwake_gpio);
+
 	ste_hsi_clks_free(ste_hsi);
 	hsi_unregister_controller(hsi);
-- 
cgit v1.2.3
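
Editor's aside: to make the client-side flow of these patches concrete, here is a
minimal, hedged sketch of how an HSI client might push one TX frame through this
stack, modeled on the cfhsi.c and ste_hsi.c code above. The example_ names are
hypothetical, the header paths are assumptions (the ST-Ericsson tree may differ),
and error handling is reduced to the essentials; it is a sketch, not part of the
patch series.

	/* Hypothetical client sketch; not from the patches above. */
	#include <linux/hsi/hsi.h>	/* assumed HSI framework header */
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static void example_write_done(struct hsi_msg *msg)
	{
		/* Transfer finished: drop the ACWAKE reference taken below. */
		hsi_stop_tx(msg->cl);
		hsi_free_msg(msg);
	}

	static int example_send(struct hsi_client *cl, u8 *buf, unsigned int len)
	{
		struct hsi_msg *msg;
		int err;

		/* One scatterlist entry, as in cfhsi.c. */
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		sg_init_one(msg->sgt.sgl, buf, len);
		msg->cl = cl;
		msg->channel = 0;
		msg->ttype = HSI_MSG_WRITE;
		msg->complete = example_write_done;

		/*
		 * Raises ACWAKE; in ste_hsi this drives the hsi0_acwake GPIO
		 * and takes a Vape regulator reference.
		 */
		err = hsi_start_tx(cl);
		if (err) {
			hsi_free_msg(msg);
			return err;
		}

		err = hsi_async_write(cl, msg);
		if (err) {
			hsi_stop_tx(cl);
			hsi_free_msg(msg);
		}
		return err;
	}

Under these assumptions the completion callback, not the caller, releases the
wake line, which matches the way the glue layer hands buffer ownership to the
controller until the write completes.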