diff options
author | Benn Pörscke <benn.porscke@stericsson.com> | 2011-10-07 15:31:57 +0200 |
---|---|---|
committer | Benn Pörscke <benn.porscke@stericsson.com> | 2011-10-07 15:31:57 +0200 |
commit | 47a4dbf83a75014d6b3467be18997894f1c617db (patch) | |
tree | 7f5d116db48205309fbc4ae0954f20ab8a651e46 /drivers/misc | |
parent | ea8a52f9f4bcc3420c38ae07f8378a2f18443970 (diff) |
Squashandroid-20111012
Change-Id: If0ae9fa8067740ab2ede33703c79ec134f204a5e
Diffstat (limited to 'drivers/misc')
47 files changed, 25011 insertions, 0 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 26386a92f5a..942a7a280cd 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -53,6 +53,10 @@ config AD525X_DPOT_SPI To compile this driver as a module, choose M here: the module will be called ad525x_dpot-spi. +config ANDROID_PMEM + bool "Android pmem allocator" + default y + config ATMEL_PWM tristate "Atmel AT32/AT91 PWM support" depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 @@ -62,6 +66,15 @@ config ATMEL_PWM purposes including software controlled power-efficient backlights on LCD displays, motor control, and waveform generation. +config AB8500_PWM + bool "AB8500 PWM support" + depends on AB8500_CORE + select HAVE_PWM + help + This driver exports functions to enable/disble/config/free Pulse + Width Modulation in the Analog Baseband Chip AB8500. + It is used by led and backlight driver to control the intensity. + config ATMEL_TCLIB bool "Atmel AT32/AT91 Timer/Counter Library" depends on (AVR32 || ARCH_AT91) @@ -199,6 +212,13 @@ config ENCLOSURE_SERVICES driver (SCSI/ATA) which supports enclosures or a SCSI enclosure device (SES) to use these services. +config KERNEL_DEBUGGER_CORE + bool "Kernel Debugger Core" + default n + ---help--- + Generic kernel debugging command processor used by low level + (interrupt context) platform-specific debuggers. + config SGI_XP tristate "Support communication between SGI SSIs" depends on NET @@ -304,6 +324,24 @@ config SENSORS_TSL2550 This driver can also be built as a module. If so, the module will be called tsl2550. +config SENSORS_AK8975 + tristate "AK8975 compass support" + default n + depends on I2C + help + If you say yes here you get support for Asahi Kasei's + orientation sensor AK8975. + +config SENSORS_BH1780 + tristate "ROHM BH1780GLI ambient light sensor" + depends on I2C && SYSFS + help + If you say yes here you get support for the ROHM BH1780GLI + ambient light sensor. + + This driver can also be built as a module. 
If so, the module + will be called bh1780gli. + config EP93XX_PWM tristate "EP93xx PWM support" depends on ARCH_EP93XX @@ -337,6 +375,14 @@ config TI_DAC7512 This driver can also be built as a module. If so, the module will be calles ti_dac7512. +config UID_STAT + bool "UID based statistics tracking exported to /proc/uid_stat" + default n + +config IFACE_STAT + bool "Persistent interface statistics tracking exported to /proc/iface_stat" + default n + config VMWARE_BALLOON tristate "VMware Balloon Driver" depends on X86 @@ -353,9 +399,90 @@ config VMWARE_BALLOON To compile this driver as a module, choose M here: the module will be called vmware_balloon. +config WL127X_RFKILL + tristate "Bluetooth power control driver for TI wl127x" + depends on RFKILL + default n + ---help--- + Creates an rfkill entry in sysfs for power control of Bluetooth + TI wl127x chips. + +config APANIC + bool "Android kernel panic diagnostics driver" + default n + ---help--- + Driver which handles kernel panics and attempts to write + critical debugging data to flash. + +config APANIC_PLABEL + string "Android panic dump flash partition label" + depends on APANIC + default "kpanic" + ---help--- + If your platform uses a different flash partition label for storing + crashdumps, enter it here. + +config ARM_CHARLCD + bool "ARM Ltd. Character LCD Driver" + depends on PLAT_VERSATILE + help + This is a driver for the character LCD found on the ARM Ltd. + Versatile and RealView Platform Baseboards. It doesn't do + very much more than display the text "ARM Linux" on the first + line and the Linux version on the second line, but that's + still useful. + +config STE_TRACE_MODEM + tristate "DB8500 trace Modem" + depends on ARCH_U8500 + default n + help + Select this option to enable modem tracing by APE + +config HWMEM + bool "Hardware memory driver" + default n + help + This driver provides a way to allocate contiguous system memory which + can be used by hardware. 
It also enables accessing hwmem allocated + memory buffers through a secure id which can be shared across processes. + +config DBX500_MLOADER + tristate "Modem firmware loader for db8500" + default n + depends on UX500_SOC_DB8500 || UX500_SOC_DB5500 + help + Provides a user interface to load modem firmware on dbx500 SOCs + +config STM_TRACE + bool "Ux500 STM Trace driver" + depends on ARCH_U8500 + default n + help + Simple System Trace Module driver. It allows to use and configure the + STM, either from kernel space, or from user space. + +config DISPDEV + bool "Display overlay device" + default n + help + This driver provides a way to use a second overlay for a display (in + addition to the framebuffer). The device allows for registration of + userspace buffers to be used with the overlay. + +config U5500_MBOX + bool "Mailbox support" + depends on (UX500_SOC_DB5500 && U5500_MODEM_IRQ) + default y + help + Add support for U5500 mailbox communication with modem side + source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" source "drivers/misc/iwmc3200top/Kconfig" +source "drivers/misc/i2s/Kconfig" +source "drivers/misc/shrm/Kconfig" +source "drivers/misc/audio_io_dev/Kconfig" endif # MISC_DEVICES diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 6ed06a19474..ef21612c261 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -14,8 +14,11 @@ obj-$(CONFIG_LKDTM) += lkdtm.o obj-$(CONFIG_TIFM_CORE) += tifm_core.o obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o obj-$(CONFIG_PHANTOM) += phantom.o +obj-$(CONFIG_ANDROID_PMEM) += pmem.o +obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o obj-$(CONFIG_SGI_IOC4) += ioc4.o obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o +obj-$(CONFIG_KERNEL_DEBUGGER_CORE) += kernel_debugger.o obj-$(CONFIG_KGDB_TESTS) += kgdbts.o obj-$(CONFIG_SGI_XP) += sgi-xp/ obj-$(CONFIG_SGI_GRU) += sgi-gru/ @@ -26,8 +29,24 @@ obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o obj-$(CONFIG_EP93XX_PWM) += 
ep93xx_pwm.o obj-$(CONFIG_DS1682) += ds1682.o obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o +obj-$(CONFIG_UID_STAT) += uid_stat.o +obj-$(CONFIG_IFACE_STAT) += iface_stat.o obj-$(CONFIG_C2PORT) += c2port/ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ obj-y += eeprom/ obj-y += cb710/ obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o +obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o +obj-$(CONFIG_APANIC) += apanic.o +obj-$(CONFIG_SENSORS_AK8975) += akm8975.o +obj-$(CONFIG_STM_I2S) += i2s/ +obj-$(CONFIG_STE_TRACE_MODEM) += db8500-modem-trace.o +obj-$(CONFIG_U8500_SHRM) += shrm/ +obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o +obj-$(CONFIG_STE_AUDIO_IO_DEV) += audio_io_dev/ +obj-$(CONFIG_HWMEM) += hwmem/ +obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o +obj-$(CONFIG_DBX500_MLOADER) += dbx500-mloader.o +obj-$(CONFIG_STM_TRACE) += stm.o +obj-$(CONFIG_DISPDEV) += dispdev/ +obj-$(CONFIG_U5500_MBOX) += mbox.o mbox_channels-db5500.o diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c new file mode 100644 index 00000000000..e56d9c993da --- /dev/null +++ b/drivers/misc/ab8500-pwm.c @@ -0,0 +1,209 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Arun R Murthy <arun.murthy@stericsson.com> + * License terms: GNU General Public License (GPL) version 2 + */ +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/pwm.h> +#include <linux/clk.h> +#include <linux/mfd/ab8500.h> +#include <linux/mfd/abx500.h> + +/* + * PWM Out generators + * Bank: 0x10 + */ +#define AB8500_PWM_OUT_CTRL1_REG 0x60 +#define AB8500_PWM_OUT_CTRL2_REG 0x61 +#define AB8500_PWM_OUT_CTRL7_REG 0x66 + +/* backlight driver constants */ +#define ENABLE_PWM 1 +#define DISABLE_PWM 0 + +struct pwm_device { + struct device *dev; + struct list_head node; + struct clk *clk; + const char *label; + unsigned int pwm_id; + bool clk_enabled; +}; + +static LIST_HEAD(pwm_list); + +int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) +{ + int ret = 0; + unsigned int 
higher_val, lower_val; + u8 reg; + + /* + * get the first 8 bits that are be written to + * AB8500_PWM_OUT_CTRL1_REG[0:7] + */ + lower_val = duty_ns & 0x00FF; + /* + * get bits [9:10] that are to be written to + * AB8500_PWM_OUT_CTRL2_REG[0:1] + */ + higher_val = ((duty_ns & 0x0300) >> 8); + + reg = AB8500_PWM_OUT_CTRL1_REG + ((pwm->pwm_id - 1) * 2); + + ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC, + reg, (u8)lower_val); + if (ret < 0) + return ret; + ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC, + (reg + 1), (u8)higher_val); + + return ret; +} +EXPORT_SYMBOL(pwm_config); + +int pwm_enable(struct pwm_device *pwm) +{ + int ret; + + if (!pwm->clk_enabled) { + ret = clk_enable(pwm->clk); + if (ret < 0) { + dev_err(pwm->dev, "failed to enable clock\n"); + return ret; + } + pwm->clk_enabled = true; + } + ret = abx500_mask_and_set_register_interruptible(pwm->dev, + AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, + 1 << (pwm->pwm_id-1), 1 << (pwm->pwm_id-1)); + if (ret < 0) + dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n", + pwm->label, ret); + return ret; +} +EXPORT_SYMBOL(pwm_enable); + +void pwm_disable(struct pwm_device *pwm) +{ + int ret; + + ret = abx500_mask_and_set_register_interruptible(pwm->dev, + AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, + 1 << (pwm->pwm_id-1), DISABLE_PWM); + /* + * Workaround to set PWM in disable. 
+ * If enable bit is not toggled the PWM might output 50/50 duty cycle + * even though it should be disabled + */ + ret &= abx500_mask_and_set_register_interruptible(pwm->dev, + AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, + 1 << (pwm->pwm_id-1), + ENABLE_PWM << (pwm->pwm_id-1)); + ret &= abx500_mask_and_set_register_interruptible(pwm->dev, + AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, + 1 << (pwm->pwm_id-1), DISABLE_PWM); + + if (ret < 0) + dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n", + pwm->label, ret); + if (pwm->clk_enabled) { + clk_disable(pwm->clk); + pwm->clk_enabled = false; + } + + return; +} +EXPORT_SYMBOL(pwm_disable); + +struct pwm_device *pwm_request(int pwm_id, const char *label) +{ + struct pwm_device *pwm; + + list_for_each_entry(pwm, &pwm_list, node) { + if (pwm->pwm_id == pwm_id) { + pwm->label = label; + pwm->pwm_id = pwm_id; + return pwm; + } + } + + return ERR_PTR(-ENOENT); +} +EXPORT_SYMBOL(pwm_request); + +void pwm_free(struct pwm_device *pwm) +{ + pwm_disable(pwm); +} +EXPORT_SYMBOL(pwm_free); + +static int __devinit ab8500_pwm_probe(struct platform_device *pdev) +{ + struct pwm_device *pwm; + int ret = 0; + + /* + * Nothing to be done in probe, this is required to get the + * device which is required for ab8500 read and write + */ + pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL); + if (pwm == NULL) { + dev_err(&pdev->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + pwm->dev = &pdev->dev; + pwm->pwm_id = pdev->id; + list_add_tail(&pwm->node, &pwm_list); + platform_set_drvdata(pdev, pwm); + + pwm->clk = clk_get(pwm->dev, NULL); + if (IS_ERR(pwm->clk)) { + dev_err(pwm->dev, "clock request failed\n"); + ret = PTR_ERR(pwm->clk); + kfree(pwm); + return ret; + } + pwm->clk_enabled = false; + dev_dbg(pwm->dev, "pwm probe successful\n"); + return ret; +} + +static int __devexit ab8500_pwm_remove(struct platform_device *pdev) +{ + struct pwm_device *pwm = platform_get_drvdata(pdev); + list_del(&pwm->node); + clk_put(pwm->clk); + 
dev_dbg(&pdev->dev, "pwm driver removed\n"); + kfree(pwm); + return 0; +} + +static struct platform_driver ab8500_pwm_driver = { + .driver = { + .name = "ab8500-pwm", + .owner = THIS_MODULE, + }, + .probe = ab8500_pwm_probe, + .remove = __devexit_p(ab8500_pwm_remove), +}; + +static int __init ab8500_pwm_init(void) +{ + return platform_driver_register(&ab8500_pwm_driver); +} + +static void __exit ab8500_pwm_exit(void) +{ + platform_driver_unregister(&ab8500_pwm_driver); +} + +subsys_initcall(ab8500_pwm_init); +module_exit(ab8500_pwm_exit); +MODULE_AUTHOR("Arun MURTHY <arun.murthy@stericsson.com>"); +MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); +MODULE_ALIAS("AB8500 PWM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/akm8975.c b/drivers/misc/akm8975.c new file mode 100644 index 00000000000..830d2897afd --- /dev/null +++ b/drivers/misc/akm8975.c @@ -0,0 +1,732 @@ +/* drivers/misc/akm8975.c - akm8975 compass driver + * + * Copyright (C) 2007-2008 HTC Corporation. + * Author: Hou-Kun Chen <houkun.chen@gmail.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/* + * Revised by AKM 2009/04/02 + * Revised by Motorola 2010/05/27 + * + */ + +#include <linux/interrupt.h> +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/irq.h> +#include <linux/miscdevice.h> +#include <linux/gpio.h> +#include <linux/uaccess.h> +#include <linux/delay.h> +#include <linux/input.h> +#include <linux/workqueue.h> +#include <linux/freezer.h> +#include <linux/akm8975.h> +#include <linux/earlysuspend.h> + +#define AK8975DRV_CALL_DBG 0 +#if AK8975DRV_CALL_DBG +#define FUNCDBG(msg) pr_err("%s:%s\n", __func__, msg); +#else +#define FUNCDBG(msg) +#endif + +#define AK8975DRV_DATA_DBG 0 +#define MAX_FAILURE_COUNT 10 + +struct akm8975_data { + struct i2c_client *this_client; + struct akm8975_platform_data *pdata; + struct input_dev *input_dev; + struct work_struct work; + struct mutex flags_lock; +#ifdef CONFIG_HAS_EARLYSUSPEND + struct early_suspend early_suspend; +#endif +}; + +/* +* Because misc devices can not carry a pointer from driver register to +* open, we keep this global. This limits the driver to a single instance. 
+*/ +struct akm8975_data *akmd_data; + +static DECLARE_WAIT_QUEUE_HEAD(open_wq); + +static atomic_t open_flag; + +static short m_flag; +static short a_flag; +static short t_flag; +static short mv_flag; + +static short akmd_delay; + +static ssize_t akm8975_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + return sprintf(buf, "%u\n", i2c_smbus_read_byte_data(client, + AK8975_REG_CNTL)); +} +static ssize_t akm8975_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + unsigned long val; + strict_strtoul(buf, 10, &val); + if (val > 0xff) + return -EINVAL; + i2c_smbus_write_byte_data(client, AK8975_REG_CNTL, val); + return count; +} +static DEVICE_ATTR(akm_ms1, S_IWUSR | S_IRUGO, akm8975_show, akm8975_store); + +static int akm8975_i2c_rxdata(struct akm8975_data *akm, char *buf, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = akm->this_client->addr, + .flags = 0, + .len = 1, + .buf = buf, + }, + { + .addr = akm->this_client->addr, + .flags = I2C_M_RD, + .len = length, + .buf = buf, + }, + }; + + FUNCDBG("called"); + + if (i2c_transfer(akm->this_client->adapter, msgs, 2) < 0) { + pr_err("akm8975_i2c_rxdata: transfer error\n"); + return EIO; + } else + return 0; +} + +static int akm8975_i2c_txdata(struct akm8975_data *akm, char *buf, int length) +{ + struct i2c_msg msgs[] = { + { + .addr = akm->this_client->addr, + .flags = 0, + .len = length, + .buf = buf, + }, + }; + + FUNCDBG("called"); + + if (i2c_transfer(akm->this_client->adapter, msgs, 1) < 0) { + pr_err("akm8975_i2c_txdata: transfer error\n"); + return -EIO; + } else + return 0; +} + +static void akm8975_ecs_report_value(struct akm8975_data *akm, short *rbuf) +{ + struct akm8975_data *data = i2c_get_clientdata(akm->this_client); + + FUNCDBG("called"); + +#if AK8975DRV_DATA_DBG + pr_info("akm8975_ecs_report_value: yaw = %d, pitch = %d, roll = 
%d\n", + rbuf[0], rbuf[1], rbuf[2]); + pr_info("tmp = %d, m_stat= %d, g_stat=%d\n", rbuf[3], rbuf[4], rbuf[5]); + pr_info("Acceleration: x = %d LSB, y = %d LSB, z = %d LSB\n", + rbuf[6], rbuf[7], rbuf[8]); + pr_info("Magnetic: x = %d LSB, y = %d LSB, z = %d LSB\n\n", + rbuf[9], rbuf[10], rbuf[11]); +#endif + mutex_lock(&akm->flags_lock); + /* Report magnetic sensor information */ + if (m_flag) { + input_report_abs(data->input_dev, ABS_RX, rbuf[0]); + input_report_abs(data->input_dev, ABS_RY, rbuf[1]); + input_report_abs(data->input_dev, ABS_RZ, rbuf[2]); + input_report_abs(data->input_dev, ABS_RUDDER, rbuf[4]); + } + + /* Report acceleration sensor information */ + if (a_flag) { + input_report_abs(data->input_dev, ABS_X, rbuf[6]); + input_report_abs(data->input_dev, ABS_Y, rbuf[7]); + input_report_abs(data->input_dev, ABS_Z, rbuf[8]); + input_report_abs(data->input_dev, ABS_WHEEL, rbuf[5]); + } + + /* Report temperature information */ + if (t_flag) + input_report_abs(data->input_dev, ABS_THROTTLE, rbuf[3]); + + if (mv_flag) { + input_report_abs(data->input_dev, ABS_HAT0X, rbuf[9]); + input_report_abs(data->input_dev, ABS_HAT0Y, rbuf[10]); + input_report_abs(data->input_dev, ABS_BRAKE, rbuf[11]); + } + mutex_unlock(&akm->flags_lock); + + input_sync(data->input_dev); +} + +static void akm8975_ecs_close_done(struct akm8975_data *akm) +{ + FUNCDBG("called"); + mutex_lock(&akm->flags_lock); + m_flag = 1; + a_flag = 1; + t_flag = 1; + mv_flag = 1; + mutex_unlock(&akm->flags_lock); +} + +static int akm_aot_open(struct inode *inode, struct file *file) +{ + int ret = -1; + + FUNCDBG("called"); + if (atomic_cmpxchg(&open_flag, 0, 1) == 0) { + wake_up(&open_wq); + ret = 0; + } + + ret = nonseekable_open(inode, file); + if (ret) + return ret; + + file->private_data = akmd_data; + + return ret; +} + +static int akm_aot_release(struct inode *inode, struct file *file) +{ + FUNCDBG("called"); + atomic_set(&open_flag, 0); + wake_up(&open_wq); + return 0; +} + +static int 
akm_aot_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *) arg; + short flag; + struct akm8975_data *akm = file->private_data; + + FUNCDBG("called"); + + switch (cmd) { + case ECS_IOCTL_APP_SET_MFLAG: + case ECS_IOCTL_APP_SET_AFLAG: + case ECS_IOCTL_APP_SET_MVFLAG: + if (copy_from_user(&flag, argp, sizeof(flag))) + return -EFAULT; + if (flag < 0 || flag > 1) + return -EINVAL; + break; + case ECS_IOCTL_APP_SET_DELAY: + if (copy_from_user(&flag, argp, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + mutex_lock(&akm->flags_lock); + switch (cmd) { + case ECS_IOCTL_APP_SET_MFLAG: + m_flag = flag; + break; + case ECS_IOCTL_APP_GET_MFLAG: + flag = m_flag; + break; + case ECS_IOCTL_APP_SET_AFLAG: + a_flag = flag; + break; + case ECS_IOCTL_APP_GET_AFLAG: + flag = a_flag; + break; + case ECS_IOCTL_APP_SET_MVFLAG: + mv_flag = flag; + break; + case ECS_IOCTL_APP_GET_MVFLAG: + flag = mv_flag; + break; + case ECS_IOCTL_APP_SET_DELAY: + akmd_delay = flag; + break; + case ECS_IOCTL_APP_GET_DELAY: + flag = akmd_delay; + break; + default: + return -ENOTTY; + } + mutex_unlock(&akm->flags_lock); + + switch (cmd) { + case ECS_IOCTL_APP_GET_MFLAG: + case ECS_IOCTL_APP_GET_AFLAG: + case ECS_IOCTL_APP_GET_MVFLAG: + case ECS_IOCTL_APP_GET_DELAY: + if (copy_to_user(argp, &flag, sizeof(flag))) + return -EFAULT; + break; + default: + break; + } + + return 0; +} + +static int akmd_open(struct inode *inode, struct file *file) +{ + int err = 0; + + FUNCDBG("called"); + err = nonseekable_open(inode, file); + if (err) + return err; + + file->private_data = akmd_data; + return 0; +} + +static int akmd_release(struct inode *inode, struct file *file) +{ + struct akm8975_data *akm = file->private_data; + + FUNCDBG("called"); + akm8975_ecs_close_done(akm); + return 0; +} + +static int akmd_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void 
__user *) arg; + + char rwbuf[16]; + int ret = -1; + int status; + short value[12]; + short delay; + struct akm8975_data *akm = file->private_data; + + FUNCDBG("called"); + + switch (cmd) { + case ECS_IOCTL_READ: + case ECS_IOCTL_WRITE: + if (copy_from_user(&rwbuf, argp, sizeof(rwbuf))) + return -EFAULT; + break; + + case ECS_IOCTL_SET_YPR: + if (copy_from_user(&value, argp, sizeof(value))) + return -EFAULT; + break; + + default: + break; + } + + switch (cmd) { + case ECS_IOCTL_READ: + if (rwbuf[0] < 1) + return -EINVAL; + + ret = akm8975_i2c_rxdata(akm, &rwbuf[1], rwbuf[0]); + if (ret < 0) + return ret; + break; + + case ECS_IOCTL_WRITE: + if (rwbuf[0] < 2) + return -EINVAL; + + ret = akm8975_i2c_txdata(akm, &rwbuf[1], rwbuf[0]); + if (ret < 0) + return ret; + break; + case ECS_IOCTL_SET_YPR: + akm8975_ecs_report_value(akm, value); + break; + + case ECS_IOCTL_GET_OPEN_STATUS: + wait_event_interruptible(open_wq, + (atomic_read(&open_flag) != 0)); + status = atomic_read(&open_flag); + break; + case ECS_IOCTL_GET_CLOSE_STATUS: + wait_event_interruptible(open_wq, + (atomic_read(&open_flag) == 0)); + status = atomic_read(&open_flag); + break; + + case ECS_IOCTL_GET_DELAY: + delay = akmd_delay; + break; + + default: + FUNCDBG("Unknown cmd\n"); + return -ENOTTY; + } + + switch (cmd) { + case ECS_IOCTL_READ: + if (copy_to_user(argp, &rwbuf, sizeof(rwbuf))) + return -EFAULT; + break; + case ECS_IOCTL_GET_OPEN_STATUS: + case ECS_IOCTL_GET_CLOSE_STATUS: + if (copy_to_user(argp, &status, sizeof(status))) + return -EFAULT; + break; + case ECS_IOCTL_GET_DELAY: + if (copy_to_user(argp, &delay, sizeof(delay))) + return -EFAULT; + break; + default: + break; + } + + return 0; +} + +/* needed to clear the int. 
pin */ +static void akm_work_func(struct work_struct *work) +{ + struct akm8975_data *akm = + container_of(work, struct akm8975_data, work); + + FUNCDBG("called"); + enable_irq(akm->this_client->irq); +} + +static irqreturn_t akm8975_interrupt(int irq, void *dev_id) +{ + struct akm8975_data *akm = dev_id; + FUNCDBG("called"); + + disable_irq_nosync(akm->this_client->irq); + schedule_work(&akm->work); + return IRQ_HANDLED; +} + +static int akm8975_power_off(struct akm8975_data *akm) +{ +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + if (akm->pdata->power_off) + akm->pdata->power_off(); + + return 0; +} + +static int akm8975_power_on(struct akm8975_data *akm) +{ + int err; + +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + if (akm->pdata->power_on) { + err = akm->pdata->power_on(); + if (err < 0) + return err; + } + return 0; +} + +static int akm8975_suspend(struct i2c_client *client, pm_message_t mesg) +{ + struct akm8975_data *akm = i2c_get_clientdata(client); + +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + /* TO DO: might need more work after power mgmt + is enabled */ + return akm8975_power_off(akm); +} + +static int akm8975_resume(struct i2c_client *client) +{ + struct akm8975_data *akm = i2c_get_clientdata(client); + +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + /* TO DO: might need more work after power mgmt + is enabled */ + return akm8975_power_on(akm); +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void akm8975_early_suspend(struct early_suspend *handler) +{ + struct akm8975_data *akm; + akm = container_of(handler, struct akm8975_data, early_suspend); + +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + akm8975_suspend(akm->this_client, PMSG_SUSPEND); +} + +static void akm8975_early_resume(struct early_suspend *handler) +{ + struct akm8975_data *akm; + akm = container_of(handler, struct akm8975_data, early_suspend); + +#if AK8975DRV_CALL_DBG + pr_info("%s\n", __func__); +#endif + 
akm8975_resume(akm->this_client); +} +#endif + + +static int akm8975_init_client(struct i2c_client *client) +{ + struct akm8975_data *data; + int ret; + + data = i2c_get_clientdata(client); + + ret = request_irq(client->irq, akm8975_interrupt, IRQF_TRIGGER_RISING, + "akm8975", data); + + if (ret < 0) { + pr_err("akm8975_init_client: request irq failed\n"); + goto err; + } + + init_waitqueue_head(&open_wq); + + mutex_lock(&data->flags_lock); + m_flag = 1; + a_flag = 1; + t_flag = 1; + mv_flag = 1; + mutex_unlock(&data->flags_lock); + + return 0; +err: + return ret; +} + +static const struct file_operations akmd_fops = { + .owner = THIS_MODULE, + .open = akmd_open, + .release = akmd_release, + .ioctl = akmd_ioctl, +}; + +static const struct file_operations akm_aot_fops = { + .owner = THIS_MODULE, + .open = akm_aot_open, + .release = akm_aot_release, + .ioctl = akm_aot_ioctl, +}; + +static struct miscdevice akm_aot_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "akm8975_aot", + .fops = &akm_aot_fops, +}; + +static struct miscdevice akmd_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "akm8975_dev", + .fops = &akmd_fops, +}; + +int akm8975_probe(struct i2c_client *client, + const struct i2c_device_id *devid) +{ + struct akm8975_data *akm; + int err; + FUNCDBG("called"); + + if (client->dev.platform_data == NULL) { + dev_err(&client->dev, "platform data is NULL. exiting.\n"); + err = -ENODEV; + goto exit_platform_data_null; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(&client->dev, "platform data is NULL. 
exiting.\n"); + err = -ENODEV; + goto exit_check_functionality_failed; + } + + akm = kzalloc(sizeof(struct akm8975_data), GFP_KERNEL); + if (!akm) { + dev_err(&client->dev, + "failed to allocate memory for module data\n"); + err = -ENOMEM; + goto exit_alloc_data_failed; + } + + akm->pdata = client->dev.platform_data; + + mutex_init(&akm->flags_lock); + INIT_WORK(&akm->work, akm_work_func); + i2c_set_clientdata(client, akm); + + err = akm8975_power_on(akm); + if (err < 0) + goto exit_power_on_failed; + + akm8975_init_client(client); + akm->this_client = client; + akmd_data = akm; + + akm->input_dev = input_allocate_device(); + if (!akm->input_dev) { + err = -ENOMEM; + dev_err(&akm->this_client->dev, + "input device allocate failed\n"); + goto exit_input_dev_alloc_failed; + } + + set_bit(EV_ABS, akm->input_dev->evbit); + + /* yaw */ + input_set_abs_params(akm->input_dev, ABS_RX, 0, 23040, 0, 0); + /* pitch */ + input_set_abs_params(akm->input_dev, ABS_RY, -11520, 11520, 0, 0); + /* roll */ + input_set_abs_params(akm->input_dev, ABS_RZ, -5760, 5760, 0, 0); + /* x-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_X, -5760, 5760, 0, 0); + /* y-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_Y, -5760, 5760, 0, 0); + /* z-axis acceleration */ + input_set_abs_params(akm->input_dev, ABS_Z, -5760, 5760, 0, 0); + /* temparature */ + input_set_abs_params(akm->input_dev, ABS_THROTTLE, -30, 85, 0, 0); + /* status of magnetic sensor */ + input_set_abs_params(akm->input_dev, ABS_RUDDER, 0, 3, 0, 0); + /* status of acceleration sensor */ + input_set_abs_params(akm->input_dev, ABS_WHEEL, 0, 3, 0, 0); + /* x-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_HAT0X, -20480, 20479, 0, 0); + /* y-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_HAT0Y, -20480, 20479, 0, 0); + /* z-axis of raw magnetic vector */ + input_set_abs_params(akm->input_dev, ABS_BRAKE, -20480, 20479, 0, 0); + + akm->input_dev->name = 
"compass"; + + err = input_register_device(akm->input_dev); + if (err) { + pr_err("akm8975_probe: Unable to register input device: %s\n", + akm->input_dev->name); + goto exit_input_register_device_failed; + } + + err = misc_register(&akmd_device); + if (err) { + pr_err("akm8975_probe: akmd_device register failed\n"); + goto exit_misc_device_register_failed; + } + + err = misc_register(&akm_aot_device); + if (err) { + pr_err("akm8975_probe: akm_aot_device register failed\n"); + goto exit_misc_device_register_failed; + } + + err = device_create_file(&client->dev, &dev_attr_akm_ms1); + +#ifdef CONFIG_HAS_EARLYSUSPEND + akm->early_suspend.suspend = akm8975_early_suspend; + akm->early_suspend.resume = akm8975_early_resume; + register_early_suspend(&akm->early_suspend); +#endif + return 0; + +exit_misc_device_register_failed: +exit_input_register_device_failed: + input_free_device(akm->input_dev); +exit_input_dev_alloc_failed: + akm8975_power_off(akm); +exit_power_on_failed: + kfree(akm); +exit_alloc_data_failed: +exit_check_functionality_failed: +exit_platform_data_null: + return err; +} + +static int __devexit akm8975_remove(struct i2c_client *client) +{ + struct akm8975_data *akm = i2c_get_clientdata(client); + FUNCDBG("called"); + free_irq(client->irq, NULL); + input_unregister_device(akm->input_dev); + misc_deregister(&akmd_device); + misc_deregister(&akm_aot_device); + akm8975_power_off(akm); + kfree(akm); + return 0; +} + +static const struct i2c_device_id akm8975_id[] = { + { "akm8975", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, akm8975_id); + +static struct i2c_driver akm8975_driver = { + .probe = akm8975_probe, + .remove = akm8975_remove, +#ifndef CONFIG_HAS_EARLYSUSPEND + .resume = akm8975_resume, + .suspend = akm8975_suspend, +#endif + .id_table = akm8975_id, + .driver = { + .name = "akm8975", + }, +}; + +static int __init akm8975_init(void) +{ + pr_info("AK8975 compass driver: init\n"); + FUNCDBG("AK8975 compass driver: init\n"); + return 
i2c_add_driver(&akm8975_driver); +} + +static void __exit akm8975_exit(void) +{ + FUNCDBG("AK8975 compass driver: exit\n"); + i2c_del_driver(&akm8975_driver); +} + +module_init(akm8975_init); +module_exit(akm8975_exit); + +MODULE_AUTHOR("Hou-Kun Chen <hk_chen@htc.com>"); +MODULE_DESCRIPTION("AK8975 compass driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/apanic.c b/drivers/misc/apanic.c new file mode 100644 index 00000000000..ca875f89da7 --- /dev/null +++ b/drivers/misc/apanic.c @@ -0,0 +1,606 @@ +/* drivers/misc/apanic.c + * + * Copyright (C) 2009 Google, Inc. + * Author: San Mehat <san@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/wakelock.h> +#include <linux/platform_device.h> +#include <linux/uaccess.h> +#include <linux/mtd/mtd.h> +#include <linux/notifier.h> +#include <linux/mtd/mtd.h> +#include <linux/debugfs.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <linux/preempt.h> + +extern void ram_console_enable_console(int); + +struct panic_header { + u32 magic; +#define PANIC_MAGIC 0xdeadf00d + + u32 version; +#define PHDR_VERSION 0x01 + + u32 console_offset; + u32 console_length; + + u32 threads_offset; + u32 threads_length; +}; + +struct apanic_data { + struct mtd_info *mtd; + struct panic_header curr; + void *bounce; + struct proc_dir_entry *apanic_console; + struct proc_dir_entry *apanic_threads; +}; + +static struct apanic_data drv_ctx; +static struct work_struct proc_removal_work; +static DEFINE_MUTEX(drv_mutex); + +static unsigned int *apanic_bbt; +static unsigned int apanic_erase_blocks; +static unsigned int apanic_good_blocks; + +static void set_bb(unsigned int block, unsigned int *bbt) +{ + unsigned int flag = 1; + + BUG_ON(block >= apanic_erase_blocks); + + flag = flag << (block%32); + apanic_bbt[block/32] |= flag; + apanic_good_blocks--; +} + +static unsigned int get_bb(unsigned int block, unsigned int *bbt) +{ + unsigned int flag; + + BUG_ON(block >= apanic_erase_blocks); + + flag = 1 << (block%32); + return apanic_bbt[block/32] & flag; +} + +static void alloc_bbt(struct mtd_info *mtd, unsigned int *bbt) +{ + int bbt_size; + apanic_erase_blocks = (mtd->size)>>(mtd->erasesize_shift); + bbt_size = (apanic_erase_blocks+32)/32; + + apanic_bbt = kmalloc(bbt_size*4, GFP_KERNEL); + memset(apanic_bbt, 
0, bbt_size*4); + apanic_good_blocks = apanic_erase_blocks; +} +static void scan_bbt(struct mtd_info *mtd, unsigned int *bbt) +{ + int i; + + for (i = 0; i < apanic_erase_blocks; i++) { + if (mtd->block_isbad(mtd, i*mtd->erasesize)) + set_bb(i, apanic_bbt); + } +} + +#define APANIC_INVALID_OFFSET 0xFFFFFFFF + +static unsigned int phy_offset(struct mtd_info *mtd, unsigned int offset) +{ + unsigned int logic_block = offset>>(mtd->erasesize_shift); + unsigned int phy_block; + unsigned good_block = 0; + + for (phy_block = 0; phy_block < apanic_erase_blocks; phy_block++) { + if (!get_bb(phy_block, apanic_bbt)) + good_block++; + if (good_block == (logic_block + 1)) + break; + } + + if (good_block != (logic_block + 1)) + return APANIC_INVALID_OFFSET; + + return offset + ((phy_block-logic_block)<<mtd->erasesize_shift); +} + +static void apanic_erase_callback(struct erase_info *done) +{ + wait_queue_head_t *wait_q = (wait_queue_head_t *) done->priv; + wake_up(wait_q); +} + +static int apanic_proc_read(char *buffer, char **start, off_t offset, + int count, int *peof, void *dat) +{ + struct apanic_data *ctx = &drv_ctx; + size_t file_length; + off_t file_offset; + unsigned int page_no; + off_t page_offset; + int rc; + size_t len; + + if (!count) + return 0; + + mutex_lock(&drv_mutex); + + switch ((int) dat) { + case 1: /* apanic_console */ + file_length = ctx->curr.console_length; + file_offset = ctx->curr.console_offset; + break; + case 2: /* apanic_threads */ + file_length = ctx->curr.threads_length; + file_offset = ctx->curr.threads_offset; + break; + default: + pr_err("Bad dat (%d)\n", (int) dat); + mutex_unlock(&drv_mutex); + return -EINVAL; + } + + if ((offset + count) > file_length) { + mutex_unlock(&drv_mutex); + return 0; + } + + /* We only support reading a maximum of a flash page */ + if (count > ctx->mtd->writesize) + count = ctx->mtd->writesize; + + page_no = (file_offset + offset) / ctx->mtd->writesize; + page_offset = (file_offset + offset) % 
ctx->mtd->writesize; + + + if (phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize)) + == APANIC_INVALID_OFFSET) { + pr_err("apanic: reading an invalid address\n"); + mutex_unlock(&drv_mutex); + return -EINVAL; + } + rc = ctx->mtd->read(ctx->mtd, + phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize)), + ctx->mtd->writesize, + &len, ctx->bounce); + + if (page_offset) + count -= page_offset; + memcpy(buffer, ctx->bounce + page_offset, count); + + *start = count; + + if ((offset + count) == file_length) + *peof = 1; + + mutex_unlock(&drv_mutex); + return count; +} + +static void mtd_panic_erase(void) +{ + struct apanic_data *ctx = &drv_ctx; + struct erase_info erase; + DECLARE_WAITQUEUE(wait, current); + wait_queue_head_t wait_q; + int rc, i; + + init_waitqueue_head(&wait_q); + erase.mtd = ctx->mtd; + erase.callback = apanic_erase_callback; + erase.len = ctx->mtd->erasesize; + erase.priv = (u_long)&wait_q; + for (i = 0; i < ctx->mtd->size; i += ctx->mtd->erasesize) { + erase.addr = i; + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&wait_q, &wait); + + if (get_bb(erase.addr>>ctx->mtd->erasesize_shift, apanic_bbt)) { + printk(KERN_WARNING + "apanic: Skipping erase of bad " + "block @%llx\n", erase.addr); + set_current_state(TASK_RUNNING); + remove_wait_queue(&wait_q, &wait); + continue; + } + + rc = ctx->mtd->erase(ctx->mtd, &erase); + if (rc) { + set_current_state(TASK_RUNNING); + remove_wait_queue(&wait_q, &wait); + printk(KERN_ERR + "apanic: Erase of 0x%llx, 0x%llx failed\n", + (unsigned long long) erase.addr, + (unsigned long long) erase.len); + if (rc == -EIO) { + if (ctx->mtd->block_markbad(ctx->mtd, + erase.addr)) { + printk(KERN_ERR + "apanic: Err marking blk bad\n"); + goto out; + } + printk(KERN_INFO + "apanic: Marked a bad block" + " @%llx\n", erase.addr); + set_bb(erase.addr>>ctx->mtd->erasesize_shift, + apanic_bbt); + continue; + } + goto out; + } + schedule(); + remove_wait_queue(&wait_q, &wait); + } + printk(KERN_DEBUG "apanic: %s partition 
erased\n", + CONFIG_APANIC_PLABEL); +out: + return; +} + +static void apanic_remove_proc_work(struct work_struct *work) +{ + struct apanic_data *ctx = &drv_ctx; + + mutex_lock(&drv_mutex); + mtd_panic_erase(); + memset(&ctx->curr, 0, sizeof(struct panic_header)); + if (ctx->apanic_console) { + remove_proc_entry("apanic_console", NULL); + ctx->apanic_console = NULL; + } + if (ctx->apanic_threads) { + remove_proc_entry("apanic_threads", NULL); + ctx->apanic_threads = NULL; + } + mutex_unlock(&drv_mutex); +} + +static int apanic_proc_write(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + schedule_work(&proc_removal_work); + return count; +} + +static void mtd_panic_notify_add(struct mtd_info *mtd) +{ + struct apanic_data *ctx = &drv_ctx; + struct panic_header *hdr = ctx->bounce; + size_t len; + int rc; + int proc_entry_created = 0; + + if (strcmp(mtd->name, CONFIG_APANIC_PLABEL)) + return; + + ctx->mtd = mtd; + + alloc_bbt(mtd, apanic_bbt); + scan_bbt(mtd, apanic_bbt); + + if (apanic_good_blocks == 0) { + printk(KERN_ERR "apanic: no any good blocks?!\n"); + goto out_err; + } + + rc = mtd->read(mtd, phy_offset(mtd, 0), mtd->writesize, + &len, ctx->bounce); + if (rc && rc == -EBADMSG) { + printk(KERN_WARNING + "apanic: Bad ECC on block 0 (ignored)\n"); + } else if (rc && rc != -EUCLEAN) { + printk(KERN_ERR "apanic: Error reading block 0 (%d)\n", rc); + goto out_err; + } + + if (len != mtd->writesize) { + printk(KERN_ERR "apanic: Bad read size (%d)\n", rc); + goto out_err; + } + + printk(KERN_INFO "apanic: Bound to mtd partition '%s'\n", mtd->name); + + if (hdr->magic != PANIC_MAGIC) { + printk(KERN_INFO "apanic: No panic data available\n"); + mtd_panic_erase(); + return; + } + + if (hdr->version != PHDR_VERSION) { + printk(KERN_INFO "apanic: Version mismatch (%d != %d)\n", + hdr->version, PHDR_VERSION); + mtd_panic_erase(); + return; + } + + memcpy(&ctx->curr, hdr, sizeof(struct panic_header)); + + printk(KERN_INFO "apanic: c(%u, %u) 
t(%u, %u)\n", + hdr->console_offset, hdr->console_length, + hdr->threads_offset, hdr->threads_length); + + if (hdr->console_length) { + ctx->apanic_console = create_proc_entry("apanic_console", + S_IFREG | S_IRUGO, NULL); + if (!ctx->apanic_console) + printk(KERN_ERR "%s: failed creating procfile\n", + __func__); + else { + ctx->apanic_console->read_proc = apanic_proc_read; + ctx->apanic_console->write_proc = apanic_proc_write; + ctx->apanic_console->size = hdr->console_length; + ctx->apanic_console->data = (void *) 1; + proc_entry_created = 1; + } + } + + if (hdr->threads_length) { + ctx->apanic_threads = create_proc_entry("apanic_threads", + S_IFREG | S_IRUGO, NULL); + if (!ctx->apanic_threads) + printk(KERN_ERR "%s: failed creating procfile\n", + __func__); + else { + ctx->apanic_threads->read_proc = apanic_proc_read; + ctx->apanic_threads->write_proc = apanic_proc_write; + ctx->apanic_threads->size = hdr->threads_length; + ctx->apanic_threads->data = (void *) 2; + proc_entry_created = 1; + } + } + + if (!proc_entry_created) + mtd_panic_erase(); + + return; +out_err: + ctx->mtd = NULL; +} + +static void mtd_panic_notify_remove(struct mtd_info *mtd) +{ + struct apanic_data *ctx = &drv_ctx; + if (mtd == ctx->mtd) { + ctx->mtd = NULL; + printk(KERN_INFO "apanic: Unbound from %s\n", mtd->name); + } +} + +static struct mtd_notifier mtd_panic_notifier = { + .add = mtd_panic_notify_add, + .remove = mtd_panic_notify_remove, +}; + +static int in_panic = 0; + +static int apanic_writeflashpage(struct mtd_info *mtd, loff_t to, + const u_char *buf) +{ + int rc; + size_t wlen; + int panic = in_interrupt() | in_atomic(); + + if (panic && !mtd->panic_write) { + printk(KERN_EMERG "%s: No panic_write available\n", __func__); + return 0; + } else if (!panic && !mtd->write) { + printk(KERN_EMERG "%s: No write available\n", __func__); + return 0; + } + + to = phy_offset(mtd, to); + if (to == APANIC_INVALID_OFFSET) { + printk(KERN_EMERG "apanic: write to invalid address\n"); + return 
0; + } + + if (panic) + rc = mtd->panic_write(mtd, to, mtd->writesize, &wlen, buf); + else + rc = mtd->write(mtd, to, mtd->writesize, &wlen, buf); + + if (rc) { + printk(KERN_EMERG + "%s: Error writing data to flash (%d)\n", + __func__, rc); + return rc; + } + + return wlen; +} + +extern int log_buf_copy(char *dest, int idx, int len); +extern void log_buf_clear(void); + +/* + * Writes the contents of the console to the specified offset in flash. + * Returns number of bytes written + */ +static int apanic_write_console(struct mtd_info *mtd, unsigned int off) +{ + struct apanic_data *ctx = &drv_ctx; + int saved_oip; + int idx = 0; + int rc, rc2; + unsigned int last_chunk = 0; + + while (!last_chunk) { + saved_oip = oops_in_progress; + oops_in_progress = 1; + rc = log_buf_copy(ctx->bounce, idx, mtd->writesize); + if (rc < 0) + break; + + if (rc != mtd->writesize) + last_chunk = rc; + + oops_in_progress = saved_oip; + if (rc <= 0) + break; + if (rc != mtd->writesize) + memset(ctx->bounce + rc, 0, mtd->writesize - rc); + + rc2 = apanic_writeflashpage(mtd, off, ctx->bounce); + if (rc2 <= 0) { + printk(KERN_EMERG + "apanic: Flash write failed (%d)\n", rc2); + return idx; + } + if (!last_chunk) + idx += rc2; + else + idx += last_chunk; + off += rc2; + } + return idx; +} + +static int apanic(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct apanic_data *ctx = &drv_ctx; + struct panic_header *hdr = (struct panic_header *) ctx->bounce; + int console_offset = 0; + int console_len = 0; + int threads_offset = 0; + int threads_len = 0; + int rc; + + if (in_panic) + return NOTIFY_DONE; + in_panic = 1; +#ifdef CONFIG_PREEMPT + /* Ensure that cond_resched() won't try to preempt anybody */ + add_preempt_count(PREEMPT_ACTIVE); +#endif + touch_softlockup_watchdog(); + + if (!ctx->mtd) + goto out; + + if (ctx->curr.magic) { + printk(KERN_EMERG "Crash partition in use!\n"); + goto out; + } + console_offset = ctx->mtd->writesize; + + /* + * Write out the console + 
*/ + console_len = apanic_write_console(ctx->mtd, console_offset); + if (console_len < 0) { + printk(KERN_EMERG "Error writing console to panic log! (%d)\n", + console_len); + console_len = 0; + } + + /* + * Write out all threads + */ + threads_offset = ALIGN(console_offset + console_len, + ctx->mtd->writesize); + if (!threads_offset) + threads_offset = ctx->mtd->writesize; + + ram_console_enable_console(0); + + log_buf_clear(); + show_state_filter(0); + threads_len = apanic_write_console(ctx->mtd, threads_offset); + if (threads_len < 0) { + printk(KERN_EMERG "Error writing threads to panic log! (%d)\n", + threads_len); + threads_len = 0; + } + + /* + * Finally write the panic header + */ + memset(ctx->bounce, 0, PAGE_SIZE); + hdr->magic = PANIC_MAGIC; + hdr->version = PHDR_VERSION; + + hdr->console_offset = console_offset; + hdr->console_length = console_len; + + hdr->threads_offset = threads_offset; + hdr->threads_length = threads_len; + + rc = apanic_writeflashpage(ctx->mtd, 0, ctx->bounce); + if (rc <= 0) { + printk(KERN_EMERG "apanic: Header write failed (%d)\n", + rc); + goto out; + } + + printk(KERN_EMERG "apanic: Panic dump sucessfully written to flash\n"); + + out: +#ifdef CONFIG_PREEMPT + sub_preempt_count(PREEMPT_ACTIVE); +#endif + in_panic = 0; + return NOTIFY_DONE; +} + +static struct notifier_block panic_blk = { + .notifier_call = apanic, +}; + +static int panic_dbg_get(void *data, u64 *val) +{ + apanic(NULL, 0, NULL); + return 0; +} + +static int panic_dbg_set(void *data, u64 val) +{ + BUG(); + return -1; +} + +DEFINE_SIMPLE_ATTRIBUTE(panic_dbg_fops, panic_dbg_get, panic_dbg_set, "%llu\n"); + +int __init apanic_init(void) +{ + register_mtd_user(&mtd_panic_notifier); + atomic_notifier_chain_register(&panic_notifier_list, &panic_blk); + debugfs_create_file("apanic", 0644, NULL, NULL, &panic_dbg_fops); + memset(&drv_ctx, 0, sizeof(drv_ctx)); + drv_ctx.bounce = (void *) __get_free_page(GFP_KERNEL); + INIT_WORK(&proc_removal_work, 
apanic_remove_proc_work); + printk(KERN_INFO "Android kernel panic handler initialized (bind=%s)\n", + CONFIG_APANIC_PLABEL); + return 0; +} + +module_init(apanic_init); diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c new file mode 100644 index 00000000000..9e3879ef58f --- /dev/null +++ b/drivers/misc/arm-charlcd.c @@ -0,0 +1,396 @@ +/* + * Driver for the on-board character LCD found on some ARM reference boards + * This is basically an Hitachi HD44780 LCD with a custom IP block to drive it + * http://en.wikipedia.org/wiki/HD44780_Character_LCD + * Currently it will just display the text "ARM Linux" and the linux version + * + * License terms: GNU General Public License (GPL) version 2 + * Author: Linus Walleij <triad@df.lth.se> + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <generated/utsrelease.h> + +#define DRIVERNAME "arm-charlcd" +#define CHARLCD_TIMEOUT (msecs_to_jiffies(1000)) + +/* Offsets to registers */ +#define CHAR_COM 0x00U +#define CHAR_DAT 0x04U +#define CHAR_RD 0x08U +#define CHAR_RAW 0x0CU +#define CHAR_MASK 0x10U +#define CHAR_STAT 0x14U + +#define CHAR_RAW_CLEAR 0x00000000U +#define CHAR_RAW_VALID 0x00000100U + +/* Hitachi HD44780 display commands */ +#define HD_CLEAR 0x01U +#define HD_HOME 0x02U +#define HD_ENTRYMODE 0x04U +#define HD_ENTRYMODE_INCREMENT 0x02U +#define HD_ENTRYMODE_SHIFT 0x01U +#define HD_DISPCTRL 0x08U +#define HD_DISPCTRL_ON 0x04U +#define HD_DISPCTRL_CURSOR_ON 0x02U +#define HD_DISPCTRL_CURSOR_BLINK 0x01U +#define HD_CRSR_SHIFT 0x10U +#define HD_CRSR_SHIFT_DISPLAY 0x08U +#define HD_CRSR_SHIFT_DISPLAY_RIGHT 0x04U +#define HD_FUNCSET 0x20U +#define HD_FUNCSET_8BIT 0x10U +#define HD_FUNCSET_2_LINES 0x08U +#define HD_FUNCSET_FONT_5X10 0x04U +#define HD_SET_CGRAM 0x40U +#define HD_SET_DDRAM 
0x80U +#define HD_BUSY_FLAG 0x80U + +/** + * @dev: a pointer back to containing device + * @phybase: the offset to the controller in physical memory + * @physize: the size of the physical page + * @virtbase: the offset to the controller in virtual memory + * @irq: reserved interrupt number + * @complete: completion structure for the last LCD command + */ +struct charlcd { + struct device *dev; + u32 phybase; + u32 physize; + void __iomem *virtbase; + int irq; + struct completion complete; + struct delayed_work init_work; +}; + +static irqreturn_t charlcd_interrupt(int irq, void *data) +{ + struct charlcd *lcd = data; + u8 status; + + status = readl(lcd->virtbase + CHAR_STAT) & 0x01; + /* Clear IRQ */ + writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW); + if (status) + complete(&lcd->complete); + else + dev_info(lcd->dev, "Spurious IRQ (%02x)\n", status); + return IRQ_HANDLED; +} + + +static void charlcd_wait_complete_irq(struct charlcd *lcd) +{ + int ret; + + ret = wait_for_completion_interruptible_timeout(&lcd->complete, + CHARLCD_TIMEOUT); + /* Disable IRQ after completion */ + writel(0x00, lcd->virtbase + CHAR_MASK); + + if (ret < 0) { + dev_err(lcd->dev, + "wait_for_completion_interruptible_timeout() " + "returned %d waiting for ready\n", ret); + return; + } + + if (ret == 0) { + dev_err(lcd->dev, "charlcd controller timed out " + "waiting for ready\n"); + return; + } +} + +static u8 charlcd_4bit_read_char(struct charlcd *lcd) +{ + u8 data; + u32 val; + int i; + + /* If we can, use an IRQ to wait for the data, else poll */ + if (lcd->irq >= 0) + charlcd_wait_complete_irq(lcd); + else { + i = 0; + val = 0; + while (!(val & CHAR_RAW_VALID) && i < 10) { + udelay(100); + val = readl(lcd->virtbase + CHAR_RAW); + i++; + } + + writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW); + } + msleep(1); + + /* Read the 4 high bits of the data */ + data = readl(lcd->virtbase + CHAR_RD) & 0xf0; + + /* + * The second read for the low bits does not trigger an IRQ + * so in this case 
we have to poll for the 4 lower bits + */ + i = 0; + val = 0; + while (!(val & CHAR_RAW_VALID) && i < 10) { + udelay(100); + val = readl(lcd->virtbase + CHAR_RAW); + i++; + } + writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW); + msleep(1); + + /* Read the 4 low bits of the data */ + data |= (readl(lcd->virtbase + CHAR_RD) >> 4) & 0x0f; + + return data; +} + +static bool charlcd_4bit_read_bf(struct charlcd *lcd) +{ + if (lcd->irq >= 0) { + /* + * If we'll use IRQs to wait for the busyflag, clear any + * pending flag and enable IRQ + */ + writel(CHAR_RAW_CLEAR, lcd->virtbase + CHAR_RAW); + init_completion(&lcd->complete); + writel(0x01, lcd->virtbase + CHAR_MASK); + } + readl(lcd->virtbase + CHAR_COM); + return charlcd_4bit_read_char(lcd) & HD_BUSY_FLAG ? true : false; +} + +static void charlcd_4bit_wait_busy(struct charlcd *lcd) +{ + int retries = 50; + + udelay(100); + while (charlcd_4bit_read_bf(lcd) && retries) + retries--; + if (!retries) + dev_err(lcd->dev, "timeout waiting for busyflag\n"); +} + +static void charlcd_4bit_command(struct charlcd *lcd, u8 cmd) +{ + u32 cmdlo = (cmd << 4) & 0xf0; + u32 cmdhi = (cmd & 0xf0); + + writel(cmdhi, lcd->virtbase + CHAR_COM); + udelay(10); + writel(cmdlo, lcd->virtbase + CHAR_COM); + charlcd_4bit_wait_busy(lcd); +} + +static void charlcd_4bit_char(struct charlcd *lcd, u8 ch) +{ + u32 chlo = (ch << 4) & 0xf0; + u32 chhi = (ch & 0xf0); + + writel(chhi, lcd->virtbase + CHAR_DAT); + udelay(10); + writel(chlo, lcd->virtbase + CHAR_DAT); + charlcd_4bit_wait_busy(lcd); +} + +static void charlcd_4bit_print(struct charlcd *lcd, int line, const char *str) +{ + u8 offset; + int i; + + /* + * We support line 0, 1 + * Line 1 runs from 0x00..0x27 + * Line 2 runs from 0x28..0x4f + */ + if (line == 0) + offset = 0; + else if (line == 1) + offset = 0x28; + else + return; + + /* Set offset */ + charlcd_4bit_command(lcd, HD_SET_DDRAM | offset); + + /* Send string */ + for (i = 0; i < strlen(str) && i < 0x28; i++) + charlcd_4bit_char(lcd, 
str[i]); +} + +static void charlcd_4bit_init(struct charlcd *lcd) +{ + /* These commands cannot be checked with the busy flag */ + writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM); + msleep(5); + writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM); + udelay(100); + writel(HD_FUNCSET | HD_FUNCSET_8BIT, lcd->virtbase + CHAR_COM); + udelay(100); + /* Go to 4bit mode */ + writel(HD_FUNCSET, lcd->virtbase + CHAR_COM); + udelay(100); + /* + * 4bit mode, 2 lines, 5x8 font, after this the number of lines + * and the font cannot be changed until the next initialization sequence + */ + charlcd_4bit_command(lcd, HD_FUNCSET | HD_FUNCSET_2_LINES); + charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON); + charlcd_4bit_command(lcd, HD_ENTRYMODE | HD_ENTRYMODE_INCREMENT); + charlcd_4bit_command(lcd, HD_CLEAR); + charlcd_4bit_command(lcd, HD_HOME); + /* Put something useful in the display */ + charlcd_4bit_print(lcd, 0, "ARM Linux"); + charlcd_4bit_print(lcd, 1, UTS_RELEASE); +} + +static void charlcd_init_work(struct work_struct *work) +{ + struct charlcd *lcd = + container_of(work, struct charlcd, init_work.work); + + charlcd_4bit_init(lcd); +} + +static int __init charlcd_probe(struct platform_device *pdev) +{ + int ret; + struct charlcd *lcd; + struct resource *res; + + lcd = kzalloc(sizeof(struct charlcd), GFP_KERNEL); + if (!lcd) + return -ENOMEM; + + lcd->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENOENT; + goto out_no_resource; + } + lcd->phybase = res->start; + lcd->physize = resource_size(res); + + if (request_mem_region(lcd->phybase, lcd->physize, + DRIVERNAME) == NULL) { + ret = -EBUSY; + goto out_no_memregion; + } + + lcd->virtbase = ioremap(lcd->phybase, lcd->physize); + if (!lcd->virtbase) { + ret = -ENOMEM; + goto out_no_remap; + } + + lcd->irq = platform_get_irq(pdev, 0); + /* If no IRQ is supplied, we'll survive without it */ + if (lcd->irq >= 0) { + if (request_irq(lcd->irq, 
charlcd_interrupt, IRQF_DISABLED, + DRIVERNAME, lcd)) { + ret = -EIO; + goto out_no_irq; + } + } + + platform_set_drvdata(pdev, lcd); + + /* + * Initialize the display in a delayed work, because + * it is VERY slow and would slow down the boot of the system. + */ + INIT_DELAYED_WORK(&lcd->init_work, charlcd_init_work); + schedule_delayed_work(&lcd->init_work, 0); + + dev_info(&pdev->dev, "initalized ARM character LCD at %08x\n", + lcd->phybase); + + return 0; + +out_no_irq: + iounmap(lcd->virtbase); +out_no_remap: + platform_set_drvdata(pdev, NULL); +out_no_memregion: + release_mem_region(lcd->phybase, SZ_4K); +out_no_resource: + kfree(lcd); + return ret; +} + +static int __exit charlcd_remove(struct platform_device *pdev) +{ + struct charlcd *lcd = platform_get_drvdata(pdev); + + if (lcd) { + free_irq(lcd->irq, lcd); + iounmap(lcd->virtbase); + release_mem_region(lcd->phybase, lcd->physize); + platform_set_drvdata(pdev, NULL); + kfree(lcd); + } + + return 0; +} + +static int charlcd_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct charlcd *lcd = platform_get_drvdata(pdev); + + /* Power the display off */ + charlcd_4bit_command(lcd, HD_DISPCTRL); + return 0; +} + +static int charlcd_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct charlcd *lcd = platform_get_drvdata(pdev); + + /* Turn the display back on */ + charlcd_4bit_command(lcd, HD_DISPCTRL | HD_DISPCTRL_ON); + return 0; +} + +static const struct dev_pm_ops charlcd_pm_ops = { + .suspend = charlcd_suspend, + .resume = charlcd_resume, +}; + +static struct platform_driver charlcd_driver = { + .driver = { + .name = DRIVERNAME, + .owner = THIS_MODULE, + .pm = &charlcd_pm_ops, + }, + .remove = __exit_p(charlcd_remove), +}; + +static int __init charlcd_init(void) +{ + return platform_driver_probe(&charlcd_driver, charlcd_probe); +} + +static void __exit charlcd_exit(void) +{ + platform_driver_unregister(&charlcd_driver); 
+} + +module_init(charlcd_init); +module_exit(charlcd_exit); + +MODULE_AUTHOR("Linus Walleij <triad@df.lth.se>"); +MODULE_DESCRIPTION("ARM Character LCD Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/audio_io_dev/Kconfig b/drivers/misc/audio_io_dev/Kconfig new file mode 100644 index 00000000000..57bb77172f7 --- /dev/null +++ b/drivers/misc/audio_io_dev/Kconfig @@ -0,0 +1,11 @@ +# +# AB8500 Audio IO Device Driver configuration +# +config STE_AUDIO_IO_DEV + bool "AB8500 Audio IO device driver" + depends on ARCH_U8500 && AB8500_CORE && STM_MSP_I2S + default y + ---help--- + If you say Y here, you will enable the AB8500 Audio IO device driver. + + If unsure, say N. diff --git a/drivers/misc/audio_io_dev/Makefile b/drivers/misc/audio_io_dev/Makefile new file mode 100644 index 00000000000..44b21fcc573 --- /dev/null +++ b/drivers/misc/audio_io_dev/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for AB8500 device drivers +# +obj-$(CONFIG_STE_AUDIO_IO_DEV) += ste_audio_io.o +ste_audio_io-objs := ste_audio_io_dev.o\ + ste_audio_io_core.o\ + ste_audio_io_func.o\ + ste_audio_io_hwctrl_common.o + diff --git a/drivers/misc/audio_io_dev/ste_audio_io_ab8500_reg_defs.h b/drivers/misc/audio_io_dev/ste_audio_io_ab8500_reg_defs.h new file mode 100644 index 00000000000..1436430f7de --- /dev/null +++ b/drivers/misc/audio_io_dev/ste_audio_io_ab8500_reg_defs.h @@ -0,0 +1,349 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Deepak KARDA/ deepak.karda@stericsson.com for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2. 
+ */ + + +#ifndef _AUDIOIO_REG_DEFS_H_ +#define _AUDIOIO_REG_DEFS_H_ + + + /* Registers */ +#define POWER_UP_CONTROL_REG 0x0D00 +#define SOFTWARE_RESET_REG 0x0D01 +#define DIGITAL_AD_CHANNELS_ENABLE_REG 0x0D02 +#define DIGITAL_DA_CHANNELS_ENABLE_REG 0x0D03 +#define LOW_POWER_HS_EAR_CONF_REG 0x0D04 +#define LINE_IN_MIC_CONF_REG 0x0D05 +#define DMIC_ENABLE_REG 0x0D06 +#define ADC_DAC_ENABLE_REG 0x0D07 +#define ANALOG_OUTPUT_ENABLE_REG 0x0D08 +#define DIGITAL_OUTPUT_ENABLE_REG 0x0D09 +#define MUTE_HS_EAR_REG 0x0D0A +#define SHORT_CIRCUIT_DISABLE_REG 0x0D0B +#define NCP_ENABLE_HS_AUTOSTART_REG 0x0D0C +#define ENVELOPE_THRESHOLD_REG 0x0D0D +#define ENVELOPE_DECAY_TIME_REG 0x0D0E +#define VIB_DRIVER_CONF_REG 0x0D0F +#define PWM_VIBNL_CONF_REG 0x0D10 +#define PWM_VIBPL_CONF_REG 0x0D11 +#define PWM_VIBNR_CONF_REG 0x0D12 +#define PWM_VIBPR_CONF_REG 0x0D13 +#define ANALOG_MIC1_GAIN_REG 0x0D14 +#define ANALOG_MIC2_GAIN_REG 0x0D15 +#define ANALOG_HS_GAIN_REG 0x0D16 +#define ANALOG_LINE_IN_GAIN_REG 0x0D17 +#define LINE_IN_TO_HSL_GAIN_REG 0x0D18 +#define LINE_IN_TO_HSR_GAIN_REG 0x0D19 +#define AD_FILTER_CONF_REG 0x0D1A +#define IF0_IF1_MASTER_CONF_REG 0x0D1B +#define IF0_CONF_REG 0x0D1C +#define TDM_IF_BYPASS_B_FIFO_REG 0x0D1D +#define IF1_CONF_REG 0x0D1E +#define AD_ALLOCATION_TO_SLOT0_1_REG 0x0D1F +#define AD_ALLOCATION_TO_SLOT2_3_REG 0x0D20 +#define AD_ALLOCATION_TO_SLOT4_5_REG 0x0D21 +#define AD_ALLOCATION_TO_SLOT6_7_REG 0x0D22 +#define AD_ALLOCATION_TO_SLOT8_9_REG 0x0D23 +#define AD_ALLOCATION_TO_SLOT10_11_REG 0x0D24 +#define AD_ALLOCATION_TO_SLOT12_13_REG 0x0D25 +#define AD_ALLOCATION_TO_SLOT14_15_REG 0x0D26 +#define AD_ALLOCATION_TO_SLOT16_17_REG 0x0D27 +#define AD_ALLOCATION_TO_SLOT18_19_REG 0x0D28 +#define AD_ALLOCATION_TO_SLOT20_21_REG 0x0D29 +#define AD_ALLOCATION_TO_SLOT22_23_REG 0x0D2A +#define AD_ALLOCATION_TO_SLOT24_25_REG 0x0D2B +#define AD_ALLOCATION_TO_SLOT26_27_REG 0x0D2C +#define AD_ALLOCATION_TO_SLOT28_29_REG 0x0D2D +#define AD_ALLOCATION_TO_SLOT30_31_REG 
0x0D2E +#define AD_SLOT_0_TO_7_TRISTATE_REG 0x0D2F +#define AD_SLOT_8_TO_15_TRISTATE_REG 0x0D30 +#define AD_SLOT_16_TO_23_TRISTATE_REG 0x0D31 +#define AD_SLOT_24_TO_31_TRISTATE_REG 0x0D32 +#define SLOT_SELECTION_TO_DA1_REG 0x0D33 +#define SLOT_SELECTION_TO_DA2_REG 0x0D34 +#define SLOT_SELECTION_TO_DA3_REG 0x0D35 +#define SLOT_SELECTION_TO_DA4_REG 0x0D36 +#define SLOT_SELECTION_TO_DA5_REG 0x0D37 +#define SLOT_SELECTION_TO_DA6_REG 0x0D38 +#define SLOT_SELECTION_TO_DA7_REG 0x0D39 +#define SLOT_SELECTION_TO_DA8_REG 0x0D3A +#define CLASS_D_EMI_PARALLEL_CONF_REG 0x0D3B +#define CLASS_D_PATH_CONTROL_REG 0x0D3C +#define CLASS_D_DITHER_CONTROL_REG 0x0D3D +#define DMIC_DECIMATOR_FILTER_REG 0x0D3E +#define DIGITAL_MUXES_REG1 0x0D3F +#define DIGITAL_MUXES_REG2 0x0D40 +#define AD1_DIGITAL_GAIN_REG 0x0D41 +#define AD2_DIGITAL_GAIN_REG 0x0D42 +#define AD3_DIGITAL_GAIN_REG 0x0D43 +#define AD4_DIGITAL_GAIN_REG 0x0D44 +#define AD5_DIGITAL_GAIN_REG 0x0D45 +#define AD6_DIGITAL_GAIN_REG 0x0D46 +#define DA1_DIGITAL_GAIN_REG 0x0D47 +#define DA2_DIGITAL_GAIN_REG 0x0D48 +#define DA3_DIGITAL_GAIN_REG 0x0D49 +#define DA4_DIGITAL_GAIN_REG 0x0D4A +#define DA5_DIGITAL_GAIN_REG 0x0D4B +#define DA6_DIGITAL_GAIN_REG 0x0D4C +#define AD1_TO_HFL_DIGITAL_GAIN_REG 0x0D4D +#define AD2_TO_HFR_DIGITAL_GAIN_REG 0x0D4E +#define HSL_EAR_DIGITAL_GAIN_REG 0x0D4F +#define HSR_DIGITAL_GAIN_REG 0x0D50 +#define SIDETONE_FIR1_GAIN_REG 0x0D51 +#define SIDETONE_FIR2_GAIN_REG 0x0D52 +#define ANC_FILTER_CONTROL_REG 0x0D53 +#define ANC_WARPED_GAIN_REG 0x0D54 +#define ANC_FIR_OUTPUT_GAIN_REG 0x0D55 +#define ANC_IIR_OUTPUT_GAIN_REG 0x0D56 +#define ANC_FIR_COEFF_MSB_REG 0x0D57 +#define ANC_FIR_COEFF_LSB_REG 0x0D58 +#define ANC_IIR_COEFF_MSB_REG 0x0D59 +#define ANC_IIR_COEFF_LSB_REG 0x0D5A +#define ANC_WARP_DELAY_MSB_REG 0x0D5B +#define ANC_WARP_DELAY_LSB_REG 0x0D5C +#define ANC_FIR_PEAK_MSB_REG 0x0D5D +#define ANC_FIR_PEAK_LSB_REG 0x0D5E +#define ANC_IIR_PEAK_MSB_REG 0x0D5F +#define ANC_IIR_PEAK_LSB_REG 0x0D60 +#define 
SIDETONE_FIR_ADDR_REG 0x0D61 +#define SIDETONE_FIR_COEFF_MSB_REG 0x0D62 +#define SIDETONE_FIR_COEFF_LSB_REG 0x0D63 +#define FILTERS_CONTROL_REG 0x0D64 +#define IRQ_MASK_LSB_REG 0x0D65 +#define IRQ_STATUS_LSB_REG 0x0D66 +#define IRQ_MASK_MSB_REG 0x0D67 +#define IRQ_STATUS_MSB_REG 0x0D68 +#define BURST_FIFO_INT_CONTROL_REG 0x0D69 +#define BURST_FIFO_LENGTH_REG 0x0D6A +#define BURST_FIFO_CONTROL_REG 0x0D6B +#define BURST_FIFO_SWITCH_FRAME_REG 0x0D6C +#define BURST_FIFO_WAKE_UP_DELAY_REG 0x0D6D +#define BURST_FIFO_SAMPLES_REG 0x0D6E +#define REVISION_REG 0x0D6F + +/* POWER_UP_CONTROL_REG Masks */ +#define DEVICE_POWER_UP 0x80 +#define ANALOG_PARTS_POWER_UP 0x08 + +/* SOFTWARE_RESET_REG Masks */ +#define SW_RESET 0x80 + +/* DIGITAL_AD_CHANNELS_ENABLE_REG Masks */ +#define EN_AD1 0x80 +#define EN_AD2 0x80 +#define EN_AD3 0x20 +#define EN_AD4 0x20 +#define EN_AD5 0x08 +#define EN_AD6 0x04 + +/* DIGITAL_DA_CHANNELS_ENABLE_REG Masks */ +#define EN_DA1 0x80 +#define EN_DA2 0x40 +#define EN_DA3 0x20 +#define EN_DA4 0x10 +#define EN_DA5 0x08 +#define EN_DA6 0x04 + +/* LOW_POWER_HS_EAR_CONF_REG Masks */ +#define LOW_POWER_HS 0x80 +#define HS_DAC_DRIVER_LP 0x40 +#define HS_DAC_LP 0x20 +#define EAR_DAC_LP 0x10 + +/* LINE_IN_MIC_CONF_REG Masks */ +#define EN_MIC1 0x80 +#define EN_MIC2 0x40 +#define EN_LIN_IN_L 0x20 +#define EN_LIN_IN_R 0x10 +#define MUT_MIC1 0x08 +#define MUT_MIC2 0x04 +#define MUT_LIN_IN_L 0x02 +#define MUT_LIN_IN_R 0x01 + +/* DMIC_ENABLE_REG Masks */ +#define EN_DMIC1 0x80 +#define EN_DMIC2 0x40 +#define EN_DMIC3 0x20 +#define EN_DMIC4 0x10 +#define EN_DMIC5 0x08 +#define EN_DMIC6 0x04 + +/* ADC_DAC_ENABLE_REG Masks */ +#define SEL_MIC1B_CLR_MIC1A 0x80 +#define SEL_LINR_CLR_MIC2 0x40 +#define POWER_UP_HSL_DAC 0x20 +#define POWER_UP_HSR_DAC 0x10 +#define POWER_UP_ADC1 0x04 +#define POWER_UP_ADC3 0x02 +#define POWER_UP_ADC2 0x01 + +/* ANALOG_OUTPUT_ENABLE_REG and DIGITAL_OUTPUT_ENABLE_REG and + MUTE_HS_EAR_REG Masks */ +#define EN_EAR_DAC_MASK 0x04 +#define 
EN_HSL_DAC_MASK 0x02 +#define EN_HSR_DAC_MASK 0x01 +#define EN_EAR_MASK 0x40 +#define EN_HSL_MASK 0x20 +#define EN_HSR_MASK 0x10 +#define EN_HFL_MASK 0x08 +#define EN_HFR_MASK 0x04 +#define EN_VIBL_MASK 0x02 +#define EN_VIBR_MASK 0x01 + +/* SHORT_CIRCUIT_DISABLE_REG Masks */ +#define HS_SHORT_DIS 0x20 +#define HS_PULL_DOWN_EN 0x10 +#define HS_OSC_EN 0x04 +#define DIS_HS_FAD 0x02 +#define HS_ZCD_DIS 0x01 + +/* NCP_ENABLE_HS_AUTOSTART_REG Masks */ +#define EN_NEG_CP 0x80 +#define HS_AUTO_EN 0x01 + +/* ANALOG_MIC1_GAIN_REG and ANALOG_MIC1_GAIN_REG Masks */ +#define MIC_ANALOG_GAIN_MASK 0x1F + +/*ANALOG_HS_GAIN_REG and ANALOG_LINE_IN_GAIN_REG Masks*/ +#define L_ANALOG_GAIN_MASK 0xF0 +#define R_ANALOG_GAIN_MASK 0x0F + +/* IF0_IF1_MASTER_CONF_REG Masks */ +#define EN_MASTGEN 0x80 +#define BITCLK_OSR_N_64 0x02 +#define BITCLK_OSR_N_128 0x04 +#define BITCLK_OSR_N_256 0x06 +#define EN_FSYNC_BITCLK 0x01 +#define EN_FSYNC_BITCLK1 0x10 + +/* IF0_CONF_REG and IF1_CONF_REG Masks */ +#define FSYNC_FALLING_EDGE 0x40 +#define BITCLK_FALLING_EDGE 0x20 +#define IF_DELAYED 0x10 +#define I2S_LEFT_ALIGNED_FORMAT 0x08 +#define TDM_FORMAT 0x04 +#define WORD_LENGTH_32 0x03 +#define WORD_LENGTH_24 0x02 +#define WORD_LENGTH_20 0x01 +#define WORD_LENGTH_16 0x00 + +/* TDM_IF_BYPASS_B_FIFO_REG Masks */ +#define IF0_BFifoEn 0x01 +#define IF0_MASTER 0x02 + +#define IF1_MASTER 0x20 +/* + * AD_ALLOCATION_TO_SLOT0_1_REG and AD_ALLOCATION_TO_SLOT2_3_REG and + * AD_ALLOCATION_TO_SLOT4_5_REG and AD_ALLOCATION_TO_SLOT6_7_REG Masks + */ +#define DATA_FROM_AD_OUT1 0x00 +#define DATA_FROM_AD_OUT2 0x01 +#define DATA_FROM_AD_OUT3 0x02 +#define DATA_FROM_AD_OUT4 0x03 +#define DATA_FROM_AD_OUT5 0x04 +#define DATA_FROM_AD_OUT6 0x05 +#define DATA_FROM_AD_OUT7 0x06 +#define DATA_FROM_AD_OUT8 0x07 +#define TRISTATE 0x0C + +/* + * SLOT_SELECTION_TO_DA1_REG and SLOT_SELECTION_TO_DA2_REG and + * SLOT_SELECTION_TO_DA3_REG and SLOT_SELECTION_TO_DA4_REG Masks + * SLOT_SELECTION_TO_DA5_REG and SLOT_SELECTION_TO_DA6_REG 
 * Masks
 */

/* TDM time slots (8..15) selectable for the DA (playback) path. */
#define SLOT08_FOR_DA_PATH 0x08
#define SLOT09_FOR_DA_PATH 0x09
#define SLOT10_FOR_DA_PATH 0x0A
#define SLOT11_FOR_DA_PATH 0x0B
#define SLOT12_FOR_DA_PATH 0x0C
#define SLOT13_FOR_DA_PATH 0x0D
#define SLOT14_FOR_DA_PATH 0x0E
#define SLOT15_FOR_DA_PATH 0x0F

/* DIGITAL_MUXES_REG1 Masks */
#define DA1_TO_HSL 0x80
#define DA2_TO_HSR 0x40
#define SEL_DMIC1_FOR_AD_OUT1 0x20
#define SEL_DMIC2_FOR_AD_OUT2 0x10
#define SEL_DMIC3_FOR_AD_OUT3 0x08
/* Unused mux selections, kept commented out for reference: */
/*#define SEL_DMIC5_FOR_AD_OUT5 0x04*/
/*#define SEL_DMIC6_FOR_AD_OUT6 0x02*/
/*#define SEL_DMIC1_FOR_AD_OUT1 0x01*/

/*
 * AD1_DIGITAL_GAIN_REG and AD2_DIGITAL_GAIN_REG & AD3_DIGITAL_GAIN_REG Masks
 * AD4_DIGITAL_GAIN_REG and AD5_DIGITAL_GAIN_REG & AD6_DIGITAL_GAIN_REG Masks
 * DA1_DIGITAL_GAIN_REG and DA2_DIGITAL_GAIN_REG & DA3_DIGITAL_GAIN_REG Masks
 * DA4_DIGITAL_GAIN_REG and DA5_DIGITAL_GAIN_REG & DA6_DIGITAL_GAIN_REG Masks
 */
#define DIS_FADING 0x40			/* 1 = disable gain fading */
#define DIGITAL_GAIN_MASK 0x3F		/* 6-bit digital gain field */

/*
 * HSL_EAR_DIGITAL_GAIN_REG and HSR_DIGITAL_GAIN_REG Masks
 */
#define FADE_SPEED_MASK 0xC0
#define DIS_DIG_GAIN_FADING 0x10
#define HS_DIGITAL_GAIN_MASK 0x0F	/* 4-bit headset digital gain field */

/* FMRx/FMTx Masks */
#define SLOT24_FOR_DA_PATH 0x18
#define SEL_AD_OUT8_FROM_DAIN7 0x20
#define SLOT25_FOR_DA_PATH 0x19
#define SEL_AD_OUT6_FROM_DAIN8 0x20
/*
 * NOTE(review): the two AD_OUT7 selectors share value 0x60 and the two
 * AD_OUT6 selectors share value 0x50 -- presumably the same register field
 * reached from different interfaces; confirm against the AB8500 data sheet.
 */
#define SEL_IF8_FROM_AD_OUT7 0x60
#define SEL_IF17_FROM_AD_OUT7 0x60
#define SEL_IF16_FROM_AD_OUT8 0x07

#define SEL_IF6_FROM_AD_OUT5 0x04
#define SEL_IF7_FROM_AD_OUT6 0x50
#define SEL_IF17_FROM_AD_OUT6 0x50
#define SEL_AD_OUT5_FROM_DAIN7 0x20

/* Burst FIFO Control Masks */
#define WAKEUP_SIGNAL_SAMPLE_COUNT 0x1B
#define BURST_FIFO_TRANSFER_LENGTH 0xC0
#define BURST_FIFO_INF_RUNNING 0x01
#define BURST_FIFO_INF_IN_MASTER_MODE 0x02
#define PRE_BIT_CLK0_COUNT 0x1C
/*
 * NOTE(review): name is misspelled ("WAKUP_DEALAY") but must be kept
 * unchanged -- code outside this file may reference it.
 */
#define BURST_FIFO_WAKUP_DEALAY 0x70

/* Filter Control Masks */
/* SideTone Masks */
#define SIDETONE_DIGITAL_GAIN_MASK 0x1F
/* FIR1/FIR2 side-tone filter input selection values. */
#define FIR1_FROMAD1 0x0C
#define FIR1_FROMAD2 0x03
#define FIR1_FROMAD3 0x08
#define FIR1_DAIN1 0x0C

#define FIR2_FROMAD2 0x00
#define FIR2_FROMAD3 0x01
#define FIR2_FROMAD4 0x02
#define FIR2_DAIN2 0x03

#define FIR2_ANDFIR1AD3 0x09
#define FIR_FILTERCONTROL 0x04
#define APPLY_FIR_COEFFS_MASK 0x80

/* IRQ status masks */
#define NCP_READY_MASK 0x80

/* AB8500 power control Masks */
#define AB8500_VER_1_0 0x10
#define AB8500_VER_1_1 0x11
#define CLK_32K_OUT2_DISABLE 0x01
#define INACTIVE_RESET_AUDIO 0x02
#define AB8500_REQ_SYS_CLK 0x08
#define ENABLE_AUDIO_CLK_TO_AUDIO_BLK 0x10
#define ENABLE_VINTCORE12_SUPPLY 0x04
#define VAMIC2_ENABLE 0x10
#define VAMIC1_ENABLE 0x08
#define VDMIC_ENABLE 0x04
#define VAUDIO_ENABLE 0x02
/* GPIO direction bits (1 = output) used for the audio alternate functions. */
#define GPIO27_DIR_OUTPUT 0x04
#define GPIO29_DIR_OUTPUT 0x10
#define GPIO31_DIR_OUTPUT 0x40
#define GPIO35_DIR_OUTPUT 0X04
#endif

/*
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Deepak KARDA/ deepak.karda@stericsson.com for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2.
 */

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <mach/ste_audio_io_vibrator.h>
#include <mach/ste_audio_io.h>

#include "ste_audio_io_core.h"
#include "ste_audio_io_hwctrl_common.h"
#include "ste_audio_io_ab8500_reg_defs.h"

/* Driver-wide codec state; allocated in ste_audio_io_core_api_init_data(). */
static struct audiocodec_context_t *ptr_audio_codec_cnxt;

/* Clock handles obtained from the clock framework during init/power-up. */
static struct clk *clk_ptr_msp1;
static struct clk *clk_ptr_msp3;
static struct clk *clk_ptr_audioclk;
static struct clk *clk_ptr_sysclk;
static struct clk *clk_ptr_ulpclk;

/* Regulator handles for the microphone and audio supplies. */
static struct regulator *regulator_vdmic;
static struct regulator *regulator_vaudio;
static struct regulator *regulator_vamic1;
static struct regulator *regulator_vamic2;
/* Not static: the AV-switch supply is shared with other files. */
struct regulator *regulator_avsource;

static void ste_audio_io_init_transducer_cnxt(void);
static int ste_audio_io_core_init_regulators(void);
static int ste_audio_io_core_init_clocks(void);
static int ste_audio_io_core_free_regulators(void);
static int ste_audio_io_core_free_clocks(void);
static int ste_audio_io_enable_audio_clock(void);
static int ste_audio_io_disable_audio_clock(void);

/*
 * Per-transducer operation tables.  Each table maps the generic channel
 * operations (power, gain, mute, fading, loop, burst mode) onto the
 * transducer-specific implementations; entries left unset are NULL and
 * must be checked before use.
 */

/* Headset: the only transducer that also supports burst (low-power) mode. */
static struct transducer_context_t transducer_headset = {
	.pwr_up_func = ste_audio_io_power_up_headset,
	.pwr_down_func = ste_audio_io_power_down_headset,
	.set_gain_func = ste_audio_io_set_headset_gain,
	.get_gain_func = ste_audio_io_get_headset_gain,
	.mute_func = ste_audio_io_mute_headset,
	.unmute_func = ste_audio_io_unmute_headset,
	.enable_fade_func = ste_audio_io_enable_fade_headset,
	.disable_fade_func = ste_audio_io_disable_fade_headset,
	.switch_to_burst_func = ste_audio_io_switch_to_burst_mode_headset,
	.switch_to_normal_func = ste_audio_io_switch_to_normal_mode_headset
};

static struct transducer_context_t transducer_earpiece = {
	.pwr_up_func = ste_audio_io_power_up_earpiece,
	.pwr_down_func = ste_audio_io_power_down_earpiece,
	.set_gain_func = ste_audio_io_set_earpiece_gain,
	.get_gain_func = ste_audio_io_get_earpiece_gain,
	.mute_func = ste_audio_io_mute_earpiece,
	.unmute_func = ste_audio_io_unmute_earpiece,
	.enable_fade_func = ste_audio_io_enable_fade_earpiece,
	.disable_fade_func = ste_audio_io_disable_fade_earpiece
};

/* IHF = internal hands-free (loudspeaker). */
static struct transducer_context_t transducer_ihf = {
	.pwr_up_func = ste_audio_io_power_up_ihf,
	.pwr_down_func = ste_audio_io_power_down_ihf,
	.set_gain_func = ste_audio_io_set_ihf_gain,
	.get_gain_func = ste_audio_io_get_ihf_gain,
	.mute_func = ste_audio_io_mute_ihf,
	.unmute_func = ste_audio_io_unmute_ihf,
	.enable_fade_func = ste_audio_io_enable_fade_ihf,
	.disable_fade_func = ste_audio_io_disable_fade_ihf

};

static struct transducer_context_t transducer_vibl = {
	.pwr_up_func = ste_audio_io_power_up_vibl,
	.pwr_down_func = ste_audio_io_power_down_vibl,
	.set_gain_func = ste_audio_io_set_vibl_gain,
	.get_gain_func = ste_audio_io_get_vibl_gain,
	.mute_func = ste_audio_io_mute_vibl,
	.unmute_func = ste_audio_io_unmute_vibl,
	.enable_fade_func = ste_audio_io_enable_fade_vibl,
	.disable_fade_func = ste_audio_io_disable_fade_vibl
};

static struct transducer_context_t transducer_vibr = {
	.pwr_up_func = ste_audio_io_power_up_vibr,
	.pwr_down_func = ste_audio_io_power_down_vibr,
	.set_gain_func = ste_audio_io_set_vibr_gain,
	.get_gain_func = ste_audio_io_get_vibr_gain,
	.mute_func = ste_audio_io_mute_vibr,
	.unmute_func = ste_audio_io_unmute_vibr,
	.enable_fade_func = ste_audio_io_enable_fade_vibr,
	.disable_fade_func = ste_audio_io_disable_fade_vibr
};

static struct transducer_context_t transducer_mic1a = {
	.pwr_up_func = ste_audio_io_power_up_mic1a,
	.pwr_down_func = ste_audio_io_power_down_mic1a,
	.set_gain_func = ste_audio_io_set_mic1a_gain,
	.get_gain_func = ste_audio_io_get_mic1a_gain,
	.mute_func = ste_audio_io_mute_mic1a,
	.unmute_func = ste_audio_io_unmute_mic1a,
	.enable_fade_func = ste_audio_io_enable_fade_mic1a,
	.disable_fade_func = ste_audio_io_disable_fade_mic1a
};
+ +static struct transducer_context_t transducer_mic1b = { + .pwr_up_func = ste_audio_io_power_up_mic1b, + .pwr_down_func = ste_audio_io_power_down_mic1b, + .set_gain_func = ste_audio_io_set_mic1a_gain, + .get_gain_func = ste_audio_io_get_mic1a_gain, + .mute_func = ste_audio_io_mute_mic1a, + .unmute_func = ste_audio_io_unmute_mic1a, + .enable_fade_func = ste_audio_io_enable_fade_mic1a, + .disable_fade_func = ste_audio_io_disable_fade_mic1a, + .enable_loop = ste_audio_io_enable_loop_mic1b, + .disable_loop = ste_audio_io_disable_loop_mic1b +}; + +static struct transducer_context_t transducer_mic2 = { + .pwr_up_func = ste_audio_io_power_up_mic2, + .pwr_down_func = ste_audio_io_power_down_mic2, + .set_gain_func = ste_audio_io_set_mic2_gain, + .get_gain_func = ste_audio_io_get_mic2_gain, + .mute_func = ste_audio_io_mute_mic2, + .unmute_func = ste_audio_io_unmute_mic2, + .enable_fade_func = ste_audio_io_enable_fade_mic2, + .disable_fade_func = ste_audio_io_disable_fade_mic2 +}; + +static struct transducer_context_t transducer_lin = { + .pwr_up_func = ste_audio_io_power_up_lin, + .pwr_down_func = ste_audio_io_power_down_lin, + .set_gain_func = ste_audio_io_set_lin_gain, + .get_gain_func = ste_audio_io_get_lin_gain, + .mute_func = ste_audio_io_mute_lin, + .unmute_func = ste_audio_io_unmute_lin, + .enable_fade_func = ste_audio_io_enable_fade_lin, + .disable_fade_func = ste_audio_io_disable_fade_lin +}; + +static struct transducer_context_t transducer_dmic12 = { + .pwr_up_func = ste_audio_io_power_up_dmic12, + .pwr_down_func = ste_audio_io_power_down_dmic12, + .set_gain_func = ste_audio_io_set_dmic12_gain, + .get_gain_func = ste_audio_io_get_dmic12_gain, + .mute_func = ste_audio_io_mute_dmic12, + .unmute_func = ste_audio_io_unmute_dmic12, + .enable_fade_func = ste_audio_io_enable_fade_dmic12, + .disable_fade_func = ste_audio_io_disable_fade_dmic12, + .enable_loop = ste_audio_io_enable_loop_dmic12, + .disable_loop = ste_audio_io_disable_loop_dmic12 +}; + +static struct 
transducer_context_t transducer_dmic34 = { + .pwr_up_func = ste_audio_io_power_up_dmic34, + .pwr_down_func = ste_audio_io_power_down_dmic34, + .set_gain_func = ste_audio_io_set_dmic34_gain, + .get_gain_func = ste_audio_io_get_dmic34_gain, + .mute_func = ste_audio_io_mute_dmic34, + .unmute_func = ste_audio_io_unmute_dmic34, + .enable_fade_func = ste_audio_io_enable_fade_dmic34, + .disable_fade_func = ste_audio_io_disable_fade_dmic34 +}; + +static struct transducer_context_t transducer_dmic56 = { + .pwr_up_func = ste_audio_io_power_up_dmic56, + .pwr_down_func = ste_audio_io_power_down_dmic56, + .set_gain_func = ste_audio_io_set_dmic56_gain, + .get_gain_func = ste_audio_io_get_dmic56_gain, + .mute_func = ste_audio_io_mute_dmic56, + .unmute_func = ste_audio_io_unmute_dmic56, + .enable_fade_func = ste_audio_io_enable_fade_dmic56, + .disable_fade_func = ste_audio_io_disable_fade_dmic56, +}; + +static struct transducer_context_t transducer_fmrx = { + .pwr_up_func = ste_audio_io_power_up_fmrx, + .pwr_down_func = ste_audio_io_power_down_fmrx, +}; + +static struct transducer_context_t transducer_fmtx = { + .pwr_up_func = ste_audio_io_power_up_fmtx, + .pwr_down_func = ste_audio_io_power_down_fmtx, +}; + +static struct transducer_context_t transducer_bluetooth = { + .pwr_up_func = ste_audio_io_power_up_bluetooth, + .pwr_down_func = ste_audio_io_power_down_bluetooth, +}; + +bool ste_audio_io_core_is_ready_for_suspend() +{ + bool err = false; + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + if ((!ptr_audio_codec_cnxt->power_client) && + (!ptr_audio_codec_cnxt->audio_codec_powerup)) + err = true; + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + return err; +} + +static int ste_audio_io_core_init_regulators() +{ + int error = 0; + regulator_vdmic = regulator_get(NULL, "v-dmic"); + if (IS_ERR(regulator_vdmic)) { + error = PTR_ERR(regulator_vdmic); + dev_err(ptr_audio_codec_cnxt->dev, + "Register error for v-dmic=%d", error); + return error; + } + regulator_vamic1 
= regulator_get(NULL, "v-amic1"); + if (IS_ERR(regulator_vamic1)) { + error = PTR_ERR(regulator_vamic1); + dev_err(ptr_audio_codec_cnxt->dev, + "Register error for v-amic1=%d", error); + goto free_regulator_vdmic; + } + regulator_vamic2 = regulator_get(NULL, "v-amic2"); + if (IS_ERR(regulator_vamic2)) { + error = PTR_ERR(regulator_vamic2); + dev_err(ptr_audio_codec_cnxt->dev, + "Register error for v-amic2=%d", error); + goto free_regulator_vdmic_vamic1; + } + regulator_vaudio = regulator_get(NULL, "v-audio"); + if (IS_ERR(regulator_vaudio)) { + error = PTR_ERR(regulator_vaudio); + dev_err(ptr_audio_codec_cnxt->dev, + "Register error for v-audio=%d", error); + goto free_regulator_vdmic_vamic1_vamic2; + } + regulator_avsource = regulator_get(ptr_audio_codec_cnxt->dev, + "vcc-avswitch"); + if (IS_ERR(regulator_avsource)) { + error = PTR_ERR(regulator_avsource); + dev_err(ptr_audio_codec_cnxt->dev, + "Register error for vcc-avswitch =%d", error); + goto free_regulator_vdmic_vamic1_vamic2_vaudio; + } + return error; +free_regulator_vdmic_vamic1_vamic2_vaudio: + regulator_put(regulator_vaudio); +free_regulator_vdmic_vamic1_vamic2: + regulator_put(regulator_vamic2); +free_regulator_vdmic_vamic1: + regulator_put(regulator_vamic1); +free_regulator_vdmic: + regulator_put(regulator_vdmic); + return error; +} + +static int ste_audio_io_core_free_regulators() +{ + regulator_put(regulator_vdmic); + regulator_put(regulator_vamic1); + regulator_put(regulator_vamic2); + regulator_put(regulator_vaudio); + regulator_put(regulator_avsource); + return 0; +} + +static int ste_audio_io_core_init_clocks() +{ + int error = 0; + clk_ptr_sysclk = clk_get(ptr_audio_codec_cnxt->dev, "sysclk"); + if (IS_ERR(clk_ptr_sysclk)) { + error = -EFAULT; + dev_err(ptr_audio_codec_cnxt->dev, + "Sysclk get failed error = %d", error); + return error; + } + clk_ptr_ulpclk = clk_get(ptr_audio_codec_cnxt->dev, "ulpclk"); + if (IS_ERR(clk_ptr_ulpclk)) { + error = -EFAULT; + dev_err(ptr_audio_codec_cnxt->dev, + 
"Ulpclk get failed error = %d", error); + goto free_sysclk; + } + clk_ptr_audioclk = clk_get(ptr_audio_codec_cnxt->dev, "audioclk"); + if (IS_ERR(clk_ptr_audioclk)) { + error = -EFAULT; + dev_err(ptr_audio_codec_cnxt->dev, + "Audioclk get failed error = %d", error); + goto free_ulpclk; + } + return error; +free_ulpclk: + clk_put(clk_ptr_ulpclk); +free_sysclk: + clk_put(clk_ptr_sysclk); + return error; +} + +static int ste_audio_io_core_free_clocks() +{ + clk_put(clk_ptr_audioclk); + clk_put(clk_ptr_ulpclk); + clk_put(clk_ptr_sysclk); + return 0; +} + +int ste_audio_io_core_api_init_data(struct platform_device *pdev) +{ + struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); + struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev); + int error = 0; + ptr_audio_codec_cnxt = kmalloc(sizeof(struct audiocodec_context_t), + GFP_KERNEL); + if (!ptr_audio_codec_cnxt) + return -ENOMEM; + + memset(ptr_audio_codec_cnxt, 0, sizeof(*ptr_audio_codec_cnxt)); + ptr_audio_codec_cnxt->dev = &pdev->dev; + ptr_audio_codec_cnxt->clk_type = AUDIOIO_ULP_CLK; + mutex_init(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + if (pdata) { + if (pdata->audio) { + ptr_audio_codec_cnxt->gpio_altf_init = + pdata->audio->ste_gpio_altf_init; + ptr_audio_codec_cnxt->gpio_altf_exit = + pdata->audio->ste_gpio_altf_exit; + } + } + + error = ste_audio_io_core_init_regulators(); + if (error) + goto free_audio_codec_cnxt; + error = ste_audio_io_core_init_clocks(); + if (error) + goto free_audio_codec_cnxt_regulators; + ste_audio_io_init_transducer_cnxt(); + return error; + +free_audio_codec_cnxt_regulators: + ste_audio_io_core_free_regulators(); +free_audio_codec_cnxt: + kfree(ptr_audio_codec_cnxt); + return error; +} + +static int ste_audio_io_enable_audio_clock() +{ + int error = 0; + if (ptr_audio_codec_cnxt->is_audio_clk_enabled) + return 0; + + if (AUDIOIO_ULP_CLK == ptr_audio_codec_cnxt->clk_type) { + error = clk_set_parent(clk_ptr_audioclk, clk_ptr_ulpclk); + if (error) { + 
dev_err(ptr_audio_codec_cnxt->dev, + "Setting Ulpclk as parent failed error = %d", error); + return error; + } + } else { + error = clk_set_parent(clk_ptr_audioclk, clk_ptr_sysclk); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Setting Sysclk as parent failed error = %d", error); + return error; + } + } + error = clk_enable(clk_ptr_audioclk); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Audioclk enable failed error = %d", error); + return error; + } + ptr_audio_codec_cnxt->is_audio_clk_enabled = 1; + return error; +} + +static int ste_audio_io_disable_audio_clock() +{ + if (!ptr_audio_codec_cnxt->is_audio_clk_enabled) + return 0; + clk_disable(clk_ptr_audioclk); + ptr_audio_codec_cnxt->is_audio_clk_enabled = 0; + return 0; +} + +static void ste_audio_io_init_transducer_cnxt(void) +{ + ptr_audio_codec_cnxt->transducer[HS_CH] = &transducer_headset; + ptr_audio_codec_cnxt->transducer[EAR_CH] = &transducer_earpiece; + ptr_audio_codec_cnxt->transducer[IHF_CH] = &transducer_ihf; + ptr_audio_codec_cnxt->transducer[VIBL_CH] = &transducer_vibl; + ptr_audio_codec_cnxt->transducer[VIBR_CH] = &transducer_vibr; + ptr_audio_codec_cnxt->transducer[MIC1A_CH] = &transducer_mic1a; + ptr_audio_codec_cnxt->transducer[MIC1B_CH] = &transducer_mic1b; + ptr_audio_codec_cnxt->transducer[MIC2_CH] = &transducer_mic2; + ptr_audio_codec_cnxt->transducer[LIN_CH] = &transducer_lin; + ptr_audio_codec_cnxt->transducer[DMIC12_CH] = &transducer_dmic12; + ptr_audio_codec_cnxt->transducer[DMIC34_CH] = &transducer_dmic34; + ptr_audio_codec_cnxt->transducer[DMIC56_CH] = &transducer_dmic56; + ptr_audio_codec_cnxt->transducer[FMRX_CH] = &transducer_fmrx; + ptr_audio_codec_cnxt->transducer[FMTX_CH] = &transducer_fmtx; + ptr_audio_codec_cnxt->transducer[BLUETOOTH_CH] = &transducer_bluetooth; +} + +void ste_audio_io_core_api_free_data(void) +{ + ste_audio_io_core_free_regulators(); + ste_audio_io_core_free_clocks(); + kfree(ptr_audio_codec_cnxt); +} + +static int 
ste_audio_io_core_api_enable_regulators(int channel_type) +{ + int error = 0; + + switch (channel_type) { + case EAR_CH: + case HS_CH: + case IHF_CH: + case VIBL_CH: + case VIBR_CH: + case LIN_CH: + case FMRX_CH: + case FMTX_CH: + case BLUETOOTH_CH: + /* vaduio already enabled + no additional regualtor required */ + break; + + case MIC1A_CH: + case MIC1B_CH: + error = regulator_enable(regulator_vamic1); + if (error) + dev_err(ptr_audio_codec_cnxt->dev, + "unable to enable regulator vamic1 error = %d", error); + break; + + case MIC2_CH: + error = regulator_enable(regulator_vamic2); + if (error) + dev_err(ptr_audio_codec_cnxt->dev, + "unable to enable regulator vamic2 error = %d", error); + break; + + case DMIC12_CH: + case DMIC34_CH: + case DMIC56_CH: + case MULTI_MIC_CH: + error = regulator_enable(regulator_vdmic); + if (error) + dev_err(ptr_audio_codec_cnxt->dev, + "unable to enable regulator vdmic error = %d", error); + } + return error; +} + +static int ste_audio_io_core_api_disable_regulators(int channel_type) +{ + int error = 0; + + switch (channel_type) { + case EAR_CH: + case HS_CH: + case IHF_CH: + case VIBL_CH: + case VIBR_CH: + case LIN_CH: + case FMRX_CH: + case FMTX_CH: + case BLUETOOTH_CH: + /* no need to disable separately*/ + break; + + case MIC1A_CH: + case MIC1B_CH: + error = regulator_disable(regulator_vamic1); + if (error) + dev_err(ptr_audio_codec_cnxt->dev, + "unable to disable regulator vamic1 error = %d", error); + break; + + case MIC2_CH: + error = regulator_disable(regulator_vamic2); + if (error) + dev_err(ptr_audio_codec_cnxt->dev, + "unable to disable regulator vamic2 error = %d", error); + break; + + case DMIC12_CH: + case DMIC34_CH: + case DMIC56_CH: + case MULTI_MIC_CH: + error = regulator_disable(regulator_vdmic); + if (error) + dev_err(ptr_audio_codec_cnxt->dev, + "unable to disable regulator vdmic error = %d", error); + } + return error; +} + +int ste_audio_io_core_api_powerup_audiocodec(int power_client) +{ + int error = 0; + int 
acodec_device_id; + __u8 data, old_data; + /* aquire mutex */ + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + acodec_device_id = abx500_get_chip_id(&ste_audio_io_device->dev); + + /* + * If there is no power client registered, power up + * common audio blocks for audio and vibrator + */ + if (!ptr_audio_codec_cnxt->power_client) { + error = ste_audio_io_enable_audio_clock(); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Unable to enable audio clock = %d", error); + goto err_cleanup; + } + old_data = HW_REG_READ(AB8500_CTRL3_REG); + + /* Enable 32 Khz clock signal on Clk32KOut2 ball */ + data = (~CLK_32K_OUT2_DISABLE) & old_data; + error = HW_REG_WRITE(AB8500_CTRL3_REG, data); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "enabling 32KHz clock error = %d", error); + goto err_cleanup; + } + data = INACTIVE_RESET_AUDIO | old_data; + error = HW_REG_WRITE(AB8500_CTRL3_REG, data); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "deactivate audio codec reset error = %d", error); + goto err_cleanup; + } + regulator_enable(regulator_vaudio); + + old_data = HW_REG_READ(AB8500_GPIO_DIR4_REG); + data = (GPIO27_DIR_OUTPUT | GPIO29_DIR_OUTPUT | + GPIO31_DIR_OUTPUT) | old_data; + error = HW_REG_WRITE(AB8500_GPIO_DIR4_REG, data); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "setting gpio dir4 error = %d", error); + goto err_cleanup; + } + error = HW_REG_WRITE(SOFTWARE_RESET_REG, SW_RESET); + if (error != 0) { + dev_err(ptr_audio_codec_cnxt->dev, + "Software reset error=%d", error); + goto err_cleanup; + } + + error = HW_ACODEC_MODIFY_WRITE(POWER_UP_CONTROL_REG, + (DEVICE_POWER_UP|ANALOG_PARTS_POWER_UP), 0); + if (error != 0) { + dev_err(ptr_audio_codec_cnxt->dev, + "Device Power Up, error=%d", error); + goto err_cleanup; + } + } + /* Save information that given client already powered up audio block */ + ptr_audio_codec_cnxt->power_client |= power_client; + + /* If audio block requested power up, turn on additional audio blocks */ + if 
(power_client == STE_AUDIOIO_POWER_AUDIO) { + if (!ptr_audio_codec_cnxt->audio_codec_powerup) { + clk_ptr_msp1 = clk_get_sys("msp1", NULL); + if (!IS_ERR(clk_ptr_msp1)) { + error = clk_enable(clk_ptr_msp1); + if (error) + goto err_cleanup; + } else { + error = -EFAULT; + goto err_cleanup; + } + + if (AB8500_REV_20 <= acodec_device_id) { + clk_ptr_msp3 = clk_get_sys("msp3", NULL); + if (!IS_ERR(clk_ptr_msp3)) { + error = clk_enable(clk_ptr_msp3); + if (error) + goto err_cleanup; + } else { + error = -EFAULT; + goto err_cleanup; + } + } + + if (ptr_audio_codec_cnxt->gpio_altf_init) { + error = ptr_audio_codec_cnxt->gpio_altf_init(); + if (error) + goto err_cleanup; + } + + error = HW_ACODEC_MODIFY_WRITE(IF0_IF1_MASTER_CONF_REG, + EN_MASTGEN, 0); + if (error != 0) { + dev_err(ptr_audio_codec_cnxt->dev, + "Enable Master Generator, error=%d", error); + goto err_cleanup; + } + + error = HW_ACODEC_MODIFY_WRITE(TDM_IF_BYPASS_B_FIFO_REG, + IF0_MASTER, 0); + if (error != 0) { + dev_err(ptr_audio_codec_cnxt->dev, + "IF0: Master Mode, error=%d", error); + goto err_cleanup; + } + + /* Configuring IF0 */ + + error = HW_ACODEC_MODIFY_WRITE(IF0_IF1_MASTER_CONF_REG, + BITCLK_OSR_N_256, 0); + if (error != 0) { + dev_err(ptr_audio_codec_cnxt->dev, + "IF0: Enable FsBitClk & FSync error=%d", error); + goto err_cleanup; + } + + error = HW_REG_WRITE(IF0_CONF_REG, IF_DELAYED + | TDM_FORMAT | WORD_LENGTH_20); + if (error != 0) { + dev_err(ptr_audio_codec_cnxt->dev, + "IF0: TDM Format 16 Bits word length, error=%d", + error); + goto err_cleanup; + } + } + ptr_audio_codec_cnxt->audio_codec_powerup++; + } +err_cleanup: + /* release mutex */ + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + return error; +} + +int ste_audio_io_core_api_powerdown_audiocodec(int power_client) +{ + int error = 0; + /* aquire mutex */ + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + /* Update power client status */ + if (power_client == STE_AUDIOIO_POWER_AUDIO) { + 
ptr_audio_codec_cnxt->audio_codec_powerup--; + if (!ptr_audio_codec_cnxt->audio_codec_powerup) { + ptr_audio_codec_cnxt->power_client &= ~power_client; + ste_audio_io_disable_audio_clock(); + clk_disable(clk_ptr_msp1); + clk_put(clk_ptr_msp1); + if (AB8500_REV_20 <= + abx500_get_chip_id(&ste_audio_io_device->dev)) { + clk_disable(clk_ptr_msp3); + clk_put(clk_ptr_msp3); + } + + if (ptr_audio_codec_cnxt->gpio_altf_exit) { + error = ptr_audio_codec_cnxt->gpio_altf_exit(); + if (error) + goto err_cleanup; + } + } + } else + ptr_audio_codec_cnxt->power_client &= ~power_client; + + /* If no power client registered, power down audio block */ + if (!ptr_audio_codec_cnxt->power_client) { + regulator_disable(regulator_vaudio); + ste_audio_io_disable_audio_clock(); + if (error != 0) { + dev_err(ptr_audio_codec_cnxt->dev, + "Device Power Down and Analog Parts Power Down error = %d ", + error); + goto err_cleanup; + } + } + +err_cleanup: + /* release mutex */ + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + return error; +} +/** + * @brief Read from AB8500 device + * @dev_data Pointer to the structure __audioio_data + * @return 0 + */ + +int ste_audio_io_core_api_access_read(struct audioio_data_t *dev_data) +{ + int reg; + if (NULL == dev_data) + return -EFAULT; + reg = (dev_data->block<<8)|(dev_data->addr&0xff); + dev_data->data = HW_REG_READ(reg); + return 0; +} +/** + * @brief Write on AB8500 device + * @dev_data Pointer to the structure __audioio_data + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_core_api_access_write(struct audioio_data_t *dev_data) +{ + int retval, reg; + if (NULL == dev_data) + return -EFAULT; + + reg = (dev_data->block<<8)|(dev_data->addr&0xff); + retval = HW_REG_WRITE(reg, dev_data->data); + + return retval; +} +/** + * @brief Store the power and mute status of transducer + * @channel_index Channel-index of transducer + * @ptr Array storing the status + * @value status being stored + * @return 0 on success 
otherwise negative error code + */ + +void ste_audio_io_core_api_store_data(enum AUDIOIO_CH_INDEX channel_index, + int *ptr, int value) +{ + if (channel_index & e_CHANNEL_1) + ptr[0] = value; + + if (channel_index & e_CHANNEL_2) + ptr[1] = value; + + if (channel_index & e_CHANNEL_3) + ptr[2] = value; + + if (channel_index & e_CHANNEL_4) + ptr[3] = value; +} +/** + * @brief Get power or mute status on a specific channel + * @channel_index Channel-index of the transducer + * @ptr Pointer to is_power_up array or is_muted array + * @return status of control switch + */ +enum AUDIOIO_COMMON_SWITCH ste_audio_io_core_api_get_status( + enum AUDIOIO_CH_INDEX channel_index, int *ptr) +{ + if (channel_index & e_CHANNEL_1) { + if (AUDIOIO_TRUE == ptr[0]) + return AUDIOIO_COMMON_ON; + else + return AUDIOIO_COMMON_OFF; + } + + if (channel_index & e_CHANNEL_2) { + if (AUDIOIO_TRUE == ptr[1]) + return AUDIOIO_COMMON_ON; + else + return AUDIOIO_COMMON_OFF; + } + + if (channel_index & e_CHANNEL_3) { + if (AUDIOIO_TRUE == ptr[2]) + return AUDIOIO_COMMON_ON; + else + return AUDIOIO_COMMON_OFF; + } + + if (channel_index & e_CHANNEL_4) { + if (AUDIOIO_TRUE == ptr[3]) + return AUDIOIO_COMMON_ON; + else + return AUDIOIO_COMMON_OFF; + } + return 0; +} + +int ste_audio_io_core_api_acodec_power_control(struct audioio_acodec_pwr_ctrl_t + *audio_acodec_pwr_ctrl) +{ + int error = 0; + if (audio_acodec_pwr_ctrl->ctrl_switch == AUDIOIO_COMMON_ON) + error = ste_audio_io_core_api_powerup_audiocodec( + STE_AUDIOIO_POWER_AUDIO); + else + error = ste_audio_io_core_api_powerdown_audiocodec( + STE_AUDIOIO_POWER_AUDIO); + + return error; +} +/** + * @brief Control for powering on/off HW components on a specific channel + * @pwr_ctrl Pointer to the structure __audioio_pwr_ctrl + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_core_api_power_control_transducer( + struct audioio_pwr_ctrl_t *pwr_ctrl) +{ + int error = 0; + struct transducer_context_t *ptr = NULL; + enum 
AUDIOIO_CH_INDEX channel_index; + + channel_index = pwr_ctrl->channel_index; + + if ((pwr_ctrl->channel_type < FIRST_CH) + || (pwr_ctrl->channel_type > LAST_CH)) + return -EINVAL; + + ptr = ptr_audio_codec_cnxt->transducer[pwr_ctrl->channel_type]; + + /* aquire mutex */ + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + if (AUDIOIO_COMMON_ON == pwr_ctrl->ctrl_switch) { + if (ptr->pwr_up_func) { + error = ste_audio_io_core_api_enable_regulators( + pwr_ctrl->channel_type); + if (error) + goto free_mutex; + + error = ptr->pwr_up_func(pwr_ctrl->channel_index, + ptr_audio_codec_cnxt->dev); + if (0 == error) { + ste_audio_io_core_api_store_data(channel_index, + ptr->is_power_up, AUDIOIO_TRUE); + } + } + } else { + if (ptr->pwr_down_func) { + error = ptr->pwr_down_func(pwr_ctrl->channel_index, + ptr_audio_codec_cnxt->dev); + if (0 == error) { + ste_audio_io_core_api_store_data(channel_index, + ptr->is_power_up, AUDIOIO_FALSE); + } + error = ste_audio_io_core_api_disable_regulators( + pwr_ctrl->channel_type); + } + } + +free_mutex: + /* release mutex */ + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + return error; +} +/** + * @brief Query power state of HW path on specified channel + * @pwr_ctrl Pointer to the structure __audioio_pwr_ctrl + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_core_api_power_status_transducer( + struct audioio_pwr_ctrl_t *pwr_ctrl) +{ + + struct transducer_context_t *ptr = NULL; + enum AUDIOIO_CH_INDEX channel_index; + + channel_index = pwr_ctrl->channel_index; + + if ((pwr_ctrl->channel_type < FIRST_CH) + || (pwr_ctrl->channel_type > LAST_CH)) + return -EINVAL; + + ptr = ptr_audio_codec_cnxt->transducer[pwr_ctrl->channel_type]; + + + /* aquire mutex */ + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + + pwr_ctrl->ctrl_switch = ste_audio_io_core_api_get_status(channel_index, + ptr->is_power_up); + + /* release mutex */ + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + return 
0; + +} + +int ste_audio_io_core_api_loop_control(struct audioio_loop_ctrl_t *loop_ctrl) +{ + int error = 0; + struct transducer_context_t *ptr = NULL; + + if ((loop_ctrl->channel_type < FIRST_CH) + || (loop_ctrl->channel_type > LAST_CH)) + return -EINVAL; + + ptr = ptr_audio_codec_cnxt->transducer[loop_ctrl->channel_type]; + + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + if (AUDIOIO_COMMON_ON == loop_ctrl->ctrl_switch) { + if (ptr->enable_loop) { + error = ptr->enable_loop(loop_ctrl->channel_index, + loop_ctrl->hw_loop, + loop_ctrl->loop_gain, + ptr_audio_codec_cnxt->dev, + ptr_audio_codec_cnxt->transducer); + if (error) + dev_err(ptr_audio_codec_cnxt->dev, + "Loop enable failed for hw loop = %d, error = %d ", + (int)loop_ctrl->hw_loop, error); + } else { + error = -EFAULT; + dev_err(ptr_audio_codec_cnxt->dev, + "Hw Loop enable does not exist for channel= %d, error = %d ", + (int)loop_ctrl->channel_type, error); + } + } else { + if (ptr->disable_loop) { + error = ptr->disable_loop(loop_ctrl->channel_index, + loop_ctrl->hw_loop, + ptr_audio_codec_cnxt->dev, + ptr_audio_codec_cnxt->transducer); + if (error) + dev_err(ptr_audio_codec_cnxt->dev, + "Loop disable failed for hw loop = %d, error = %d ", + (int)loop_ctrl->hw_loop, error); + } else { + error = -EFAULT; + dev_err(ptr_audio_codec_cnxt->dev, + "Hw Loop disable does not exist for channel= %d, error = %d ", + (int)loop_ctrl->channel_type, error); + } + } + + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + return error; +} + +int ste_audio_io_core_api_loop_status(struct audioio_loop_ctrl_t *loop_ctrl) +{ + return 0; +} + +int ste_audio_io_core_api_get_transducer_gain_capability( + struct audioio_get_gain_t *get_gain) +{ + return 0; +} + +int ste_audio_io_core_api_gain_capabilities_loop( + struct audioio_gain_loop_t *gain_loop) +{ + if ((gain_loop->channel_type < FIRST_CH) + || (gain_loop->channel_type > LAST_CH)) + return -EINVAL; + + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + 
gain_loop->num_loop = + transducer_max_no_Of_supported_loops[gain_loop->channel_type]; + gain_loop->max_gains = max_no_of_loop_gains[gain_loop->channel_type]; + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + return 0; +} + +int ste_audio_io_core_api_supported_loops( + struct audioio_support_loop_t *support_loop) +{ + if ((support_loop->channel_type < FIRST_CH) + || (support_loop->channel_type > LAST_CH)) + return -EINVAL; + + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + support_loop->spprtd_loop_index = + transducer_no_Of_supported_loop_indexes[support_loop->channel_type]; + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + return 0; +} + +int ste_audio_io_core_api_gain_descriptor_transducer( + struct audioio_gain_desc_trnsdr_t *gdesc_trnsdr) +{ + return 0; +} +/** + * @brief Control for muting a specific channel in HW + * @mute_trnsdr Pointer to the structure __audioio_mute_trnsdr + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_core_api_mute_control_transducer( + struct audioio_mute_trnsdr_t *mute_trnsdr) +{ + int error = 0; + struct transducer_context_t *ptr = NULL; + enum AUDIOIO_CH_INDEX channel_index; + + channel_index = mute_trnsdr->channel_index; + + if ((mute_trnsdr->channel_type < FIRST_CH) + || (mute_trnsdr->channel_type > LAST_CH)) + return -EINVAL; + + ptr = ptr_audio_codec_cnxt->transducer[mute_trnsdr->channel_type]; + + /* aquire mutex */ + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + if (AUDIOIO_COMMON_ON == mute_trnsdr->ctrl_switch) { + if (ptr->mute_func) { + error = ptr->mute_func(mute_trnsdr->channel_index, + ptr_audio_codec_cnxt->dev); + if (0 == error) { + ste_audio_io_core_api_store_data(channel_index , + ptr->is_muted, AUDIOIO_TRUE); + } + } + } else { + if (ptr->unmute_func) { + if (0 == ptr->unmute_func(channel_index, ptr->gain, + ptr_audio_codec_cnxt->dev)) { + ste_audio_io_core_api_store_data(channel_index, + ptr->is_muted, AUDIOIO_FALSE); + } + } + } + + /* release 
mutex */ + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + return error; +} +/** + * @brief Query state of mute on specified channel + * @mute_trnsdr Pointer to the structure __audioio_mute_trnsdr + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_core_api_mute_status_transducer( + struct audioio_mute_trnsdr_t *mute_trnsdr) +{ + struct transducer_context_t *ptr = NULL; + enum AUDIOIO_CH_INDEX channel_index; + + channel_index = mute_trnsdr->channel_index; + + if ((mute_trnsdr->channel_type < FIRST_CH) + || (mute_trnsdr->channel_type > LAST_CH)) + return -EINVAL; + + ptr = ptr_audio_codec_cnxt->transducer[mute_trnsdr->channel_type]; + + /* aquire mutex */ + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + mute_trnsdr->ctrl_switch = ste_audio_io_core_api_get_status( + channel_index, ptr->is_muted); + /* release mutex */ + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + return 0; +} +/** + * @brief control the fading on the transducer called on. + * @fade_ctrl Pointer to the structure __audioio_fade_ctrl + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_core_api_fading_control(struct audioio_fade_ctrl_t *fade_ctrl) +{ + int error = 0; + struct transducer_context_t *ptr = NULL; + + if ((fade_ctrl->channel_type < FIRST_CH) + || (fade_ctrl->channel_type > LAST_CH)) + return -EINVAL; + ptr = ptr_audio_codec_cnxt->transducer[fade_ctrl->channel_type]; + + /* aquire mutex */ + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + if (AUDIOIO_COMMON_ON == fade_ctrl->ctrl_switch) + error = ptr->enable_fade_func(ptr_audio_codec_cnxt->dev); + + else + error = ptr->disable_fade_func(ptr_audio_codec_cnxt->dev); + + + /* release mutex */ + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + return error; +} +/** + * @brief control the low power mode of headset. 
+ * @burst_ctrl Pointer to the structure __audioio_burst_ctrl + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_core_api_burstmode_control( + struct audioio_burst_ctrl_t *burst_ctrl) +{ + int error = 0; + struct transducer_context_t *ptr = NULL; + int burst_fifo_switch_frame; + + burst_fifo_switch_frame = burst_ctrl->burst_fifo_switch_frame; + + if ((burst_ctrl->channel_type < FIRST_CH) + || (burst_ctrl->channel_type > LAST_CH)) + return -EINVAL; + ptr = ptr_audio_codec_cnxt->transducer[burst_ctrl->channel_type]; + + /* aquire mutex */ + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + if (AUDIOIO_COMMON_ON == burst_ctrl->ctrl_switch) { + if (ptr->switch_to_burst_func) + error = ptr->switch_to_burst_func( + burst_fifo_switch_frame, + ptr_audio_codec_cnxt->dev); + } else + if (ptr->switch_to_normal_func) + error = ptr->switch_to_normal_func( + ptr_audio_codec_cnxt->dev); + /* release mutex */ + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + return error; +} +/** + * @brief Convert channel index to array index + * @channel_index Channel Index of transducer + * @return Array index corresponding to the specified channel index + */ + +int convert_channel_index_to_array_index(enum AUDIOIO_CH_INDEX channel_index) +{ + if (channel_index & e_CHANNEL_1) + return 0; + else if (channel_index & e_CHANNEL_2) + return 1; + else if (channel_index & e_CHANNEL_3) + return 2; + else + return 3; +} + +/** + * @brief Set individual gain along the HW path of a specified channel + * @gctrl_trnsdr Pointer to the structure __audioio_gain_ctrl_trnsdr + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_core_api_gain_control_transducer( + struct audioio_gain_ctrl_trnsdr_t *gctrl_trnsdr) +{ + struct transducer_context_t *ptr = NULL; + enum AUDIOIO_CH_INDEX channel_index; + int ch_array_index; + u16 gain_index; + int gain_value; + u32 linear; + int channel_type; + int error; + int min_gain, max_gain, gain; + + if 
((gctrl_trnsdr->channel_type < FIRST_CH) + || (gctrl_trnsdr->channel_type > LAST_CH)) + return -EINVAL; + + if (gctrl_trnsdr->gain_index >= MAX_NO_GAINS) + return -EINVAL; + + ptr = ptr_audio_codec_cnxt->transducer[gctrl_trnsdr->channel_type]; + channel_index = gctrl_trnsdr->channel_index; + gain_index = gctrl_trnsdr->gain_index; + gain_value = gctrl_trnsdr->gain_value; + linear = gctrl_trnsdr->linear; + channel_type = gctrl_trnsdr->channel_type; + + ch_array_index = convert_channel_index_to_array_index(channel_index); + if (linear) { /* Gain is in the range 0 to 100 */ + min_gain = gain_descriptor[channel_type]\ + [ch_array_index][gain_index].min_gain; + max_gain = gain_descriptor[channel_type]\ + [ch_array_index][gain_index].max_gain; + + gain = ((gain_value * (max_gain - min_gain))/100) + min_gain; + } else + /* Convert to db */ + gain = gain_value/100; + + gain_value = gain; + +#if 1 + if (gain_index >= transducer_no_of_gains[channel_type]) + return -EINVAL; + + if (gain_value < gain_descriptor[channel_type]\ + [ch_array_index][gain_index].min_gain) + return -EINVAL; + + if (gain_value > gain_descriptor[channel_type]\ + [ch_array_index][gain_index].max_gain) + return -EINVAL; + +#endif + + /* aquire mutex */ + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + error = ptr->set_gain_func(channel_index, + gain_index, gain_value, linear, + ptr_audio_codec_cnxt->dev); + if (0 == error) + ste_audio_io_core_api_store_data(channel_index , + ptr->gain, gain_value); + + + /* release mutex */ + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + return error; +} +/** + * @brief Get individual gain along the HW path of a specified channel + * @gctrl_trnsdr Pointer to the structure __audioio_gain_ctrl_trnsdr + * @return 0 on success otherwise negative error code + */ + + +int ste_audio_io_core_api_gain_query_transducer( + struct audioio_gain_ctrl_trnsdr_t *gctrl_trnsdr) +{ + struct transducer_context_t *ptr = NULL; + enum AUDIOIO_CH_INDEX channel_index; + u16 
gain_index; + u32 linear; + int left_volume, right_volume; + int max_gain, min_gain; + int ch_array_index; + + if ((gctrl_trnsdr->channel_type < FIRST_CH) + || (gctrl_trnsdr->channel_type > LAST_CH)) + return -EINVAL; + + if (gctrl_trnsdr->gain_index >= MAX_NO_GAINS) + return -EINVAL; + + ptr = ptr_audio_codec_cnxt->transducer[gctrl_trnsdr->channel_type]; + + channel_index = gctrl_trnsdr->channel_index; + gain_index = gctrl_trnsdr->gain_index; + linear = gctrl_trnsdr->linear; + + ptr->get_gain_func(&left_volume, &right_volume, gain_index, + ptr_audio_codec_cnxt->dev); + + ch_array_index = convert_channel_index_to_array_index(channel_index); + max_gain = gain_descriptor[gctrl_trnsdr->channel_type]\ + [ch_array_index][gain_index].max_gain; + min_gain = gain_descriptor[gctrl_trnsdr->channel_type]\ + [ch_array_index][gain_index].min_gain; + + switch (channel_index) { + case e_CHANNEL_1: + gctrl_trnsdr->gain_value = linear ? \ + min_gain+left_volume*(max_gain-min_gain)/100 : left_volume; + break; + case e_CHANNEL_2: + gctrl_trnsdr->gain_value = linear ? 
\ + min_gain+right_volume*(max_gain-min_gain)/100 : right_volume; + break; + case e_CHANNEL_3: + break; + case e_CHANNEL_4: + break; + case e_CHANNEL_ALL: + if (left_volume == right_volume) { + if (linear) + gctrl_trnsdr->gain_value = + min_gain+right_volume*(max_gain-min_gain)/100; + else + gctrl_trnsdr->gain_value = right_volume; + } + } + + return 0; +} + + +int ste_audio_io_core_api_fsbitclk_control( + struct audioio_fsbitclk_ctrl_t *fsbitclk_ctrl) +{ + int error = 0; + + if (AUDIOIO_COMMON_ON == fsbitclk_ctrl->ctrl_switch) + error = HW_ACODEC_MODIFY_WRITE(IF0_IF1_MASTER_CONF_REG, + EN_FSYNC_BITCLK, 0); + else + error = HW_ACODEC_MODIFY_WRITE(IF0_IF1_MASTER_CONF_REG, 0, + EN_FSYNC_BITCLK); + + return error; +} +int ste_audio_io_core_api_pseudoburst_control( + struct audioio_pseudoburst_ctrl_t *pseudoburst_ctrl) +{ + int error = 0; + + return error; +} +int ste_audio_io_core_debug(int x) +{ + debug_audioio(x); + +return 0; +} + +/** + * ste_audioio_vibrator_alloc() + * @client: Client id which allocates vibrator + * @mask: Mask against which vibrator usage is checked + * + * This function allocates vibrator. + * Mask is added here as audioio driver controls left and right vibrator + * separately (can work independently). In case when audioio has allocated + * one of its channels (left or right) it should be still able to allocate + * the other channel. 
+ * + * Returns: + * 0 - Success + * -EBUSY - other client already registered + **/ +int ste_audioio_vibrator_alloc(int client, int mask) +{ + int error = 0; + + /* Check if other client is already using vibrator */ + if (ptr_audio_codec_cnxt->vibra_client & ~mask) + error = -EBUSY; + else + ptr_audio_codec_cnxt->vibra_client |= client; + + return error; +} + +/** + * ste_audioio_vibrator_release() + * @client: Client id which releases vibrator + * + * This function releases vibrator + **/ +void ste_audioio_vibrator_release(int client) +{ + ptr_audio_codec_cnxt->vibra_client &= ~client; +} + +/** + * ste_audioio_vibrator_pwm_control() + * @client: Client id which will use vibrator + * @left_speed: Left vibrator speed + * @right_speed: Right vibrator speed + * + * This function controls vibrator using PWM source + * + * Returns: + * 0 - success + * -EBUSY - Vibrator already used + **/ +int ste_audioio_vibrator_pwm_control( + int client, + struct ste_vibra_speed left_speed, + struct ste_vibra_speed right_speed) +{ + int error = 0; + + mutex_lock(&ptr_audio_codec_cnxt->audio_io_mutex); + + /* Try to allocate vibrator for given client */ + error = ste_audioio_vibrator_alloc(client, client); + + mutex_unlock(&ptr_audio_codec_cnxt->audio_io_mutex); + + if (error) + return error; + + /* Duty cycle supported by vibrator's PWM is 0-100 */ + if (left_speed.positive > STE_AUDIOIO_VIBRATOR_MAX_SPEED) + left_speed.positive = STE_AUDIOIO_VIBRATOR_MAX_SPEED; + + if (right_speed.positive > STE_AUDIOIO_VIBRATOR_MAX_SPEED) + right_speed.positive = STE_AUDIOIO_VIBRATOR_MAX_SPEED; + + if (left_speed.negative > STE_AUDIOIO_VIBRATOR_MAX_SPEED) + left_speed.negative = STE_AUDIOIO_VIBRATOR_MAX_SPEED; + + if (right_speed.negative > STE_AUDIOIO_VIBRATOR_MAX_SPEED) + right_speed.negative = STE_AUDIOIO_VIBRATOR_MAX_SPEED; + + if (left_speed.negative || right_speed.negative || + left_speed.positive || right_speed.positive) { + /* Power up audio block for vibrator */ + error = 
ste_audio_io_core_api_powerup_audiocodec( + STE_AUDIOIO_POWER_VIBRA); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Audio power up failed %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, + (EN_VIBL_MASK|EN_VIBR_MASK), 0); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Powerup Vibrator Class-D driver %d", + error); + return error; + } + + error = HW_REG_WRITE(VIB_DRIVER_CONF_REG, 0xff); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Enable Vibrator PWM generator %d", + error); + return error; + } + } + + error = HW_REG_WRITE(PWM_VIBNL_CONF_REG, left_speed.negative); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Write Left Vibrator negative PWM %d", error); + goto err_cleanup; + } + + error = HW_REG_WRITE(PWM_VIBPL_CONF_REG, left_speed.positive); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Write Left Vibrator positive PWM %d", error); + goto err_cleanup; + } + + error = HW_REG_WRITE(PWM_VIBNR_CONF_REG, right_speed.negative); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Write Right Vibrator negative PWM %d", error); + goto err_cleanup; + } + + error = HW_REG_WRITE(PWM_VIBPR_CONF_REG, right_speed.positive); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Write Right Vibrator positive PWM %d", error); + goto err_cleanup; + } + + if (!left_speed.negative && !right_speed.negative && + !left_speed.positive && !right_speed.positive) { + error = HW_REG_WRITE(VIB_DRIVER_CONF_REG, 0); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Disable PWM Vibrator generator %d", + error); + goto err_cleanup; + } + + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, + 0, (EN_VIBL_MASK|EN_VIBR_MASK)); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Power down Vibrator Class-D driver %d", + error); + goto err_cleanup; + } + + /* Power down audio block */ + error = ste_audio_io_core_api_powerdown_audiocodec( + STE_AUDIOIO_POWER_VIBRA); + if (error) { + 
dev_err(ptr_audio_codec_cnxt->dev, + "Audio power down failed %d", error); + goto err_cleanup; + } + } + +err_cleanup: + /* Release client */ + if (!left_speed.negative && !right_speed.negative && + !left_speed.positive && !right_speed.positive) { + mutex_lock(&ptr_audio_codec_cnxt->audio_io_mutex); + ste_audioio_vibrator_release(client); + mutex_unlock(&ptr_audio_codec_cnxt->audio_io_mutex); + } + return error; +} +EXPORT_SYMBOL(ste_audioio_vibrator_pwm_control); + +/** + * @brief This function sets FIR coefficients + * @fir_coeffs: pointer to structure audioio_fir_coefficients_t + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_core_api_fir_coeffs_control(struct audioio_fir_coefficients_t + *fir_coeffs) +{ + unsigned char coefficient; + int i, error; + + if (fir_coeffs->start_addr >= STE_AUDIOIO_MAX_COEFFICIENTS) + return -EINVAL; + + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + + error = HW_REG_WRITE(SIDETONE_FIR_ADDR_REG, fir_coeffs->start_addr); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "FIR start address write failed %d", error); + goto err_cleanup; + } + + for (i = fir_coeffs->start_addr; + i < STE_AUDIOIO_MAX_COEFFICIENTS; i++) { + + coefficient = (fir_coeffs->coefficients[i]>>8) & 0xff; + error = HW_REG_WRITE(SIDETONE_FIR_COEFF_MSB_REG, coefficient); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "FIR coefficient [%d] msb write failed %d", i, error); + goto err_cleanup; + } + + coefficient = fir_coeffs->coefficients[i] & 0xff; + error = HW_REG_WRITE(SIDETONE_FIR_COEFF_LSB_REG, coefficient); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "FIR coefficient [%d] lsb write failed %d", i, error); + goto err_cleanup; + } + } + + error = HW_ACODEC_MODIFY_WRITE(SIDETONE_FIR_ADDR_REG, + APPLY_FIR_COEFFS_MASK, 0); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "FIR coefficients activation failed %d", error); + goto err_cleanup; + } + + error = HW_ACODEC_MODIFY_WRITE(FILTERS_CONTROL_REG, + 
FIR_FILTERCONTROL, 0); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "ST FIR Filters enable failed %d", error); + goto err_cleanup; + } + +err_cleanup: + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + return error; +} + +/** + * @brief This function sets and enable clock + * @clk_type: pointer to structure audioio_clk_select_t + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_core_clk_select_control(struct audioio_clk_select_t + *clk_type) +{ + int error = 0; + mutex_lock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + if (ptr_audio_codec_cnxt->clk_type != clk_type->required_clk) { + /* disable running clk*/ + ste_audio_io_disable_audio_clock(); + /* assign required clk*/ + ptr_audio_codec_cnxt->clk_type = clk_type->required_clk; + /* enable required clk*/ + error = ste_audio_io_enable_audio_clock(); + if (error) { + dev_err(ptr_audio_codec_cnxt->dev, + "Clock enabled failed = %d", error); + goto err_cleanup; + } + } +err_cleanup: + mutex_unlock(&(ptr_audio_codec_cnxt->audio_io_mutex)); + return error; +} + diff --git a/drivers/misc/audio_io_dev/ste_audio_io_core.h b/drivers/misc/audio_io_dev/ste_audio_io_core.h new file mode 100644 index 00000000000..44849f1d5e8 --- /dev/null +++ b/drivers/misc/audio_io_dev/ste_audio_io_core.h @@ -0,0 +1,138 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Deepak KARDA/ deepak.karda@stericsson.com for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2. 
+ */ + +#ifndef _AUDIOIO_CORE_H_ +#define _AUDIOIO_CORE_H_ + +#include <mach/ste_audio_io_ioctl.h> +#include "ste_audio_io_func.h" +#include "ste_audio_io_hwctrl_common.h" + +#define MAX_NO_CHANNELS 4 + +#define STE_AUDIOIO_POWER_AUDIO 1 +#define STE_AUDIOIO_POWER_VIBRA 2 + +struct transducer_context_t { + /* public variables */ + int gain[MAX_NO_CHANNELS]; + int is_muted[MAX_NO_CHANNELS]; + int is_power_up[MAX_NO_CHANNELS]; + /* public funcs */ + int (*pwr_up_func)(enum AUDIOIO_CH_INDEX, struct device *); + int (*pwr_down_func)(enum AUDIOIO_CH_INDEX, struct device *); + int (*pwr_state_func)(struct device *); + int (*set_gain_func)(enum AUDIOIO_CH_INDEX, u16, int, u32, + struct device *); + int (*get_gain_func)(int *, int *, u16, struct device *); + int (*mute_func)(enum AUDIOIO_CH_INDEX, struct device *); + int (*unmute_func)(enum AUDIOIO_CH_INDEX, int *, struct device *); + int (*mute_state_func)(struct device *); + int (*enable_fade_func)(struct device *); + int (*disable_fade_func)(struct device *); + int (*switch_to_burst_func)(int, struct device *); + int (*switch_to_normal_func)(struct device *); + int (*enable_loop)(enum AUDIOIO_CH_INDEX, enum AUDIOIO_HAL_HW_LOOPS, + int, struct device *, void *); + int (*disable_loop)(enum AUDIOIO_CH_INDEX, enum AUDIOIO_HAL_HW_LOOPS, + struct device *, void *); +}; + +struct audiocodec_context_t { + int audio_codec_powerup; + int is_audio_clk_enabled; + enum AUDIOIO_CLK_TYPE clk_type; + int power_client; + int vibra_client; + struct mutex audio_io_mutex; + struct mutex vibrator_mutex; + struct transducer_context_t *transducer[MAX_NO_TRANSDUCERS]; + struct device *dev; + int (*gpio_altf_init) (void); + int (*gpio_altf_exit) (void); +}; + + +int ste_audio_io_core_api_access_read(struct audioio_data_t *dev_data); + +int ste_audio_io_core_api_access_write(struct audioio_data_t *dev_data); + +int ste_audio_io_core_api_power_control_transducer( + struct audioio_pwr_ctrl_t *pwr_ctrl); + +int 
ste_audio_io_core_api_power_status_transducer( + struct audioio_pwr_ctrl_t *pwr_ctrl); + +int ste_audio_io_core_api_loop_control(struct audioio_loop_ctrl_t *loop_ctrl); + +int ste_audio_io_core_api_loop_status(struct audioio_loop_ctrl_t *loop_ctrl); + +int ste_audio_io_core_api_get_transducer_gain_capability( + struct audioio_get_gain_t *get_gain); + +int ste_audio_io_core_api_gain_capabilities_loop( + struct audioio_gain_loop_t *gain_loop); + +int ste_audio_io_core_api_supported_loops( + struct audioio_support_loop_t *support_loop); + +int ste_audio_io_core_api_gain_descriptor_transducer( + struct audioio_gain_desc_trnsdr_t *gdesc_trnsdr); + +int ste_audio_io_core_api_gain_control_transducer( + struct audioio_gain_ctrl_trnsdr_t *gctrl_trnsdr); + +int ste_audio_io_core_api_gain_query_transducer( + struct audioio_gain_ctrl_trnsdr_t *gctrl_trnsdr); + +int ste_audio_io_core_api_mute_control_transducer( + struct audioio_mute_trnsdr_t *mute_trnsdr); + +int ste_audio_io_core_api_mute_status_transducer( + struct audioio_mute_trnsdr_t *mute_trnsdr); + +int ste_audio_io_core_api_fading_control(struct audioio_fade_ctrl_t *fade_ctrl); + +int ste_audio_io_core_api_burstmode_control( + struct audioio_burst_ctrl_t *burst_ctrl); + +int ste_audio_io_core_api_powerup_audiocodec(int power_client); + +int ste_audio_io_core_api_powerdown_audiocodec(int power_client); + +int ste_audio_io_core_api_init_data(struct platform_device *pdev); + +bool ste_audio_io_core_is_ready_for_suspend(void); +void ste_audio_io_core_api_free_data(void); + +int ste_audio_io_core_api_fsbitclk_control( + struct audioio_fsbitclk_ctrl_t *fsbitclk_ctrl); +int ste_audio_io_core_api_pseudoburst_control( + struct audioio_pseudoburst_ctrl_t *pseudoburst_ctrl); + +void ste_audio_io_core_api_store_data(enum AUDIOIO_CH_INDEX channel_index, + int *ptr, int value); + +int ste_audioio_vibrator_alloc(int client, int mask); + +void ste_audioio_vibrator_release(int client); + +enum AUDIOIO_COMMON_SWITCH 
ste_audio_io_core_api_get_status( + enum AUDIOIO_CH_INDEX channel_index, int *ptr); + +int ste_audio_io_core_api_acodec_power_control(struct audioio_acodec_pwr_ctrl_t + *audio_acodec_pwr_ctrl); + +int ste_audio_io_core_api_fir_coeffs_control(struct audioio_fir_coefficients_t + *fir_coeffs); + +int ste_audio_io_core_clk_select_control(struct audioio_clk_select_t + *clk_type); + +int ste_audio_io_core_debug(int x); + +#endif /* _AUDIOIO_CORE_H_ */ + diff --git a/drivers/misc/audio_io_dev/ste_audio_io_dev.c b/drivers/misc/audio_io_dev/ste_audio_io_dev.c new file mode 100644 index 00000000000..edea31e4315 --- /dev/null +++ b/drivers/misc/audio_io_dev/ste_audio_io_dev.c @@ -0,0 +1,759 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Deepak KARDA/ deepak.karda@stericsson.com for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/ioctl.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/cdev.h> +#include <linux/uaccess.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/gpio.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/miscdevice.h> +#include "ste_audio_io_dev.h" + +#define STR_DEBUG_ON "debug on" +#define AUDIOIO_DEVNAME "ab8500-codec" + +static int ste_audio_io_open(struct inode *inode, struct file *filp); +static int ste_audio_io_release(struct inode *inode, struct file *filp); +static long ste_audio_io_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +static int ste_audio_io_cmd_parser(unsigned int cmd, unsigned long arg); +static ssize_t ste_audio_io_write(struct file *filp, + const char __user *buf, size_t count, loff_t *f_pos); + + +/** + * @brief Check IOCTL type, command no and access direction + * @ inode value corresponding to the file descriptor + * @file value corresponding to the file descriptor + * @cmd IOCTL 
command code + * @arg Command argument + * @return 0 on success otherwise negative error code + */ +static long ste_audio_io_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int retval = 0; + int err = 0; + + /* Check type and command number */ + if (_IOC_TYPE(cmd) != AUDIOIO_IOC_MAGIC) + return -ENOTTY; + + /* IOC_DIR is from the user perspective, while access_ok is + * from the kernel perspective; so they look reversed. + */ + if (_IOC_DIR(cmd) & _IOC_READ) + err = !access_ok(VERIFY_WRITE, (void __user *)arg, + _IOC_SIZE(cmd)); + if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE) + err = !access_ok(VERIFY_READ, (void __user *)arg, + _IOC_SIZE(cmd)); + if (err) + return -EFAULT; + + retval = ste_audio_io_cmd_parser(cmd, arg); + + return retval; +} +/** + * @brief IOCTL call to read the value from AB8500 device + * @cmd IOCTL command code + * @arg Command argument + * @return 0 on success otherwise negative error code + */ + +static int process_read_register_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_data_t *audio_dev_data; + + audio_dev_data = (struct audioio_data_t *)&cmd_data; + + if (copy_from_user(audio_dev_data, (void __user *)arg, + sizeof(struct audioio_data_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_access_read(audio_dev_data); + if (0 != retval) + return retval; + + if (copy_to_user((void __user *)arg, audio_dev_data, + sizeof(struct audioio_data_t))) + return -EFAULT; + return 0; +} +/** + * @brief IOCTL call to write the given value to the AB8500 device + * @cmd IOCTL command code + * @arg Command argument + * @return 0 on success otherwise negative error code + */ + +static int process_write_register_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_data_t *audio_dev_data; + + audio_dev_data = (struct audioio_data_t *)&cmd_data; + + if (copy_from_user(audio_dev_data, (void __user *)arg, + 
sizeof(struct audioio_data_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_access_write(audio_dev_data); + + return retval; +} +/** + * @brief IOCTL call to control the power on/off of hardware components + * @cmd IOCTL command code + * @arg Command argument + * @return 0 on success otherwise negative error code + */ + +static int process_pwr_ctrl_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_pwr_ctrl_t *audio_pwr_ctrl; + + audio_pwr_ctrl = (struct audioio_pwr_ctrl_t *)&cmd_data; + + if (copy_from_user(audio_pwr_ctrl, (void __user *)arg, + sizeof(struct audioio_pwr_ctrl_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_power_control_transducer(audio_pwr_ctrl); + + return retval; +} + +static int process_pwr_sts_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_pwr_ctrl_t *audio_pwr_sts; + + audio_pwr_sts = (struct audioio_pwr_ctrl_t *)&cmd_data; + + if (copy_from_user(audio_pwr_sts, (void __user *)arg, + sizeof(struct audioio_pwr_ctrl_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_power_status_transducer(audio_pwr_sts); + if (0 != retval) + return retval; + + if (copy_to_user((void __user *)arg, audio_pwr_sts, + sizeof(struct audioio_pwr_ctrl_t))) + return -EFAULT; + + return 0; +} + +static int process_lp_ctrl_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_loop_ctrl_t *audio_lp_ctrl; + + audio_lp_ctrl = (struct audioio_loop_ctrl_t *)&cmd_data; + + if (copy_from_user(audio_lp_ctrl, (void __user *)arg, + sizeof(struct audioio_loop_ctrl_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_loop_control(audio_lp_ctrl); + + return retval; +} + +static int process_lp_sts_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_loop_ctrl_t *audio_lp_sts; + + audio_lp_sts = 
(struct audioio_loop_ctrl_t *)&cmd_data; + + + if (copy_from_user(audio_lp_sts, (void __user *)arg, + sizeof(struct audioio_loop_ctrl_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_loop_status(audio_lp_sts); + if (0 != retval) + return retval; + + if (copy_to_user((void __user *)arg, audio_lp_sts, + sizeof(struct audioio_loop_ctrl_t))) + return -EFAULT; + return 0; +} + +static int process_get_trnsdr_gain_capability_cmd(unsigned int cmd, + unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_get_gain_t *audio_trnsdr_gain; + + audio_trnsdr_gain = (struct audioio_get_gain_t *)&cmd_data; + + if (copy_from_user(audio_trnsdr_gain, (void __user *)arg, + sizeof(struct audioio_get_gain_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_get_transducer_gain_capability( + audio_trnsdr_gain); + if (0 != retval) + return retval; + + if (copy_to_user((void __user *)arg, audio_trnsdr_gain, + sizeof(struct audioio_get_gain_t))) + return -EFAULT; + return 0; +} + +static int process_gain_cap_loop_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_gain_loop_t *audio_gain_loop; + + audio_gain_loop = (struct audioio_gain_loop_t *)&cmd_data; + + if (copy_from_user(audio_gain_loop, (void __user *)arg, + sizeof(struct audioio_gain_loop_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_gain_capabilities_loop(audio_gain_loop); + if (0 != retval) + return retval; + + if (copy_to_user((void __user *)arg, audio_gain_loop, + sizeof(struct audioio_gain_loop_t))) + return -EFAULT; + return 0; +} + + +static int process_support_loop_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_support_loop_t *audio_spprt_loop; + + audio_spprt_loop = (struct audioio_support_loop_t *)&cmd_data; + + if (copy_from_user(audio_spprt_loop, (void __user *)arg, + sizeof(struct audioio_support_loop_t))) + return -EFAULT; + 
+ retval = ste_audio_io_core_api_supported_loops(audio_spprt_loop); + if (0 != retval) + return retval; + + if (copy_to_user((void __user *)arg, audio_spprt_loop, + sizeof(struct audioio_support_loop_t))) + return -EFAULT; + return 0; +} + + +static int process_gain_desc_trnsdr_cmd(unsigned int cmd, unsigned long arg) + +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_gain_desc_trnsdr_t *audio_gain_desc; + + audio_gain_desc = (struct audioio_gain_desc_trnsdr_t *)&cmd_data; + + if (copy_from_user(audio_gain_desc, (void __user *)arg, + sizeof(struct audioio_gain_desc_trnsdr_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_gain_descriptor_transducer( + audio_gain_desc); + if (0 != retval) + return retval; + + if (copy_to_user((void __user *)arg, audio_gain_desc, + sizeof(struct audioio_gain_desc_trnsdr_t))) + return -EFAULT; + return 0; +} + + +static int process_gain_ctrl_trnsdr_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_gain_ctrl_trnsdr_t *audio_gain_ctrl; + + audio_gain_ctrl = (struct audioio_gain_ctrl_trnsdr_t *)&cmd_data; + + if (copy_from_user(audio_gain_ctrl, (void __user *)arg, + sizeof(struct audioio_gain_ctrl_trnsdr_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_gain_control_transducer( + audio_gain_ctrl); + + return retval; +} + +static int process_gain_query_trnsdr_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_gain_ctrl_trnsdr_t *audio_gain_query; + + audio_gain_query = (struct audioio_gain_ctrl_trnsdr_t *)&cmd_data; + + if (copy_from_user(audio_gain_query, (void __user *)arg, + sizeof(struct audioio_gain_ctrl_trnsdr_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_gain_query_transducer(audio_gain_query); + if (0 != retval) + return retval; + + if (copy_to_user((void __user *)arg, audio_gain_query, + sizeof(struct audioio_gain_ctrl_trnsdr_t))) + return -EFAULT; 
+ return 0; +} + +static int process_mute_ctrl_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_mute_trnsdr_t *audio_mute_ctrl; + + audio_mute_ctrl = (struct audioio_mute_trnsdr_t *)&cmd_data; + if (copy_from_user(audio_mute_ctrl , (void __user *)arg, + sizeof(struct audioio_mute_trnsdr_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_mute_control_transducer( + audio_mute_ctrl); + + return retval; +} + +static int process_mute_sts_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_mute_trnsdr_t *audio_mute_sts; + + audio_mute_sts = (struct audioio_mute_trnsdr_t *)&cmd_data; + + if (copy_from_user(audio_mute_sts, (void __user *)arg, + sizeof(struct audioio_mute_trnsdr_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_mute_status_transducer(audio_mute_sts); + if (0 != retval) + return retval; + + if (copy_to_user((void __user *)arg, audio_mute_sts, + sizeof(struct audioio_mute_trnsdr_t))) + return -EFAULT; + return 0; +} + +static int process_fade_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_fade_ctrl_t *audio_fade; + audio_fade = (struct audioio_fade_ctrl_t *)&cmd_data; + + if (copy_from_user(audio_fade , (void __user *)arg, + sizeof(struct audioio_fade_ctrl_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_fading_control(audio_fade); + + return retval; +} + +static int process_burst_ctrl_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_burst_ctrl_t *audio_burst; + + audio_burst = (struct audioio_burst_ctrl_t *)&cmd_data; + if (copy_from_user(audio_burst , (void __user *)arg, + sizeof(struct audioio_burst_ctrl_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_burstmode_control(audio_burst); + + return retval; + + return 0; +} + +static int 
process_fsbitclk_ctrl_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_fsbitclk_ctrl_t *audio_fsbitclk; + + audio_fsbitclk = (struct audioio_fsbitclk_ctrl_t *)&cmd_data; + + if (copy_from_user(audio_fsbitclk , (void __user *)arg, + sizeof(struct audioio_fsbitclk_ctrl_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_fsbitclk_control(audio_fsbitclk); + + return retval; + + return 0; + +} + +static int process_pseudoburst_ctrl_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_pseudoburst_ctrl_t *audio_pseudoburst; + + audio_pseudoburst = (struct audioio_pseudoburst_ctrl_t *)&cmd_data; + + if (copy_from_user(audio_pseudoburst , (void __user *)arg, + sizeof(struct audioio_pseudoburst_ctrl_t))) + return -EFAULT; + + retval = ste_audio_io_core_api_pseudoburst_control(audio_pseudoburst); + + return retval; + + return 0; + +} +static int process_audiocodec_pwr_ctrl_cmd(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + union audioio_cmd_data_t cmd_data; + struct audioio_acodec_pwr_ctrl_t *audio_acodec_pwr_ctrl; + audio_acodec_pwr_ctrl = (struct audioio_acodec_pwr_ctrl_t *)&cmd_data; + if (copy_from_user(audio_acodec_pwr_ctrl, (void __user *)arg, + sizeof(struct audioio_acodec_pwr_ctrl_t))) + return -EFAULT; + retval = ste_audio_io_core_api_acodec_power_control( + audio_acodec_pwr_ctrl); + return retval; +} + +static int process_fir_coeffs_ctrl_cmd(unsigned int cmd, unsigned long arg) +{ + int retval; + struct audioio_fir_coefficients_t *cmd_data; + cmd_data = kmalloc(sizeof(struct audioio_fir_coefficients_t), + GFP_KERNEL); + if (!cmd_data) + return -ENOMEM; + if (copy_from_user(cmd_data, (void __user *)arg, + sizeof(struct audioio_fir_coefficients_t))) { + kfree(cmd_data); + return -EFAULT; + } + retval = ste_audio_io_core_api_fir_coeffs_control(cmd_data); + kfree(cmd_data); + return retval; +} + +static int 
process_clk_select_cmd(unsigned int cmd, unsigned long arg) +{ + int retval; + struct audioio_clk_select_t *cmd_data; + cmd_data = kmalloc(sizeof(struct audioio_clk_select_t), + GFP_KERNEL); + if (!cmd_data) + return -ENOMEM; + if (copy_from_user(cmd_data, (void __user *)arg, + sizeof(struct audioio_clk_select_t))) { + kfree(cmd_data); + return -EFAULT; + } + retval = ste_audio_io_core_clk_select_control(cmd_data); + kfree(cmd_data); + return retval; +} + +static int ste_audio_io_cmd_parser(unsigned int cmd, unsigned long arg) +{ + int retval = 0; + + switch (cmd) { + case AUDIOIO_READ_REGISTER: + retval = process_read_register_cmd(cmd, arg); + break; + + case AUDIOIO_WRITE_REGISTER: + retval = process_write_register_cmd(cmd, arg); + break; + + case AUDIOIO_PWR_CTRL_TRNSDR: + retval = process_pwr_ctrl_cmd(cmd, arg); + break; + + case AUDIOIO_PWR_STS_TRNSDR: + retval = process_pwr_sts_cmd(cmd, arg); + break; + + case AUDIOIO_LOOP_CTRL: + retval = process_lp_ctrl_cmd(cmd, arg); + break; + + case AUDIOIO_LOOP_STS: + retval = process_lp_sts_cmd(cmd, arg); + break; + + case AUDIOIO_GET_TRNSDR_GAIN_CAPABILITY: + retval = process_get_trnsdr_gain_capability_cmd(cmd, arg); + break; + + case AUDIOIO_GAIN_CAP_LOOP: + retval = process_gain_cap_loop_cmd(cmd, arg); + break; + + case AUDIOIO_SUPPORT_LOOP: + retval = process_support_loop_cmd(cmd, arg); + break; + + case AUDIOIO_GAIN_DESC_TRNSDR: + retval = process_gain_desc_trnsdr_cmd(cmd, arg); + break; + + case AUDIOIO_GAIN_CTRL_TRNSDR: + retval = process_gain_ctrl_trnsdr_cmd(cmd, arg); + break; + + case AUDIOIO_GAIN_QUERY_TRNSDR: + retval = process_gain_query_trnsdr_cmd(cmd, arg); + break; + + case AUDIOIO_MUTE_CTRL_TRNSDR: + retval = process_mute_ctrl_cmd(cmd, arg); + break; + + case AUDIOIO_MUTE_STS_TRNSDR: + retval = process_mute_sts_cmd(cmd, arg); + break; + + case AUDIOIO_FADE_CTRL: + retval = process_fade_cmd(cmd, arg); + break; + + case AUDIOIO_BURST_CTRL: + retval = process_burst_ctrl_cmd(cmd, arg); + break; + + case 
AUDIOIO_FSBITCLK_CTRL: + retval = process_fsbitclk_ctrl_cmd(cmd, arg); + break; + + case AUDIOIO_PSEUDOBURST_CTRL: + retval = process_pseudoburst_ctrl_cmd(cmd, arg); + break; + + case AUDIOIO_AUDIOCODEC_PWR_CTRL: + retval = process_audiocodec_pwr_ctrl_cmd(cmd, arg); + break; + + case AUDIOIO_FIR_COEFFS_CTRL: + retval = process_fir_coeffs_ctrl_cmd(cmd, arg); + break; + case AUDIOIO_CLK_SELECT_CTRL: + retval = process_clk_select_cmd(cmd, arg); + break; + } + return retval; +} + +static int ste_audio_io_open(struct inode *inode, struct file *filp) +{ + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + return 0; +} + +static int ste_audio_io_release(struct inode *inode, struct file *filp) +{ + module_put(THIS_MODULE); + return 0; +} + +static ssize_t ste_audio_io_write(struct file *filp, + const char __user *buf, size_t count, loff_t *f_pos) +{ + char *x = kmalloc(count, GFP_KERNEL); + int debug_flag = 0; + + if (copy_from_user(x, buf, count)) + return -EFAULT; + + if (count >= strlen(STR_DEBUG_ON)) { + + if (!strncmp(STR_DEBUG_ON, x, strlen(STR_DEBUG_ON))) + debug_flag = 1; + else + debug_flag = 0; + } + + ste_audio_io_core_debug(debug_flag); + + kfree(x); + + return count; +} + +static const struct file_operations ste_audio_io_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = ste_audio_io_ioctl, + .open = ste_audio_io_open, + .release = ste_audio_io_release, + .write = ste_audio_io_write, +}; + +/** + * audio_io_misc_dev - Misc device config for audio_io + */ +static struct miscdevice audio_io_misc_dev = { + MISC_DYNAMIC_MINOR, + "audioio", + &ste_audio_io_fops +}; + +/** + * ste_audio_io_probe() - probe the device + * @pdev: pointer to the platform device structure + * + * This funtion is called after the driver is registered to platform + * device framework. It does allocate the memory for the internal + * data structure and intialized core APIs. 
+ */ +static int ste_audio_io_drv_probe(struct platform_device *pdev) +{ + int error; + + ste_audio_io_device = pdev; + + dev_dbg(&ste_audio_io_device->dev, "ste_audio_io device probe\n"); + + error = misc_register(&audio_io_misc_dev); + if (error) { + printk(KERN_WARNING "%s: registering misc device failed\n", + __func__); + return error; + } + + error = ste_audio_io_core_api_init_data(ste_audio_io_device); + if (error < 0) { + dev_err(&ste_audio_io_device->dev, + "ste_audioio_core_api_init_data failed err = %d", + error); + goto ste_audio_io_misc_deregister; + } + return 0; + +ste_audio_io_misc_deregister: + misc_deregister(&audio_io_misc_dev); + return error; +} + +/** + * ste_audio_io_remove() - Removes the device + * @pdev: pointer to the platform_device structure + * + * This function is called when this mnodule is removed using rmmod + */ +static int ste_audio_io_drv_remove(struct platform_device *pdev) +{ + ste_audio_io_core_api_free_data(); + misc_deregister(&audio_io_misc_dev); + return 0; +} + +/** + * ste_audio_io_drv_suspend - suspend audio_io + * @pdev: platform data + * @state: power down level + */ +static int ste_audio_io_drv_suspend(struct platform_device *pdev, + pm_message_t state) +{ + if (ste_audio_io_core_is_ready_for_suspend()) + return 0; + else + return -EINVAL; +} + +/** + * ste_audio_io_drv_resume - put back audio_io in the normal state + * @pdev: platform data + */ +static int ste_audio_io_drv_resume(struct platform_device *pdev) +{ + return 0; +} + +/** + * struct audio_io_driver: audio_io platform structure + * @probe: The probe funtion to be called + * @remove: The remove funtion to be called + * @resume: The resume function to be called + * @suspend: The suspend function to be called + * @driver: The driver data + */ +static struct platform_driver ste_audio_io_driver = { + .probe = ste_audio_io_drv_probe, + .remove = ste_audio_io_drv_remove, + .driver = { + .name = AUDIOIO_DEVNAME, + .owner = THIS_MODULE, + }, + .suspend = 
ste_audio_io_drv_suspend, + .resume = ste_audio_io_drv_resume, +}; + +/** Pointer to platform device needed to access abx500 core functions */ +struct platform_device *ste_audio_io_device; + +static int __init ste_audio_io_init(void) +{ + return platform_driver_register(&ste_audio_io_driver); +} +module_init(ste_audio_io_init); + +static void __exit ste_audio_io_exit(void) +{ + platform_driver_unregister(&ste_audio_io_driver); +} +module_exit(ste_audio_io_exit); + +MODULE_AUTHOR("Deepak KARDA <deepak.karda@stericsson.com>"); +MODULE_DESCRIPTION("STE_AUDIO_IO"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/audio_io_dev/ste_audio_io_dev.h b/drivers/misc/audio_io_dev/ste_audio_io_dev.h new file mode 100644 index 00000000000..bcb9dce3ad2 --- /dev/null +++ b/drivers/misc/audio_io_dev/ste_audio_io_dev.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Deepak KARDA/ deepak.karda@stericsson.com for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2. 
 */

#ifndef _AUDIOIO_DEV_H_
#define _AUDIOIO_DEV_H_

#include <mach/ste_audio_io_ioctl.h>
#include "ste_audio_io_core.h"

/*
 * Scratch type used by the ioctl handlers in ste_audio_io_dev.c: one
 * on-stack union large enough to hold any of the fixed-size ioctl
 * argument structures, so a single allocation serves every command.
 */
union audioio_cmd_data_t {
	struct audioio_burst_ctrl_t audioio_burst_ctrl;
	struct audioio_fade_ctrl_t audioio_fade_ctrl;
	struct audioio_mute_trnsdr_t audioio_mute_trnsdr;
	struct audioio_gain_ctrl_trnsdr_t audioio_gain_ctrl_trnsdr;
	struct audioio_gain_desc_trnsdr_t audioio_gain_desc_trnsdr;
	struct audioio_support_loop_t audioio_support_loop;
	struct audioio_gain_loop_t audioio_gain_loop;
	struct audioio_get_gain_t audioio_get_gain;
	struct audioio_loop_ctrl_t audioio_loop_ctrl;
	struct audioio_pwr_ctrl_t audioio_pwr_ctrl;
	struct audioio_data_t audioio_data;
	struct audioio_fsbitclk_ctrl_t audioio_fsbitclk_ctrl;
	struct audioio_acodec_pwr_ctrl_t audioio_acodec_pwr_ctrl;
	struct audioio_pseudoburst_ctrl_t audioio_pseudoburst_ctrl;
};


#endif /* _AUDIOIO_DEV_H_ */

diff --git a/drivers/misc/audio_io_dev/ste_audio_io_func.c b/drivers/misc/audio_io_dev/ste_audio_io_func.c
new file mode 100644
index 00000000000..73c9b269685
--- /dev/null
+++ b/drivers/misc/audio_io_dev/ste_audio_io_func.c
@@ -0,0 +1,4371 @@
/*
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Deepak KARDA/ deepak.karda@stericsson.com for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2.
 */

#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <mach/ste_audio_io_vibrator.h>
#include <mach/ste_audio_io.h>
#include "ste_audio_io_func.h"
#include "ste_audio_io_core.h"
#include "ste_audio_io_ab8500_reg_defs.h"
#include "ste_audio_io_hwctrl_common.h"

/* MSP0 clock handle and bookkeeping for the transducer helpers below */
static struct clk *clk_ptr_msp0;
static int bluetooth_power_up_count;
static int acodec_reg_dump;

#define NCP_TIMEOUT 200 /* 200 ms */
/*
 * TODO: Use proper register defines instead of home-made generic ones.
+ */ +#define SHIFT_QUARTET0 0 +#define SHIFT_QUARTET1 4 +#define MASK_QUARTET (0xFUL) +#define MASK_QUARTET1 (MASK_QUARTET << SHIFT_QUARTET1) +#define MASK_QUARTET0 (MASK_QUARTET << SHIFT_QUARTET0) + +/** + * @brief Modify the specified register + * @reg Register + * @mask_set Bit to be set + * @mask_clear Bit to be cleared + * @return 0 on success otherwise negative error code + */ + +unsigned int ab8500_acodec_modify_write(unsigned int reg, u8 mask_set, + u8 mask_clear) +{ + u8 value8, retval = 0; + value8 = HW_REG_READ(reg); + /* clear the specified bit */ + value8 &= ~mask_clear; + /* set the asked bit */ + value8 |= mask_set; + retval = HW_REG_WRITE(reg, value8); + return retval; +} + +/** + * @brief Power up headset on a specific channel + * @channel_index Channel-index of headset + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_power_up_headset(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_DA = 0; + unsigned long end_time; + + /* Check if HS PowerUp request is mono or Stereo channel */ + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "HS should have mono or stereo channels"); + return -EINVAL; + } + + ste_audio_io_mute_headset(channel_index, dev); + + error = HW_ACODEC_MODIFY_WRITE(NCP_ENABLE_HS_AUTOSTART_REG, + HS_AUTO_EN, 0); + if (0 != error) { + dev_err(dev, "NCP fully controlled with EnCpHs bit %d", error); + return error; + } + error = HW_ACODEC_MODIFY_WRITE(NCP_ENABLE_HS_AUTOSTART_REG, + (EN_NEG_CP|HS_AUTO_EN), 0); + if (0 != error) { + dev_err(dev, "Enable Negative Charge Pump %d", error); + return error; + } + + /* Wait for negative charge pump to start */ + end_time = jiffies + msecs_to_jiffies(NCP_TIMEOUT); + while (!(HW_REG_READ(IRQ_STATUS_MSB_REG) & NCP_READY_MASK) + && time_after_eq(end_time, jiffies)) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1); + } + + if (!(HW_REG_READ(IRQ_STATUS_MSB_REG) & NCP_READY_MASK)) 
{ + error = -EFAULT; + dev_err(dev, "Negative Charge Pump start error % d", error); + return error; + } + + /* Enable DA1 for HSL */ + if (channel_index & e_CHANNEL_1) { + + /* Power Up HSL driver */ + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, + EN_HSL_MASK, 0); + if (0 != error) { + dev_err(dev, "Power Up HSL Driver %d", error); + return error; + } + + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + + if (EN_DA1 & initialVal_DA) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA1_REG, + SLOT08_FOR_DA_PATH, 0); + if (0 != error) { + dev_err(dev, "Data sent to DA1 from Slot 08 %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, + DA1_TO_HSL, 0); + if (0 != error) { + dev_err(dev, + "DA_IN1 path mixed with sidetone FIR %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG, + EN_DA1, 0); + if (0 != error) { + dev_err(dev, "Power up HSL %d ", error); + return error; + } + + /* Power Up HSL DAC driver */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, + POWER_UP_HSL_DAC, 0); + if (0 != error) { + dev_err(dev, "Power Up HSL DAC driver %d", error); + return error; + } + + /* Power up HSL DAC and digital path */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, + EN_HSL_MASK, 0); + if (0 != error) { + dev_err(dev, + "Power up HSL DAC and digital path %d", + error); + return error; + } + + /* + * Disable short detection. Pull Down output to ground, + * Use local oscillator, Gain change without zero cross control + */ + error = HW_ACODEC_MODIFY_WRITE(SHORT_CIRCUIT_DISABLE_REG, + HS_SHORT_DIS|HS_PULL_DOWN_EN|HS_OSC_EN|HS_ZCD_DIS, 0); + if (0 != error) { + dev_err(dev, "Disable short detection." 
+ "Pull Down output to ground,Use local oscillator,Gain" + "change without zero cross control %d", error); + return error; + } + } + + /* Enable DA2 for HSR */ + if (channel_index & e_CHANNEL_2) { + + /* Power Up HSR driver */ + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, + EN_HSR_MASK, 0); + if (0 != error) { + dev_err(dev, "Power Up HSR Driver %d", error); + return error; + } + + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + if (EN_DA2 & initialVal_DA) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA2_REG, + SLOT09_FOR_DA_PATH, 0); + if (0 != error) { + dev_err(dev, + "Data sent to DA2 from Slot 09 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, DA2_TO_HSR, + 0); + if (0 != error) { + dev_err(dev, + "DA_IN2 path mixed with sidetone FIR %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG, + EN_DA2, 0); + if (0 != error) { + dev_err(dev, "Power up HSR %d ", error); + return error; + } + + /* Power Up HSR DAC driver */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, + POWER_UP_HSR_DAC, 0); + if (0 != error) { + dev_err(dev, "Power Up HSR DAC driver %d", error); + return error; + } + + /* Power up HSR DAC and digital path */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, + EN_HSR_MASK, 0); + if (0 != error) { + dev_err(dev, + "Power up HSR DAC and digital path %d", + error); + return error; + } + + /* + * TEST START .havent cleared the bits in power down.Disable short + * detection. Pull Down output to ground, Use local oscillator, + * Gain change without zero cross control + */ + + error = HW_ACODEC_MODIFY_WRITE(SHORT_CIRCUIT_DISABLE_REG, + HS_SHORT_DIS|HS_PULL_DOWN_EN|HS_OSC_EN|HS_ZCD_DIS, 0); + if (0 != error) { + dev_err(dev, "Disable short detection." 
+ "Pull Down output to ground, Use local oscillator," + "Gain change without zero cross control %d", error); + return error; + } + /* TEST END */ + } + ste_audio_io_unmute_headset(channel_index, 0, dev); + dump_acodec_registers(__func__, dev); + return error; +} + +/** + * @brief Power down headset on a specific channel + * @channel_index Channel-index of headset + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_power_down_headset(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_DA = 0; + unsigned long end_time; + + /* Check if HS Power Down request is mono or Stereo channel */ + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "HS should have mono or stereo channels"); + return -EINVAL; + } + + /* Disable Negative Charge Pump */ + error = HW_ACODEC_MODIFY_WRITE(NCP_ENABLE_HS_AUTOSTART_REG, + (EN_NEG_CP|HS_AUTO_EN), 0); + if (0 != error) { + dev_err(dev, "NCP not fully controlled with EnCpHs bit %d", + error); + return error; + } + error = HW_ACODEC_MODIFY_WRITE(NCP_ENABLE_HS_AUTOSTART_REG, 0, + EN_NEG_CP); + if (0 != error) { + dev_err(dev, "Disable Negative Charge Pump %d", error); + return error; + } + + /* Wait for negative charge pump to stop */ + end_time = jiffies + msecs_to_jiffies(NCP_TIMEOUT); + while ((HW_REG_READ(IRQ_STATUS_MSB_REG) & NCP_READY_MASK) + && time_after_eq(end_time, jiffies)) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1); + } + + if (HW_REG_READ(IRQ_STATUS_MSB_REG) & NCP_READY_MASK) { + error = -EFAULT; + dev_err(dev, "Negative Charge Pump stop error % d", error); + return error; + } + + if (channel_index & e_CHANNEL_1) { + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + if (!(initialVal_DA & EN_DA1)) + return 0; + + /* Power Down HSL driver */ + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, 0, + EN_HSL_MASK); + if (0 != error) { + dev_err(dev, "Power down HSL Driver %d", error); + return 
error;
		}

		/* Power Down HSL DAC driver */
		error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, 0,
				POWER_UP_HSL_DAC);
		if (0 != error) {
			dev_err(dev, "Power Up HSL DAC Driver %d", error);
			return error;
		}

		/* Power Down HSL DAC and digital path */
		error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, 0,
				EN_HSL_MASK);
		if (0 != error) {
			dev_err(dev,
				"Power down HSL DAC and digital path %d",
				error);
			return error;
		}

		/* Disable the DA1 digital channel */
		error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG,
				0, EN_DA1);
		if (0 != error) {
			dev_err(dev, "Disable DA1 %d", error);
			return error;
		}

		/* Unroute DA1 from HSL */
		error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1,
				0, DA1_TO_HSL);
		if (0 != error) {
			dev_err(dev,
				"Clear DA_IN1 path mixed with sidetone FIR %d",
				error);
			return error;
		}

		/* Release TDM slot 08 */
		error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA1_REG, 0,
				SLOT08_FOR_DA_PATH);
		if (0 != error) {
			dev_err(dev,
				"Data sent to DA1 cleared from Slot 08 %d",
				error);
			return error;
		}


	}
	/* Power down DA2/HSR path */

	if (channel_index & e_CHANNEL_2) {
		initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG);
		/*
		 * Already powered down: nothing to do for this channel.
		 * NOTE(review): for a stereo request this early return also
		 * skips dump_acodec_registers() below -- confirm intent.
		 */
		if (!(initialVal_DA & EN_DA2))
			return 0;

		/* Power Down HSR driver */
		error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, 0,
				EN_HSR_MASK);
		if (0 != error) {
			dev_err(dev, "Power down HSR Driver %d", error);
			return error;
		}

		/* Power Down HSR DAC driver */
		error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, 0,
				POWER_UP_HSR_DAC);
		if (0 != error) {
			dev_err(dev, "Power down HSR DAC Driver %d", error);
			return error;
		}

		/* Power Down HSR DAC and digital path */
		error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, 0,
				EN_HSR_MASK);
		if (0 != error) {
			dev_err(dev,
				"Power down HSR DAC and digital path %d",
				error);
			return error;
		}

		/* Disable the DA2 digital channel */
		error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG,
				0, EN_DA2);
		if (0 != error) {
			dev_err(dev, "Disable DA2 %d", error);
			return error;
		}

		/* Unroute DA2 from HSR */
		error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, 0,
				DA2_TO_HSR);
		if (0 != error) {
			dev_err(dev,
				"Clear DA_IN2 path mixed with sidetone FIR %d",
				error);
			return error;
		}

		/* Release TDM slot 09 */
		error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA2_REG, 0,
				SLOT09_FOR_DA_PATH);
		if (0 != error) {
			dev_err(dev,
				"Data sent to DA2 cleared from Slot 09 %d",
				error);
			return error;
		}

	}
	dump_acodec_registers(__func__, dev);
	return error;
}

/**
 * @brief Mute headset on a specific channel
 * @channel_index Headeset channel-index
 * @return 0 on success otherwise negative error code
 */
int ste_audio_io_mute_headset(enum AUDIOIO_CH_INDEX channel_index,
		struct device *dev)
{
	int error = 0;
	/* Check if HS Mute request is mono or Stereo channel */
	if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) {
		dev_err(dev, "HS should have mono or stereo channels");
		return -EINVAL;
	}

	if (channel_index & e_CHANNEL_1) {
		/* Mute HSL */
		error = HW_ACODEC_MODIFY_WRITE(MUTE_HS_EAR_REG,
				EN_HSL_MASK | EN_HSL_DAC_MASK,
				0);
		if (0 != error) {
			dev_err(dev, "Mute HSL %d", error);
			return error;
		}
	}

	if (channel_index & e_CHANNEL_2) {
		/* Mute HSR */
		error = HW_ACODEC_MODIFY_WRITE(MUTE_HS_EAR_REG,
				EN_HSR_MASK | EN_HSR_DAC_MASK,
				0);
		if (0 != error) {
			dev_err(dev, "Mute HSR %d", error);
			return error;
		}
	}

	dump_acodec_registers(__func__, dev);
	return error;
}

/**
 * @brief Unmute headset on a specific channel
 * @channel_index Headeset channel-index
 * @gain Gain index of headset
 * @return 0 on success otherwise negative error code
 */
int ste_audio_io_unmute_headset(enum AUDIOIO_CH_INDEX channel_index, int *gain,
		struct device *dev)
{
	int error = 0;

	/* Check if HS UnMute request is mono or Stereo channel */
	if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) {
		dev_err(dev, "HS should have mono or stereo channels");
		return -EINVAL;
	}

	if (channel_index & e_CHANNEL_1) {
		/* UnMute HSL */
		error =
HW_ACODEC_MODIFY_WRITE(MUTE_HS_EAR_REG, 0, + EN_HSL_MASK | EN_HSL_DAC_MASK); + if (0 != error) { + dev_err(dev, "UnMute HSL %d", error); + return error; + } + } + + if (channel_index & e_CHANNEL_2) { + /* UnMute HSR */ + error = HW_ACODEC_MODIFY_WRITE(MUTE_HS_EAR_REG, 0, + EN_HSR_MASK | EN_HSR_DAC_MASK); + if (0 != error) { + dev_err(dev, "UnMute HSR %d", error); + return error; + } + } + dump_acodec_registers(__func__, dev); + return error; +} + +/** + * @brief Enables fading of headset on a specific channel + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_enable_fade_headset(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(SHORT_CIRCUIT_DISABLE_REG, + 0, DIS_HS_FAD); + if (0 != error) { + dev_err(dev, "Enable fading for HS %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DA1_DIGITAL_GAIN_REG, 0, DIS_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for HSL %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(HSL_EAR_DIGITAL_GAIN_REG, 0, + DIS_DIG_GAIN_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for Digital Gain of HSL %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DA2_DIGITAL_GAIN_REG, 0, DIS_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for HSR %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(HSR_DIGITAL_GAIN_REG, 0, + DIS_DIG_GAIN_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for Digital Gain of HSR %d", + error); + return error; + } + + return error; +} +/** + * @brief Disables fading of headset on a specific channel + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_disable_fade_headset(struct device *dev) +{ + int error = 0; + error = HW_ACODEC_MODIFY_WRITE(SHORT_CIRCUIT_DISABLE_REG, + DIS_HS_FAD, 0); + if (0 != error) { + dev_err(dev, "Disable fading for HS %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DA1_DIGITAL_GAIN_REG, 
DIS_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for HSL %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(HSL_EAR_DIGITAL_GAIN_REG, + DIS_DIG_GAIN_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for Digital Gain of HSL %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DA2_DIGITAL_GAIN_REG, DIS_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for HSR %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(HSR_DIGITAL_GAIN_REG, + DIS_DIG_GAIN_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for Digital Gain of HSR %d", + error); + return error; + } + return error; +} +/** + * @brief Power up earpiece + * @channel_index Channel-index + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_power_up_earpiece(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_DA = 0; + + /* Check if Earpiece PowerUp request is mono channel */ + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "EARPIECE should have mono channel"); + return -EINVAL; + } + + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + + /* Check if Earpiece is already powered up or DA1 being used by HS */ + if (EN_DA1 & initialVal_DA) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, + DA1_TO_HSL, 0); + if (0 != error) { + dev_err(dev, + "DA_IN1 path mixed with sidetone FIR %d", error); + return error; + } + + /* Enable DA1 */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA1_REG, + SLOT08_FOR_DA_PATH, 0); + if (0 != error) { + dev_err(dev, "Data sent to DA1 from Slot 08 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG, + EN_DA1, 0); + if (0 != error) { + dev_err(dev, "Enable DA1 %d", error); + return error; + } + + /* Power Up EAR class-AB driver */ + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, + EN_EAR_MASK, 0); + if (0 != error) 
{ + dev_err(dev, "Power Up EAR class-AB driver %d", error); + return error; + } + + /* Power up EAR DAC and digital path */ + error = HW_ACODEC_MODIFY_WRITE( + DIGITAL_OUTPUT_ENABLE_REG, EN_EAR_MASK, 0); + if (0 != error) { + dev_err(dev, "Power up EAR DAC and digital path %d", error); + return error; + } + dump_acodec_registers(__func__, dev); + return error; +} +/** + * @brief Power down earpiece + * @channel_index Channel-index + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_power_down_earpiece(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_DA = 0; + + /* Check if Earpiece PowerDown request is mono channel */ + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "EARPIECE should have mono channel"); + return -EINVAL; + } + + /* Check if Earpiece is already powered down or DA1 being used by HS */ + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + if (!(initialVal_DA & EN_DA1)) + return 0; + + /* Power Down EAR class-AB driver */ + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, + 0, EN_EAR_MASK); + if (0 != error) { + dev_err(dev, "Power Down EAR class-AB driver %d", error); + return error; + } + + /* Power Down EAR DAC and digital path */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, + 0, EN_EAR_MASK); + if (0 != error) { + dev_err(dev, + "Power Down EAR DAC and digital path %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, 0, DA1_TO_HSL); + if (0 != error) { + dev_err(dev, + "Clear DA_IN1 path mixed with sidetone FIR %d", + error); + return error; + } + + /* Disable DA1 */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG, + 0, EN_DA1); + if (0 != error) { + dev_err(dev, "Disable DA1 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA1_REG, 0, + SLOT08_FOR_DA_PATH); + if (0 != error) { + dev_err(dev, + "Data sent to DA1 cleared from Slot 08 %d", 
error); + return error; + } + dump_acodec_registers(__func__, dev); + return error; +} +/** + * @brief Mute earpiece + * @channel_index Channel-index + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_mute_earpiece(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + + /* Check if Earpiece Mute request is mono channel */ + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "EARPIECE should have mono channel"); + return -EINVAL; + } + + /* Mute Earpiece */ + error = HW_ACODEC_MODIFY_WRITE(MUTE_HS_EAR_REG, + EN_EAR_MASK | EN_EAR_DAC_MASK, 0); + if (0 != error) { + dev_err(dev, "Mute Earpiece %d", error); + return error; + } + dump_acodec_registers(__func__, dev); + return error; +} +/** + * @brief Unmute earpiece + * @channel_index Channel-index + * @gain Gain index of earpiece + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_unmute_earpiece(enum AUDIOIO_CH_INDEX channel_index, int *gain, + struct device *dev) +{ + int error = 0; + + /* Check if Earpiece UnMute request is mono channel */ + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "EARPIECE should have mono channel"); + return -EINVAL; + } + + /* UnMute Earpiece */ + error = HW_ACODEC_MODIFY_WRITE(MUTE_HS_EAR_REG, 0, + EN_EAR_MASK | EN_EAR_DAC_MASK); + if (0 != error) { + dev_err(dev, "UnMute Earpiece %d", error); + return error; + } + dump_acodec_registers(__func__, dev); + return error; +} +/** + * @brief Enables fading of earpiece + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_enable_fade_earpiece(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(DA1_DIGITAL_GAIN_REG, 0, DIS_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for Ear %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(HSL_EAR_DIGITAL_GAIN_REG, 0, + DIS_DIG_GAIN_FADING); + if (0 != error) { + dev_err(dev, + "Enable fading for Digital Gain of Ear %d", error); + return error; 
+ } + + return error; +} +/** + * @brief Disables fading of earpiece + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_disable_fade_earpiece(struct device *dev) +{ + int error = 0; + error = HW_ACODEC_MODIFY_WRITE(DA1_DIGITAL_GAIN_REG, DIS_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for Ear %d", error); + return error; + } + return error; +} +/** + * @brief Power up IHF on a specific channel + * @channel_index Channel-index + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_power_up_ihf(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_DA = 0; + + /* Check if IHF PowerUp request is mono or Stereo channel */ + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "IHF should have mono or stereo channels"); + return -EINVAL; + } + + if (channel_index & e_CHANNEL_1) { + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + if (EN_DA3 & initialVal_DA) + return 0; + + /* Enable DA3 for IHFL */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA3_REG, + SLOT10_FOR_DA_PATH, 0); + if (0 != error) { + dev_err(dev, "Data sent to DA3 from Slot 10 %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG, + EN_DA3, 0); + if (0 != error) { + dev_err(dev, "Power up IHFL %d", error); + return error; + } + + /* Power Up HFL Class-D driver */ + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, + EN_HFL_MASK, 0); + if (0 != error) { + dev_err(dev, "Power Up HFL Class-D Driver %d", error); + return error; + } + + /* Power up HFL Class D driver and digital path */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, + EN_HFL_MASK, 0); + if (0 != error) { + dev_err(dev, + "Power up HFL Class D driver & digital path %d", + error); + return error; + } + } + + /* Enable DA4 for IHFR */ + if (channel_index & e_CHANNEL_2) { + initialVal_DA = 
HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + if (EN_DA4 & initialVal_DA) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA4_REG, + SLOT11_FOR_DA_PATH, 0); + if (0 != error) { + dev_err(dev, "Data sent to DA4 from Slot 11 %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG, + EN_DA4, 0); + if (0 != error) { + dev_err(dev, "Enable DA4 %d", error); + return error; + } + + /* Power Up HFR Class-D driver */ + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, + EN_HFR_MASK, 0); + if (0 != error) { + dev_err(dev, "Power Up HFR Class-D Driver %d", error); + return error; + } + + /* Power up HFR Class D driver and digital path */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, + EN_HFR_MASK, 0); + if (0 != error) { + dev_err(dev, + "Power up HFR Class D driver and digital path %d", + error); + return error; + } + } + dump_acodec_registers(__func__, dev); + return error; +} +/** + * @brief Power down IHF on a specific channel + * @channel_index Channel-index + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_power_down_ihf(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_DA = 0; + + /* Check if IHF Power Down request is mono or Stereo channel */ + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "IHF should have mono or stereo channels"); + return -EINVAL; + } + + if (channel_index & e_CHANNEL_1) { + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + if (!(initialVal_DA & EN_DA3)) + return 0; + + /* Power Down HFL Class-D driver */ + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, 0, + EN_HFL_MASK); + if (0 != error) { + dev_err(dev, "Power Down HFL Class-D Driver %d", + error); + return error; + } + + /* Power Down HFL Class D driver and digital path */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, 0, + EN_HFL_MASK); + if (0 != error) { + dev_err(dev, 
+ "Power Down HFL Class D driver & digital path %d", + error); + return error; + } + + /* Disable DA3 for IHFL */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG, + 0, EN_DA3); + if (0 != error) { + dev_err(dev, "Disable DA3 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA3_REG, 0, + SLOT10_FOR_DA_PATH); + if (0 != error) { + dev_err(dev, + "Data sent to DA3 cleared from Slot 10 %d", + error); + return error; + } + } + + if (channel_index & e_CHANNEL_2) { + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + + /* Check if IHF is already powered Down */ + if (!(initialVal_DA & EN_DA4)) + return 0; + + /* Power Down HFR Class-D Driver */ + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, 0, + EN_HFR_MASK); + if (0 != error) { + dev_err(dev, "Power Down HFR Class-D Driver %d", + error); + return error; + } + + /* Power Down HFR Class D driver and digital path */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, 0, + EN_HFR_MASK); + if (0 != error) { + dev_err(dev, + "Power Down HFR Class D driver & digital path %d", + error); + return error; + } + + /* Disable DA4 for IHFR */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG, + 0, EN_DA4); + if (0 != error) { + dev_err(dev, "Disable DA4 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA4_REG, 0, + SLOT11_FOR_DA_PATH); + if (0 != error) { + dev_err(dev, + "Data sent to DA4 cleared from Slot 11 %d", + error); + return error; + } + } + dump_acodec_registers(__func__, dev); + return error; +} +/** + * @brief Mute IHF on a specific channel + * @channel_index Channel-index + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_mute_ihf(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + + if ((channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + error = ste_audio_io_set_ihf_gain(channel_index, 0, -63, + 0, dev); + if (0 != error) { + 
dev_err(dev, "Mute ihf %d", error); + return error; + } + } + dump_acodec_registers(__func__, dev); + return error; +} +/** + * @brief Unmute IHF on a specific channel + * @channel_index Channel-index + * @gain Gain index of IHF + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_unmute_ihf(enum AUDIOIO_CH_INDEX channel_index, int *gain, + struct device *dev) +{ + int error = 0; + + if (channel_index & e_CHANNEL_1) { + error = ste_audio_io_set_ihf_gain(channel_index, 0, gain[0], + 0, dev); + if (0 != error) { + dev_err(dev, "UnMute ihf %d", error); + return error; + } + } + + if (channel_index & e_CHANNEL_2) { + error = ste_audio_io_set_ihf_gain(channel_index, 0, gain[1], + 0, dev); + if (0 != error) { + dev_err(dev, "UnMute ihf %d", error); + return error; + } + } + dump_acodec_registers(__func__, dev); + return error; +} +/** + * @brief Enable fading of IHF + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_enable_fade_ihf(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(DA3_DIGITAL_GAIN_REG, 0, DIS_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for HFL %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DA4_DIGITAL_GAIN_REG, 0, DIS_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for HFR %d", error); + return error; + } + return error; +} +/** + * @brief Disable fading of IHF + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_disable_fade_ihf(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(DA3_DIGITAL_GAIN_REG, DIS_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for HFL %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DA4_DIGITAL_GAIN_REG, DIS_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for HFR %d", error); + return error; + } + return error; +} +/** + * @brief Power up VIBL + * @channel_index Channel-index of VIBL + * @return 0 on 
success otherwise negative error code + */ + +int ste_audio_io_power_up_vibl(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_DA = 0; + + /* Check if VibL PowerUp request is mono channel */ + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "VibL should have mono channel"); + return -EINVAL; + } + + /* Try to allocate vibrator for audio left channel */ + error = ste_audioio_vibrator_alloc(STE_AUDIOIO_CLIENT_AUDIO_L, + STE_AUDIOIO_CLIENT_AUDIO_R | STE_AUDIOIO_CLIENT_AUDIO_L); + if (error) { + dev_err(dev, " Couldn't allocate vibrator %d, client %d", + error, STE_AUDIOIO_CLIENT_AUDIO_L); + return error; + } + + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + + /* Check if VibL is already powered up */ + if (initialVal_DA & EN_DA5) + return 0; + + /* Enable DA5 for vibl */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA5_REG, + SLOT12_FOR_DA_PATH, 0); + if (0 != error) { + dev_err(dev, "Data sent to DA5 from Slot 12 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG, + EN_DA5, 0); + if (0 != error) { + dev_err(dev, "Enable DA5 for VibL %d", error); + return error; + } + + /* Power Up VibL Class-D driver */ + error = HW_ACODEC_MODIFY_WRITE( + ANALOG_OUTPUT_ENABLE_REG, EN_VIBL_MASK, 0); + if (0 != error) { + dev_err(dev, "Power Up VibL Class-D Driver %d", error); + return error; + } + + /* Power up VibL Class D driver and digital path */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, + EN_VIBL_MASK, 0); + if (0 != error) { + dev_err(dev, + "Power up VibL Class D driver and digital path %d", + error); + return error; + } + return error; +} +/** + * @brief Power down VIBL + * @channel_index Channel-index of VIBL + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_power_down_vibl(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_DA = 0; + + /* Check if 
VibL Power Down request is mono channel */ + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "VibL should have mono channel"); + return -EINVAL; + } + + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + + /* Check if VibL is already powered down */ + if (!(initialVal_DA & EN_DA5)) + return 0; + + + /* Power Down VibL Class-D driver */ + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, + 0, EN_VIBL_MASK); + if (0 != error) { + dev_err(dev, "Power Down VibL Class-D Driver %d", error); + return error; + } + + /* Power Down VibL Class D driver and digital path */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, 0, + EN_VIBL_MASK); + if (0 != error) { + dev_err(dev, + "Power Down VibL Class D driver & digital path %d", + error); + return error; + } + + /* Disable DA5 for VibL */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG, + 0, EN_DA5); + if (0 != error) { + dev_err(dev, "Disable DA5 for VibL %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA5_REG, 0, + SLOT12_FOR_DA_PATH); + if (0 != error) { + dev_err(dev, + "Data sent to DA5 cleared from Slot 12 %d", error); + return error; + } + + /* Release vibrator */ + ste_audioio_vibrator_release(STE_AUDIOIO_CLIENT_AUDIO_L); + + return error; +} +/** + * @brief Enable fading of VIBL + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_enable_fade_vibl(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(DA5_DIGITAL_GAIN_REG, 0, DIS_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for VibL %d", error); + return error; + } + return error; +} +/** + * @brief Disable fading of VIBL + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_disable_fade_vibl(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(DA5_DIGITAL_GAIN_REG, DIS_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for VibL %d", error); + return error; + } + 
return error; +} +/** + * @brief Power up VIBR + * @channel_index Channel-index of VIBR + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_power_up_vibr(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_DA = 0; + + /* Check if VibR PowerUp request is mono channel */ + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "VibR should have mono channel"); + return -EINVAL; + } + + /* Try to allocate vibrator for audio right channel */ + error = ste_audioio_vibrator_alloc(STE_AUDIOIO_CLIENT_AUDIO_R, + STE_AUDIOIO_CLIENT_AUDIO_R | STE_AUDIOIO_CLIENT_AUDIO_L); + if (error) { + dev_err(dev, " Couldn't allocate vibrator %d, client %d", + error, STE_AUDIOIO_CLIENT_AUDIO_R); + return error; + } + + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + + /* Check if VibR is already powered up */ + if (initialVal_DA & EN_DA6) + return 0; + + /* Enable DA6 for vibr */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA6_REG, + SLOT13_FOR_DA_PATH, 0); + if (0 != error) { + dev_err(dev, "Data sent to DA5 from Slot 13 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE( + DIGITAL_DA_CHANNELS_ENABLE_REG, EN_DA6, 0); + if (0 != error) { + dev_err(dev, "Enable DA6 for VibR %d", error); + return error; + } + + /* Power Up VibR Class-D driver */ + error = HW_ACODEC_MODIFY_WRITE( + ANALOG_OUTPUT_ENABLE_REG, EN_VIBR_MASK, 0); + if (0 != error) { + dev_err(dev, "Power Up VibR Class-D Driver %d", error); + return error; + } + + /* Power up VibR Class D driver and digital path */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, + EN_VIBR_MASK, 0); + if (0 != error) { + dev_err(dev, + "Power up VibR Class D driver & digital path %d", + error); + return error; + } + return error; +} +/** + * @brief Power down VIBR + * @channel_index Channel-index of VIBR + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_power_down_vibr(enum AUDIOIO_CH_INDEX 
channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_DA = 0; + + /* Check if VibR PowerDown request is mono channel */ + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "VibR should have mono channel"); + return -EINVAL; + } + + initialVal_DA = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + + /* Check if VibR is already powered down */ + if (!(initialVal_DA & EN_DA6)) + return 0; + + + /* Power Down VibR Class-D driver */ + error = HW_ACODEC_MODIFY_WRITE(ANALOG_OUTPUT_ENABLE_REG, 0, + EN_VIBR_MASK); + if (0 != error) { + dev_err(dev, "Power Down VibR Class-D Driver %d", error); + return error; + } + + /* Power Down VibR Class D driver and digital path */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_OUTPUT_ENABLE_REG, 0, + EN_VIBR_MASK); + if (0 != error) { + dev_err(dev, + "Power Down VibR Class D driver & digital path %d", + error); + return error; + } + + /* Disable DA6 for VibR */ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_DA_CHANNELS_ENABLE_REG, + 0, EN_DA6); + if (0 != error) { + dev_err(dev, "Disable DA6 for VibR %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA6_REG, 0, + SLOT13_FOR_DA_PATH); + if (0 != error) { + dev_err(dev, "Data sent to DA5 cleared from Slot 13 %d", + error); + return error; + } + + /* Release vibrator */ + ste_audioio_vibrator_release(STE_AUDIOIO_CLIENT_AUDIO_R); + + return error; +} +/** + * @brief Enable fading of VIBR + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_enable_fade_vibr(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(DA6_DIGITAL_GAIN_REG, 0, DIS_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for VibR %d", error); + return error; + } + return error; +} +/** + * @brief Disable fading of VIBR + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_disable_fade_vibr(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(DA6_DIGITAL_GAIN_REG, 
DIS_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for VibR %d", error); + return error; + } + return error; +} +/** + * @brief Power up MIC1A + * @channel_index Channel-index of MIC1A + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_power_up_mic1a(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_AD = 0; + + /* Check if Mic1 PowerUp request is mono channel */ + + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "MIC1 should have mono channel"); + return -EINVAL; + } + + initialVal_AD = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + /* Check if Mic1 is already powered up or used by Dmic3 */ + if (EN_AD3 & initialVal_AD) + return 0; + + error = HW_REG_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, DATA_FROM_AD_OUT3); + if (0 != error) { + dev_err(dev, "Slot 02 outputs data from AD_OUT3 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + EN_AD3, 0); + if (0 != error) { + dev_err(dev, "Enable AD3 for Mic1 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, 0, + SEL_DMIC3_FOR_AD_OUT3); + if (0 != error) { + dev_err(dev, "Select ADC1 for AD_OUT3 %d", error); + return error; + } + + /* Select MIC1A */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, 0, + SEL_MIC1B_CLR_MIC1A); + if (0 != error) { + dev_err(dev, "Select MIC1A %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, EN_MIC1, 0); + if (0 != error) { + dev_err(dev, "Power up Mic1 %d", error); + return error; + } + + /* Power Up ADC1 */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, POWER_UP_ADC1, 0); + if (0 != error) { + dev_err(dev, "Power Up ADC1 %d", error); + return error; + } + +return error; +} +/** + * @brief Power down MIC1A + * @channel_index Channel-index of MIC1A + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_power_down_mic1a(enum 
AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_AD = 0; + + /* Check if Mic1 PowerDown request is mono channel */ + + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "Mic1 should have mono channel"); + return -EINVAL; + } + + initialVal_AD = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + /* Check if Mic1 is already powered down or used by Dmic3 */ + if (!(initialVal_AD & EN_AD3)) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, 0, EN_MIC1); + if (0 != error) { + dev_err(dev, "Power Down Mic1 %d", error); + return error; + } + + /* Power Down ADC1 */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, 0, POWER_UP_ADC1); + if (0 != error) { + dev_err(dev, "Power Down ADC1 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + 0, EN_AD3); + if (0 != error) { + dev_err(dev, "Disable AD3 for Mic1 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT2_3_REG, 0, + DATA_FROM_AD_OUT3); + if (0 != error) { + dev_err(dev, "Slot 02 outputs data cleared from AD_OUT3 %d", + error); + return error; + } + return error; +} +/** + * @brief Mute MIC1A + * @channel_index Channel-index of MIC1A + * @return 0 on success otherwise negative error code + */ + + +int ste_audio_io_mute_mic1a(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "MIC1 should have mono channel"); + return -EINVAL; + } + + /* Mute mic1 */ + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, MUT_MIC1, 0); + if (0 != error) { + dev_err(dev, "Mute Mic1 %d", error); + return error; + } + return error; +} +/** + * @brief Unmute MIC1A + * @channel_index Channel-index of MIC1A + * @gain Gain index of MIC1A + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_unmute_mic1a(enum AUDIOIO_CH_INDEX channel_index, int *gain, + struct device *dev) +{ + int error 
= 0; + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "Mic1 should have mono channel"); + return -EINVAL; + } + /* UnMute mic1 */ + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, 0, MUT_MIC1); + if (0 != error) { + dev_err(dev, "UnMute Mic1 %d", error); + return error; + } + return error; +} +/** + * @brief Enable fading of MIC1A + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_enable_fade_mic1a(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(AD3_DIGITAL_GAIN_REG, 0, DIS_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for Mic1 %d", error); + return error; + } + return error; +} +/** + * @brief Disable fading of MIC1A + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_disable_fade_mic1a(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(AD3_DIGITAL_GAIN_REG, DIS_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for Mic1 %d", error); + return error; + } + return error; +} +/** + * @brief Power up MIC1B + * @channel_index Channel-index of MIC1B + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_power_up_mic1b(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error; + unsigned char initialVal_AD = 0; + + error = regulator_enable(regulator_avsource); + if (0 != error) { + dev_err(dev, "regulator avsource enable failed = %d", error); + return error; + } + /* GPIO35 settings to enable MIC 1B input instead of TVOUT */ + error = HW_ACODEC_MODIFY_WRITE(AB8500_GPIO_DIR5_REG, + GPIO35_DIR_OUTPUT, 0); + if (0 != error) { + dev_err(dev, "setting AB8500_GPIO_DIR5_REG reg %d", error); + return error; + } + error = HW_ACODEC_MODIFY_WRITE(AB8500_GPIO_OUT5_REG, + GPIO35_DIR_OUTPUT, 0); + if (0 != error) { + dev_err(dev, "setting AB8500_GPIO_OUT5_REG reg %d", error); + return error; + } + + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "Mic1 should have mono channel"); + return -EINVAL; 
+ } + + initialVal_AD = HW_REG_READ(DIGITAL_AD_CHANNELS_ENABLE_REG); + /* Check if Mic1 is already powered up or used by Dmic3 */ + if (EN_AD3 & initialVal_AD) + return 0; + + error = HW_REG_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, DATA_FROM_AD_OUT3); + if (0 != error) { + dev_err(dev, "Slot 02 outputs data from AD_OUT3 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + EN_AD3, 0); + if (0 != error) { + dev_err(dev, "Enable AD3 for Mic1 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, 0, + SEL_DMIC3_FOR_AD_OUT3); + if (0 != error) { + dev_err(dev, "Select ADC1 for AD_OUT3 %d", error); + return error; + } + + /* Select MIC1B */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, SEL_MIC1B_CLR_MIC1A, + 0); + if (0 != error) { + dev_err(dev, "Select MIC1B %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, EN_MIC1, 0); + if (0 != error) { + dev_err(dev, "Power up Mic1 %d", error); + return error; + } + + /* Power Up ADC1 */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, POWER_UP_ADC1, 0); + if (0 != error) { + dev_err(dev, "Power Up ADC1 %d", error); + return error; + } + return error; +} +/** + * @brief Power down MIC1B + * @channel_index Channel-index of MIC1B + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_power_down_mic1b(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error; + unsigned char initialVal_AD = 0; + + /* Check if Mic1 PowerDown request is mono channel */ + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "Mic1 should have mono channel"); + return -EINVAL; + } + + initialVal_AD = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + + /* Check if Mic1 is already powered down or used by Dmic3 */ + if (!(initialVal_AD & EN_AD3)) + return 0; + + + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, 0, EN_MIC1); + if (0 != error) { + dev_err(dev, "Power Down Mic1 %d", error); + 
return error; + } + + /* Power Down ADC1 */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, 0, POWER_UP_ADC1); + if (0 != error) { + dev_err(dev, "Power Down ADC1 %d", error); + return error; + } + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, 0, + EN_AD3); + if (0 != error) { + dev_err(dev, "Disable AD3 for Mic1 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT2_3_REG, 0, + DATA_FROM_AD_OUT3); + if (0 != error) { + dev_err(dev, "Slot 02 outputs data cleared from AD_OUT3 %d", + error); + return error; + } + + /* undo GPIO35 settings */ + error = HW_ACODEC_MODIFY_WRITE(AB8500_GPIO_DIR5_REG, + 0, GPIO35_DIR_OUTPUT); + if (0 != error) { + dev_err(dev, "resetting AB8500_GPIO_DIR5_REG reg %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AB8500_GPIO_OUT5_REG, + 0, GPIO35_DIR_OUTPUT); + if (0 != error) { + dev_err(dev, "resetting AB8500_GPIO_OUT5_REG reg %d", error); + return error; + } + + error = regulator_disable(regulator_avsource); + if (0 != error) { + dev_err(dev, "regulator avsource disable failed = %d", error); + return error; + } + dump_acodec_registers(__func__, dev); + return error; +} + +/** + * @brief enable hardware loop of mic1b + * @chnl_index Channel-index of MIC1B + * @hw_loop type of hardware loop + * @loop_gain gain value to be used in hardware loop + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_enable_loop_mic1b(enum AUDIOIO_CH_INDEX chnl_index, + enum AUDIOIO_HAL_HW_LOOPS hw_loop, + int loop_gain, struct device *dev, + void *cookie) +{ + int error; + struct transducer_context_t *trnsdr; + trnsdr = (struct transducer_context_t *)cookie; + + switch (hw_loop) { + /* Check if HSL is active */ + case AUDIOIO_SIDETONE_LOOP: + if (!(trnsdr[HS_CH].is_power_up[e_CHANNEL_1]) + && !(trnsdr[EAR_CH].is_power_up[e_CHANNEL_1])) { + error = -EFAULT; + dev_err(dev, + "HS or Earpiece not powered up error = %d", + error); + return error; + } + + /* For ch1, 
Power On STFIR1, data comes from AD3*/ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG2, + FIR1_FROMAD3, 0); + if (error) + dev_err(dev, "FIR1 data comes from AD_OUT3 %d", + error); + error = HW_REG_WRITE(SIDETONE_FIR1_GAIN_REG, loop_gain); + if (error) { + dev_err(dev, + "Set FIR1 Gain index = %d", + error); + return error; + } + break; + default: + error = -EINVAL; + dev_err(dev, "loop not supported %d", error); + } + return error; +} + +/** + * @brief disable hardware loop of mic1b + * @chnl_index Channel-index of MIC1B + * @hw_loop type of hardware loop + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_disable_loop_mic1b(enum AUDIOIO_CH_INDEX chnl_index, + enum AUDIOIO_HAL_HW_LOOPS hw_loop, + struct device *dev, void *cookie) +{ + int error; + struct transducer_context_t *trnsdr; + trnsdr = (struct transducer_context_t *)cookie; + + switch (hw_loop) { + /* Check if HSL is active */ + case AUDIOIO_SIDETONE_LOOP: + if (!trnsdr[HS_CH].is_power_up[e_CHANNEL_1] + && !trnsdr[EAR_CH].is_power_up[e_CHANNEL_1]) { + error = -EFAULT; + dev_err(dev, "HS or Earpiece not powered up, err = %d", + error); + return error; + } + + /* For ch1, Power down STFIR1, data comes from AD3*/ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG2, + 0, FIR1_FROMAD3); + if (error) { + dev_err(dev, "FIR1 data comes from AD_OUT3, err = %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(FILTERS_CONTROL_REG, + 0, FIR_FILTERCONTROL); + if (error) { + dev_err(dev, + "ST FIR Filters disable failed %d", error); + return error; + } + break; + default: + error = -EINVAL; + dev_err(dev, "loop not supported %d", error); + } + return error; +} + +/** + * @brief Power up MIC2 + * @channel_index Channel-index of MIC2 + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_power_up_mic2(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_AD = 0; + + /* Check if Mic2 PowerUp request 
is mono channel */ + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "Mic2 should have mono channel"); + return -EINVAL; + } + + initialVal_AD = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + + /* Check if Mic2 is already powered up or used by LINR or Dmic2 */ + if (EN_AD2 & initialVal_AD) + return 0; + + + error = HW_REG_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, DATA_FROM_AD_OUT2); + if (0 != error) { + dev_err(dev, "Slot 01 outputs data from AD_OUT2 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, EN_AD2, + 0); + if (0 != error) { + dev_err(dev, "Enable AD2 for Mic2 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, 0, + SEL_DMIC2_FOR_AD_OUT2); + if (0 != error) { + dev_err(dev, "Select ADC2 for AD_OUT2 %d", error); + return error; + } + + /* Select mic2 */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, 0, + SEL_LINR_CLR_MIC2); + if (0 != error) { + dev_err(dev, "Select MIC2 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, EN_MIC2, 0); + if (0 != error) { + dev_err(dev, "Power up Mic2 %d", error); + return error; + } + + /* Power Up ADC1 */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, POWER_UP_ADC2, 0); + if (0 != error) { + dev_err(dev, "Power Up ADC2 %d", error); + return error; + } + return error; +} +/** + * @brief Power down MIC2 + * @channel_index Channel-index of MIC2 + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_power_down_mic2(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_AD = 0; + + /* Check if Mic2 PowerDown request is mono channel */ + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "Mic2 should have mono channel"); + return -EINVAL; + } + + initialVal_AD = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + + /* Check if Mic2 is already powered down or used by LINR or Dmic2 */ + if (!(initialVal_AD & EN_AD2)) + return 0; 
+ + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, 0, EN_MIC2); + if (0 != error) { + dev_err(dev, "Power Down Mic2 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + 0, EN_AD2); + if (0 != error) { + dev_err(dev, "Disable AD2 for Mic2 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, 0, + (DATA_FROM_AD_OUT2<<4)); + if (0 != error) { + dev_err(dev, "Slot 01 outputs data cleared from AD_OUT2 %d", + error); + return error; + } + return error; +} +/** + * @brief Mute MIC2 + * @channel_index Channel-index of MIC2 + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_mute_mic2(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "Mic2 should have mono channel"); + return -EINVAL; + } + + /* Mute mic2 */ + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, MUT_MIC2, 0); + if (0 != error) { + dev_err(dev, "Mute Mic2 %d", error); + return error; + } + return error; +} +/** + * @brief Unmute MIC2 + * @channel_index Channel-index of MIC2 + * @gain Gain index of MIC2 + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_unmute_mic2(enum AUDIOIO_CH_INDEX channel_index, int *gain, + struct device *dev) +{ + int error = 0; + if (!(channel_index & e_CHANNEL_1)) { + dev_err(dev, "Mic2 should have mono channel"); + return -EINVAL; + } + /* UnMute mic2 */ + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, 0, MUT_MIC2); + if (0 != error) { + dev_err(dev, "UnMute Mic2 %d", error); + return error; + } + return error; +} +/** + * @brief Enable fading of MIC2 + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_enable_fade_mic2(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(AD2_DIGITAL_GAIN_REG, 0, DIS_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for Mic2 %d", error); + return error; 
+ } + return error; +} +/** + * @brief Disable fading of MIC2 + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_disable_fade_mic2(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(AD2_DIGITAL_GAIN_REG, DIS_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for Mic2 %d", error); + return error; + } + + return error; +} +/** + * @brief Power up LinIn + * @channel_index Channel-index of LinIn + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_power_up_lin(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_AD = 0; + + /* Check if LinIn PowerUp request is mono or Stereo channel */ + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "LinIn should have mono or stereo channels"); + return -EINVAL; + } + + /* Enable AD1 for LinInL */ + if (channel_index & e_CHANNEL_1) { + initialVal_AD = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + if (initialVal_AD & EN_AD1) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, + DATA_FROM_AD_OUT1, 0); + if (0 != error) { + dev_err(dev, "Slot 00 outputs data from AD_OUT1 %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + EN_AD1, 0); + if (0 != error) { + dev_err(dev, "Enable AD1 for LinInL %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, 0, + SEL_DMIC1_FOR_AD_OUT1); + if (0 != error) { + dev_err(dev, "Select ADC3 for AD_OUT1 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE( + LINE_IN_MIC_CONF_REG, EN_LIN_IN_L, 0); + if (0 != error) { + dev_err(dev, "Power up LinInL %d", error); + return error; + } + + /* Power Up ADC3 */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, + POWER_UP_ADC3, 0); + if (0 != error) { + dev_err(dev, "Power Up ADC3 %d", error); + return error; + } + } + /* Enable AD2 for LinInR */ + + if (channel_index & 
e_CHANNEL_2) { + initialVal_AD = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + if (EN_AD2 & initialVal_AD) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, + (DATA_FROM_AD_OUT2<<4), 0); + if (0 != error) { + dev_err(dev, "Slot 01 outputs data from AD_OUT2 %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + EN_AD2, 0); + if (0 != error) { + dev_err(dev, "Enable AD2 LinInR %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, 0, + SEL_DMIC2_FOR_AD_OUT2); + if (0 != error) { + dev_err(dev, "Select ADC2 for AD_OUT2 %d", error); + return error; + } + + /* Select LinInR */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, + SEL_LINR_CLR_MIC2, 0); + if (0 != error) { + dev_err(dev, "Select LinInR %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, + EN_LIN_IN_R, 0); + if (0 != error) { + dev_err(dev, "Power up LinInR %d", error); + return error; + } + + /* Power Up ADC2 */ + error = HW_ACODEC_MODIFY_WRITE( + ADC_DAC_ENABLE_REG, POWER_UP_ADC2, 0); + if (0 != error) { + dev_err(dev, "Power Up ADC2 %d", error); + return error; + } + } + return error; +} +/** + * @brief Power down LinIn + * @channel_index Channel-index of LinIn + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_power_down_lin(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_AD = 0; + + /* Check if LinIn PowerDown request is mono or Stereo channel */ + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "LinIn should have mono or stereo channels"); + return -EINVAL; + } + + /* Enable AD1 for LinInL */ + if (channel_index & e_CHANNEL_1) { + initialVal_AD = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + if (!(initialVal_AD & EN_AD1)) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, 0, + EN_LIN_IN_L); + if (0 != error) { + dev_err(dev, 
"Power Down LinInL %d", error); + return error; + } + + /* Power Down ADC3 */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, 0, + POWER_UP_ADC3); + if (0 != error) { + dev_err(dev, "Power Down ADC3 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + 0, EN_AD1); + if (0 != error) { + dev_err(dev, "Disable AD1 for LinInL %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, 0, + DATA_FROM_AD_OUT1); + if (0 != error) { + dev_err(dev, + "Slot 00 outputs data cleared from AD_OUT1 %d", + error); + return error; + } + } + + /* Enable AD2 for LinInR */ + if (channel_index & e_CHANNEL_2) { + initialVal_AD = HW_REG_READ(DIGITAL_DA_CHANNELS_ENABLE_REG); + if (!(initialVal_AD & EN_AD2)) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, 0, + EN_LIN_IN_R); + if (0 != error) { + dev_err(dev, "Power Down LinInR %d", error); + return error; + } + + /* Power Down ADC2 */ + error = HW_ACODEC_MODIFY_WRITE(ADC_DAC_ENABLE_REG, 0, + POWER_UP_ADC2); + if (0 != error) { + dev_err(dev, "Power Down ADC2 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + 0, EN_AD2); + if (0 != error) { + dev_err(dev, "Disable AD2 LinInR %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, 0, + (DATA_FROM_AD_OUT2<<4)); + if (0 != error) { + dev_err(dev, + "Slot01 outputs data cleared from AD_OUT2 %d", + error); + return error; + } + } + return error; +} +/** + * @brief Mute LinIn + * @channel_index Channel-index of LinIn + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_mute_lin(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "LinIn should have mono or stereo channels"); + return -EINVAL; + } + + if (channel_index & e_CHANNEL_1) { + /* Mute LinInL */ + error = 
HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, + MUT_LIN_IN_L, 0); + if (0 != error) { + dev_err(dev, "Mute LinInL %d", error); + return error; + } + } + + if (channel_index & e_CHANNEL_2) { + /* Mute LinInR */ + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, + MUT_LIN_IN_R, + 0); + if (0 != error) { + dev_err(dev, "Mute LinInR %d", error); + return error; + } + } + return error; +} +/** + * @brief Unmute LinIn + * @channel_index Channel-index of LinIn + * @gain Gain index of LinIn + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_unmute_lin(enum AUDIOIO_CH_INDEX channel_index, int *gain, + struct device *dev) +{ + int error = 0; + + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "LinIn should have mono or stereo channels"); + return -EINVAL; + } + + if (channel_index & e_CHANNEL_1) { + /* UnMute LinInL */ + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, 0, + MUT_LIN_IN_L); + if (0 != error) { + dev_err(dev, "UnMute LinInL %d", error); + return error; + } + } + + if (channel_index & e_CHANNEL_2) { + /* UnMute LinInR */ + error = HW_ACODEC_MODIFY_WRITE(LINE_IN_MIC_CONF_REG, 0, + MUT_LIN_IN_R); + if (0 != error) { + dev_err(dev, "UnMute LinInR %d", error); + return error; + } + } + return error; +} +/** + * @brief Enables fading of LinIn + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_enable_fade_lin(struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(AD1_DIGITAL_GAIN_REG, 0, DIS_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for LinInL %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AD2_DIGITAL_GAIN_REG, 0, DIS_FADING); + if (0 != error) { + dev_err(dev, "Enable fading for LinInR %d", error); + return error; + } + return error; +} +/** + * @brief Disables fading of LinIn + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_disable_fade_lin(struct device *dev) +{ + int error = 0; + + error = 
HW_ACODEC_MODIFY_WRITE(AD1_DIGITAL_GAIN_REG, DIS_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for LinInL %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AD2_DIGITAL_GAIN_REG, DIS_FADING, 0); + if (0 != error) { + dev_err(dev, "Disable fading for LinInR %d", error); + return error; + } + return error; +} +/** + * @brief Power Up DMIC12 LinIn + * @channel_index Channel-index of DMIC12 + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_power_up_dmic12(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_AD = 0; + + /* Check if DMic12 request is mono or Stereo */ + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "DMic12 does not support more than 2 channels"); + + return -EINVAL; + } + + /* Setting Direction for GPIO pins on AB8500 */ + error = HW_REG_WRITE(AB8500_GPIO_DIR4_REG, GPIO27_DIR_OUTPUT); + if (0 != error) { + dev_err(dev, "Setting Direction for GPIO pins on AB8500 %d", + error); + return error; + } + + /* Enable AD1 for Dmic1 */ + if (channel_index & e_CHANNEL_1) { + /* Check if DMIC1 is already powered up or used by LinInL */ + initialVal_AD = HW_REG_READ(DIGITAL_AD_CHANNELS_ENABLE_REG); + if (initialVal_AD & EN_AD1) + return 0; + + error = HW_REG_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, + DATA_FROM_AD_OUT1); + if (0 != error) { + dev_err(dev, "Slot 00 outputs data from AD_OUT1 %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + EN_AD1, 0); + if (0 != error) { + dev_err(dev, "Enable AD1 for DMIC1 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, + SEL_DMIC1_FOR_AD_OUT1, 0); + if (0 != error) { + dev_err(dev, "Select DMIC1 for AD_OUT1 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DMIC_ENABLE_REG, EN_DMIC1, 0); + if (0 != error) { + dev_err(dev, "Enable DMIC1 %d", error); + return error; + } + } + /* Enable AD2 for 
Dmic2 */ + + if (channel_index & e_CHANNEL_2) { + /* Check if DMIC2 is already powered up + or used by Mic2 or LinInR */ + initialVal_AD = HW_REG_READ(DIGITAL_AD_CHANNELS_ENABLE_REG); + if (initialVal_AD & EN_AD2) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, + (DATA_FROM_AD_OUT2<<4), 0); + if (0 != error) { + dev_err(dev, "Slot 01 outputs data from AD_OUT2 %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + EN_AD2, 0); + if (0 != error) { + dev_err(dev, "Enable AD2 for DMIC2 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, + SEL_DMIC2_FOR_AD_OUT2, 0); + if (0 != error) { + dev_err(dev, "Select DMIC2 for AD_OUT2 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DMIC_ENABLE_REG, EN_DMIC2, 0); + if (0 != error) { + dev_err(dev, "Enable DMIC2 %d", error); + return error; + } + } + + return error; +} +/** + * @brief Power down DMIC12 LinIn + * @channel_index Channel-index of DMIC12 + * @return 0 on success otherwise negative error code + */ + + +int ste_audio_io_power_down_dmic12(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_AD = 0; + + /* Check if DMic12 request is mono or Stereo or multi channel */ + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "DMic12 does not support more than 2 channels"); + + return -EINVAL; + } + + /* Setting Direction for GPIO pins on AB8500 */ + error = HW_ACODEC_MODIFY_WRITE(AB8500_GPIO_DIR4_REG, 0, + GPIO27_DIR_OUTPUT); + if (0 != error) { + dev_err(dev, "Clearing Direction for GPIO pins on AB8500 %d", + error); + return error; + } + /* Enable AD1 for Dmic1 */ + if (channel_index & e_CHANNEL_1) { + /* Check if DMIC1 is already powered Down or used by LinInL */ + initialVal_AD = HW_REG_READ(DIGITAL_AD_CHANNELS_ENABLE_REG); + if (!(initialVal_AD & EN_AD1)) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(DMIC_ENABLE_REG, 0, 
EN_DMIC1); + if (0 != error) { + dev_err(dev, "Enable DMIC1 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + 0, EN_AD1); + if (0 != error) { + dev_err(dev, "Disable AD1 for DMIC1 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, 0, + DATA_FROM_AD_OUT1); + if (0 != error) { + dev_err(dev, + "Slot 00 outputs data cleared from AD_OUT1 %d", + error); + return error; + } + } + + /* Enable AD2 for Dmic2 */ + if (channel_index & e_CHANNEL_2) { + /* MIC2 is already powered Down or used by Mic2 or LinInR */ + initialVal_AD = HW_REG_READ(DIGITAL_AD_CHANNELS_ENABLE_REG); + if (!(initialVal_AD & EN_AD2)) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(DMIC_ENABLE_REG, 0, EN_DMIC2); + if (0 != error) { + dev_err(dev, "Enable DMIC2 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + 0, EN_AD2); + if (0 != error) { + dev_err(dev, "Disable AD2 for DMIC2 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT0_1_REG, 0, + (DATA_FROM_AD_OUT2<<4)); + if (0 != error) { + dev_err(dev, + "Slot 01 outputs data cleared from AD_OUT2 %d", + error); + return error; + } + } + return error; +} +/** + * @brief Get headset gain + * @left_volume + * @right_volume + * @return 0 on success otherwise negative error code + */ + + +int ste_audio_io_get_headset_gain(int *left_volume, int *right_volume, + u16 gain_index, struct device *dev) +{ + int i = 0; + if (gain_index == 0) { + + *left_volume = 0 - HW_REG_READ(DA1_DIGITAL_GAIN_REG); + *right_volume = 0 - HW_REG_READ(DA2_DIGITAL_GAIN_REG); + + } + + if (gain_index == 1) { + *left_volume = 8 - HW_REG_READ(HSL_EAR_DIGITAL_GAIN_REG); + *right_volume = 8 - HW_REG_READ(HSR_DIGITAL_GAIN_REG); + } + + if (gain_index == 2) { + i = (HW_REG_READ(ANALOG_HS_GAIN_REG)>>4); + *left_volume = hs_analog_gain_table[i]; + i = (HW_REG_READ(ANALOG_HS_GAIN_REG) & MASK_QUARTET0); + 
*right_volume = hs_analog_gain_table[i]; + } + return 0; +} +/** + * @brief Get earpiece gain + * @left_volume + * @right_volume + * @return 0 on success otherwise negative error code + */ + + +int ste_audio_io_get_earpiece_gain(int *left_volume, int *right_volume, + u16 gain_index, struct device *dev) +{ + if (0 == gain_index) + *left_volume = 0 - HW_REG_READ(DA1_DIGITAL_GAIN_REG); + if (1 == gain_index) + *left_volume = 8 - HW_REG_READ(HSL_EAR_DIGITAL_GAIN_REG); + return 0; +} +/** + * @brief Get ihf gain + * @left_volume + * @right_volume + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_get_ihf_gain(int *left_volume, int *right_volume, + u16 gain_index, struct device *dev) +{ + + *left_volume = 0 - HW_REG_READ(DA3_DIGITAL_GAIN_REG); + *right_volume = 0 - HW_REG_READ(DA4_DIGITAL_GAIN_REG); + return 0; +} +/** + * @brief Get vibl gain + * @left_volume + * @right_volume + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_get_vibl_gain(int *left_volume, int *right_volume, + u16 gain_index, struct device *dev) +{ + + *left_volume = 0 - HW_REG_READ(DA5_DIGITAL_GAIN_REG); + + return 0; +} +/** + * @brief Get vibr gain + * @left_volume + * @right_volume + * @return 0 on success otherwise negative error code + */ + + +int ste_audio_io_get_vibr_gain(int *left_volume, int *right_volume, + u16 gain_index, struct device *dev) +{ + + *right_volume = 0 - HW_REG_READ(DA6_DIGITAL_GAIN_REG); + return 0; +} +/** + * @brief Get MIC1A & MIC2A gain + * @left_volume + * @right_volume + * @return 0 on success otherwise negative error code + */ + + +int ste_audio_io_get_mic1a_gain(int *left_volume, int *right_volume, + u16 gain_index, struct device *dev) +{ + if (gain_index == 0) + *left_volume = 31 - HW_REG_READ(AD3_DIGITAL_GAIN_REG); + if (gain_index == 1) + *left_volume = HW_REG_READ(ANALOG_MIC1_GAIN_REG); + + return 0; +} +/** + * @brief Get MIC2 gain + * @left_volume + * @right_volume + * @return 0 on success otherwise 
negative error code + */ + +int ste_audio_io_get_mic2_gain(int *left_volume, int *right_volume, + u16 gain_index, struct device *dev) +{ + if (gain_index == 0) + *left_volume = 31 - HW_REG_READ(AD2_DIGITAL_GAIN_REG); + if (gain_index == 1) + *left_volume = HW_REG_READ(ANALOG_MIC2_GAIN_REG); + + return 0; +} +/** + * @brief Get Lin IN gain + * @left_volume + * @right_volume + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_get_lin_gain(int *left_volume, int *right_volume, + u16 gain_index, struct device *dev) +{ + if (gain_index == 0) { + *left_volume = 31 - HW_REG_READ(AD1_DIGITAL_GAIN_REG); + *right_volume = 31 - HW_REG_READ(AD2_DIGITAL_GAIN_REG); + } + + if (gain_index == 0) { + *left_volume = 2 * ((HW_REG_READ(ANALOG_HS_GAIN_REG)>>4) - 5); + *right_volume = 2 * (HW_REG_READ(ANALOG_LINE_IN_GAIN_REG) - 5); + } + + return 0; +} +/** + * @brief Get DMIC12 gain + * @left_volume + * @right_volume + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_get_dmic12_gain(int *left_volume, int *right_volume, + u16 gain_index, struct device *dev) +{ + + *left_volume = HW_REG_READ(AD1_DIGITAL_GAIN_REG); + + *right_volume = HW_REG_READ(AD2_DIGITAL_GAIN_REG); + + return 0; +} +/** + * @brief Get DMIC34 gain + * @left_volume + * @right_volume + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_get_dmic34_gain(int *left_volume, int *right_volume, + u16 gain_index, struct device *dev) +{ + *left_volume = HW_REG_READ(AD3_DIGITAL_GAIN_REG); + *right_volume = HW_REG_READ(AD4_DIGITAL_GAIN_REG); + + return 0; +} +/** + * @brief Get DMIC56 gain + * @left_volume + * @right_volume + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_get_dmic56_gain(int *left_volume, int *right_volume, + u16 gain_index, struct device *dev) +{ + *left_volume = HW_REG_READ(AD5_DIGITAL_GAIN_REG); + + *right_volume = HW_REG_READ(AD6_DIGITAL_GAIN_REG); + return 0; +} +/** + * @brief Set gain of headset along a 
specified channel + * @channel_index Channel-index of headset + * @gain_index Gain index of headset + * @gain_value Gain value of headset + * @linear + * @return 0 on success otherwise negative error code + */ + + +int ste_audio_io_set_headset_gain(enum AUDIOIO_CH_INDEX channel_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev) +{ + int error = 0; + unsigned char initial_val = 0; + int i = 0; + int acodec_device_id; + + acodec_device_id = abx500_get_chip_id(dev); + + if (channel_index & e_CHANNEL_1) { + if (gain_index == 0) { + int gain = 0; + gain = 0 - gain_value; + + initial_val = HW_REG_READ(DA1_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(DA1_DIGITAL_GAIN_REG, + ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain HSL gainindex = %d %d", + gain_index, error); + return error; + } + } + + if (gain_index == 1) { + int gain = 0; + gain = 8 - gain_value; + + initial_val = HW_REG_READ(HSL_EAR_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(HSL_EAR_DIGITAL_GAIN_REG, + ((initial_val & (~HS_DIGITAL_GAIN_MASK)) | (gain & + HS_DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain HSL gain index = %d %d", + gain_index, error); + return error; + } + } + + if (gain_index == 2) { + /* Set Analog gain */ + int gain = -1; + + if (gain_value % 2) { + gain_value -= 1; + dev_err(dev, + "Odd Gain received.Fixing it to 2dB step gain_value = %d", + gain_value); + } + /* Fix for 4dB step gains. 
Select one lower value */ + if (gain_value == -22) + gain_value = -24; + + if (gain_value == -26) + gain_value = -28; + + if (gain_value == -30) + gain_value = -32; + + for (i = 0 ; i < 16; i++) { + if (hs_analog_gain_table[i] == gain_value) { + gain = i<<4; + break; + } + } + if (gain == -1) + return -1; + + if ((AB8500_REV_10 == acodec_device_id) || + (AB8500_REV_11 == acodec_device_id)) { + if (!gain) + gain = 0x10; + gain = 0xF0 - gain; + } + initial_val = HW_REG_READ(ANALOG_HS_GAIN_REG); + + /* Write gain */ + error = HW_REG_WRITE(ANALOG_HS_GAIN_REG, ((initial_val & + (~L_ANALOG_GAIN_MASK)) | (gain & L_ANALOG_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain HSL gain index = %d %d", + gain_index, error); + return error; + } + } + } + + /* for HSR */ + if (channel_index & e_CHANNEL_2) { + /* Set Gain HSR */ + if (gain_index == 0) { + int gain = 0; + gain = 0 - gain_value; + + initial_val = HW_REG_READ(DA2_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(DA2_DIGITAL_GAIN_REG, ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain HSR gain index = %d %d", + gain_index, error); + return error; + } + } + + if (gain_index == 1) { + int gain = 0; + gain = 8 - gain_value; + + initial_val = HW_REG_READ(HSR_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(HSR_DIGITAL_GAIN_REG, ((initial_val + & (~HS_DIGITAL_GAIN_MASK)) | (gain & + HS_DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain HSR gain index = %d %d", + gain_index, error); + return error; + } + + } + + if (gain_index == 2) { + /* Set Analog gain */ + int gain = -1; + + if (gain_value % 2) { + gain_value -= 1; + dev_err(dev, + "Odd Gain received.Fixing it to 2dB step gain_value = %d", + gain_value); + } + /* Fix for 4dB step gains. 
Select one lower value */ + if (gain_value == -22) + gain_value = -24; + + if (gain_value == -26) + gain_value = -28; + + if (gain_value == -30) + gain_value = -32; + + for (i = 0 ; i < 16 ; i++) { + if (hs_analog_gain_table[i] == gain_value) { + gain = i; + break; + } + } + if (gain == -1) + return -1; + + if ((AB8500_REV_10 == acodec_device_id) || + (AB8500_REV_11 == acodec_device_id)) { + if (!gain) + gain = 1; + gain = 0x0F - gain; + } + initial_val = HW_REG_READ(ANALOG_HS_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(ANALOG_HS_GAIN_REG, ((initial_val & + (~R_ANALOG_GAIN_MASK)) | (gain & R_ANALOG_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain HSR gainindex = %d %d", + gain_index, error); + return error; + } + } + } + dump_acodec_registers(__func__, dev); + return error; +} +/** + * @brief Set gain of earpiece + * @channel_index Channel-index of earpiece + * @gain_index Gain index of earpiece + * @gain_value Gain value of earpiece + * @linear + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_set_earpiece_gain(enum AUDIOIO_CH_INDEX channel_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev) +{ + int error = 0; + unsigned char initial_val = 0; + if (channel_index & e_CHANNEL_1) { + if (0 == gain_index) { + int gain = 0; + gain = 0 - gain_value; + + initial_val = HW_REG_READ(DA1_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(DA1_DIGITAL_GAIN_REG, ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain Ear gainindex = %d %d", + gain_index, error); + return error; + } + } + + if (gain_index == 1) { + int gain = 0; + gain = 8 - gain_value; + + initial_val = HW_REG_READ(HSL_EAR_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(HSL_EAR_DIGITAL_GAIN_REG, + ((initial_val & (~HS_DIGITAL_GAIN_MASK)) | (gain & + HS_DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain Ear gainindex = %d %d", + 
gain_index, error); + return error; + } + } + } + dump_acodec_registers(__func__, dev); + return error; +} +/** + * @brief Set gain of vibl + * @channel_index Channel-index of vibl + * @gain_index Gain index of vibl + * @gain_value Gain value of vibl + * @linear + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_set_vibl_gain(enum AUDIOIO_CH_INDEX channel_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev) +{ + + int error = 0; + unsigned char initial_val = 0; + + if (channel_index & e_CHANNEL_1) { + /* Set Gain vibl */ + if (gain_index == 0) { + int gain = 0; + gain = 0 - gain_value; + + initial_val = HW_REG_READ(DA5_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(DA5_DIGITAL_GAIN_REG, ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain VibL gain index = %d %d", + gain_index, error); + return error; + } + } + } + return error; +} +/** + * @brief Set gain of vibr + * @channel_index Channel-index of vibr + * @gain_index Gain index of vibr + * @gain_value Gain value of vibr + * @linear + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_set_vibr_gain(enum AUDIOIO_CH_INDEX channel_index, + u16 gain_index, int gain_value, + u32 linear, + struct device *dev) +{ + + int error = 0; + unsigned char initial_val = 0; + + if (channel_index & e_CHANNEL_1) { + /* Set Gain vibr */ + if (gain_index == 0) { + int gain = 0; + gain = 0 - gain_value; + + initial_val = HW_REG_READ(DA6_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(DA6_DIGITAL_GAIN_REG, + ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain VibR gain index = %d %d", + gain_index, error); + return error; + } + } + } + return error; +} +/** + * @brief Set gain of ihf along a specified channel + * @channel_index Channel-index of ihf + * @gain_index Gain index of ihf + * @gain_value Gain 
value of ihf + * @linear + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_set_ihf_gain(enum AUDIOIO_CH_INDEX channel_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev) +{ + int error = 0; + unsigned char initial_val = 0; + + if (channel_index & e_CHANNEL_1) { + /* Set Gain IHFL */ + if (gain_index == 0) { + int gain = 0; + gain = 0 - gain_value; + + initial_val = HW_REG_READ(DA3_DIGITAL_GAIN_REG); + error = HW_REG_WRITE(DA3_DIGITAL_GAIN_REG, ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain IHFL gain index = %d %d", + gain_index, error); + return error; + } + + } + } + if (channel_index & e_CHANNEL_2) { + /* Set Gain IHFR */ + if (gain_index == 0) { + int gain = 0; + gain = 0 - gain_value; + + initial_val = HW_REG_READ(DA4_DIGITAL_GAIN_REG); + error = HW_REG_WRITE(DA4_DIGITAL_GAIN_REG, ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain IHFR gain index = %d %d", + gain_index, error); + return error; + } + } + } + + return error; +} +/** + * @brief Set gain of MIC1A & MIC1B + * @channel_index Channel-index of MIC1 + * @gain_index Gain index of MIC1 + * @gain_value Gain value of MIC1 + * @linear + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_set_mic1a_gain(enum AUDIOIO_CH_INDEX channel_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev) +{ + int error = 0; + unsigned char initial_val = 0; + + if (channel_index & e_CHANNEL_1) { + /* Set Gain mic1 */ + if (gain_index == 0) { + int gain = 0; + gain = 31 - gain_value; + + initial_val = HW_REG_READ(AD3_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(AD3_DIGITAL_GAIN_REG, ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain Mic1 gain index = %d %d", + gain_index, error); + return error; + } + + } + + if 
(gain_index == 1) { + /* Set Analog gain */ + initial_val = HW_REG_READ(ANALOG_MIC1_GAIN_REG); + + /* Write gain */ + error = HW_REG_WRITE(ANALOG_MIC1_GAIN_REG, ((initial_val + & (~MIC_ANALOG_GAIN_MASK)) | (gain_value & + MIC_ANALOG_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain Mic1 gain index = %d %d", + gain_index, error); + return error; + } + } + } + return error; +} +/** + * @brief Set gain of MIC2 + * @channel_index Channel-index of MIC2 + * @gain_index Gain index of MIC2 + * @gain_value Gain value of MIC2 + * @linear + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_set_mic2_gain(enum AUDIOIO_CH_INDEX channel_index, + u16 gain_index, int gain_value, + u32 linear, + struct device *dev) +{ + int error = 0; + unsigned char initial_val = 0; + + if (channel_index & e_CHANNEL_1) { + /* Set Gain mic2 */ + if (gain_index == 0) { + int gain = 0; + gain = 31 - gain_value; + + initial_val = HW_REG_READ(AD2_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(AD2_DIGITAL_GAIN_REG, ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain Mic2 gain index = %d %d", + gain_index, error); + return error; + } + + } + + if (gain_index == 1) { + /* Set Analog gain */ + initial_val = HW_REG_READ(ANALOG_MIC2_GAIN_REG); + + /* Write gain */ + error = HW_REG_WRITE(ANALOG_MIC2_GAIN_REG, ((initial_val + & (~MIC_ANALOG_GAIN_MASK)) | (gain_value & + MIC_ANALOG_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain Mic2 gain index = %d %d", + gain_index, error); + return error; + } + } + } + return error; +} +/** + * @brief Set gain of Lin IN along a specified channel + * @channel_index Channel-index of Lin In + * @gain_index Gain index of Lin In + * @gain_value Gain value of Lin In + * @linear + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_set_lin_gain(enum AUDIOIO_CH_INDEX channel_index, + u16 gain_index, int gain_value, u32 linear, 
+ struct device *dev) +{ + int error = 0; + unsigned char initial_val = 0; + + if (channel_index & e_CHANNEL_1) { + if (gain_index == 0) { + int gain = 0; + gain = 31 - gain_value; + + initial_val = HW_REG_READ(AD1_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(AD1_DIGITAL_GAIN_REG, + ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain LinInL gain index = %d %d", + gain_index, error); + return error; + } + + } + + if (gain_index == 1) { + int gain = 0; + /* + * Converting -10 to 20 range into 0 - 15 + * & shifting it left by 4 bits + */ + gain = ((gain_value/2) + 5)<<4; + + initial_val = HW_REG_READ(ANALOG_LINE_IN_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(ANALOG_LINE_IN_GAIN_REG, + ((initial_val & (~L_ANALOG_GAIN_MASK)) | (gain & + L_ANALOG_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain LinInL gain index = %d %d", + gain_index, error); + return error; + } + } + } + + if (channel_index & e_CHANNEL_2) { + /* Set Gain LinInR */ + if (gain_index == 0) { + int gain = 0; + gain = 31 - gain_value; + + initial_val = HW_REG_READ(AD2_DIGITAL_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(AD2_DIGITAL_GAIN_REG, + ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain LinInR gain index = %d%d", + gain_index, error); + return error; + } + } + if (gain_index == 1) { + int gain = 0; + /* Converting -10 to 20 range into 0 - 15 */ + gain = ((gain_value/2) + 5); + + initial_val = HW_REG_READ(ANALOG_LINE_IN_GAIN_REG); + /* Write gain */ + error = HW_REG_WRITE(ANALOG_LINE_IN_GAIN_REG, + ((initial_val & (~R_ANALOG_GAIN_MASK)) | (gain & + R_ANALOG_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain LinInR gain index = %d %d", + gain_index, error); + return error; + } + } + } + + return error; +} +/** + * @brief Set gain of DMIC12 along a specified channel + * @channel_index Channel-index of DMIC12 + * 
@gain_index Gain index of DMIC12 + * @gain_value Gain value of DMIC12 + * @linear + * @return 0 on success otherwise negative error code + */ + +int ste_audio_io_set_dmic12_gain(enum AUDIOIO_CH_INDEX channel_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev) +{ + int error = 0; + unsigned char initial_val = 0; + + if (channel_index & e_CHANNEL_1) { + /* Set Gain Dmic1 */ + if (gain_index == 0) { + int gain = 0; + gain = 31 - gain_value; + + initial_val = HW_REG_READ(AD1_DIGITAL_GAIN_REG); + error = HW_REG_WRITE(AD1_DIGITAL_GAIN_REG, + ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain DMic1 gain index = %d %d", + gain_index, error); + return error; + } + } + } + if (channel_index & e_CHANNEL_2) { + /* Set Gain Dmic2 */ + if (gain_index == 0) { + int gain = 0; + gain = 31 - gain_value; + + initial_val = HW_REG_READ(AD2_DIGITAL_GAIN_REG); + error = HW_REG_WRITE(AD2_DIGITAL_GAIN_REG, + ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain DMic2 gain index = %d %d", + gain_index, error); + return error; + } + } + } + return error; +} + +int ste_audio_io_switch_to_burst_mode_headset(int burst_fifo_switch_frame, + struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(BURST_FIFO_INT_CONTROL_REG, + WAKEUP_SIGNAL_SAMPLE_COUNT, 0); + if (0 != error) + return error; + + error = HW_ACODEC_MODIFY_WRITE(BURST_FIFO_LENGTH_REG, + BURST_FIFO_TRANSFER_LENGTH, 0); + if (0 != error) + return error; + + error = HW_ACODEC_MODIFY_WRITE(BURST_FIFO_CONTROL_REG, + (BURST_FIFO_INF_RUNNING | BURST_FIFO_INF_IN_MASTER_MODE + |PRE_BIT_CLK0_COUNT), 0); + if (0 != error) + return error; + + error = HW_ACODEC_MODIFY_WRITE(BURST_FIFO_WAKE_UP_DELAY_REG, + BURST_FIFO_WAKUP_DEALAY, 0); + if (0 != error) + return error; + + error = HW_REG_WRITE(BURST_FIFO_SWITCH_FRAME_REG, + burst_fifo_switch_frame); + if (0 != error) + return 
error; + + error = HW_ACODEC_MODIFY_WRITE(TDM_IF_BYPASS_B_FIFO_REG, + IF0_BFifoEn, 0); + if (0 != error) + return error; + + return error; +} +int ste_audio_io_switch_to_normal_mode_headset( + struct device *dev) +{ + int error = 0; + + error = HW_ACODEC_MODIFY_WRITE(TDM_IF_BYPASS_B_FIFO_REG, 0, + IF0_BFifoEn); + if (0 != error) + return error; + + error = HW_ACODEC_MODIFY_WRITE(BURST_FIFO_INT_CONTROL_REG, + 0, WAKEUP_SIGNAL_SAMPLE_COUNT); + if (0 != error) + return error; + + error = HW_ACODEC_MODIFY_WRITE(BURST_FIFO_LENGTH_REG, + 0, BURST_FIFO_TRANSFER_LENGTH); + if (0 != error) + return error; + + error = HW_ACODEC_MODIFY_WRITE(BURST_FIFO_CONTROL_REG, 0, + (BURST_FIFO_INF_RUNNING | BURST_FIFO_INF_IN_MASTER_MODE + |PRE_BIT_CLK0_COUNT)); + if (0 != error) + return error; + + error = HW_ACODEC_MODIFY_WRITE(BURST_FIFO_WAKE_UP_DELAY_REG, + 0, BURST_FIFO_WAKUP_DEALAY); + if (0 != error) + return error; + + return error; +} + + +int ste_audio_io_mute_vibl(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + return 0; +} + +int ste_audio_io_unmute_vibl(enum AUDIOIO_CH_INDEX channel_index, int *gain, + struct device *dev) +{ + return 0; +} + +int ste_audio_io_mute_vibr(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + return 0; +} +int ste_audio_io_unmute_vibr(enum AUDIOIO_CH_INDEX channel_index, int *gain, + struct device *dev) +{ + return 0; +} + +int ste_audio_io_mute_dmic12(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + if ((channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + error = ste_audio_io_set_dmic12_gain(channel_index, 0, -32, + 0, dev); + if (0 != error) { + dev_err(dev, "Mute dmic12 %d", error); + return error; + } + } + + return error; + +} + +int ste_audio_io_unmute_dmic12(enum AUDIOIO_CH_INDEX channel_index, int *gain, + struct device *dev) +{ + int error = 0; + if ((channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + error = ste_audio_io_set_dmic12_gain(channel_index, + 0, gain[0], 0, dev); + if (0 
!= error) { + dev_err(dev, "UnMute dmic12 %d", error); + return error; + } + } + return error; +} +int ste_audio_io_enable_fade_dmic12(struct device *dev) +{ + return 0; +} + +int ste_audio_io_disable_fade_dmic12(struct device *dev) +{ + return 0; +} + +/** + * @brief enable hardware loop of dmic12 + * @chnl_index Channel-index of dmic12 + * @hw_loop type of hardware loop + * @loop_gain gain value to be used in hardware loop + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_enable_loop_dmic12(enum AUDIOIO_CH_INDEX chnl_index, + enum AUDIOIO_HAL_HW_LOOPS hw_loop, + int loop_gain, struct device *dev, + void *cookie) +{ + int error = 0; + struct transducer_context_t *trnsdr; + trnsdr = (struct transducer_context_t *)cookie; + + switch (hw_loop) { + /* Check if HSL is active */ + case AUDIOIO_SIDETONE_LOOP: + if (!trnsdr[HS_CH].is_power_up[e_CHANNEL_1] + && !trnsdr[EAR_CH].is_power_up[e_CHANNEL_1]) { + error = -EFAULT; + dev_err(dev, + "Sidetone enable needs HS or Earpiece powered up, err = %d", + error); + return error; + } + + if (chnl_index & e_CHANNEL_1) { + /* For ch1, Power On STFIR1, data comes from AD1*/ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG2, + 0, FIR1_FROMAD1); + if (error) { + dev_err(dev, "FIR1 data comes from AD_OUT1 %d", + error); + return error; + } + + error = HW_REG_WRITE(SIDETONE_FIR1_GAIN_REG, loop_gain); + if (error) { + dev_err(dev, + "Set FIR1 Gain index = %d", error); + return error; + } + } + + if (chnl_index & e_CHANNEL_2) { + /* For ch2, Power On STFIR1, data comes from AD2*/ + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG2, + 0, FIR1_FROMAD2); + if (error) { + dev_err(dev, "FIR1 data comes from AD_OUT2 %d", + error); + return error; + } + error = HW_REG_WRITE(SIDETONE_FIR2_GAIN_REG, loop_gain); + if (error) { + dev_err(dev, + "Set FIR2 Gain error = %d", error); + return error; + } + } + break; + default: + error = -EINVAL; + dev_err(dev, "loop not supported %d", error); + } + 
dump_acodec_registers(__func__, dev); + return error; +} + +/** + * @brief disable hardware loop of dmic12 + * @chnl_index Channel-index of dmic12 + * @hw_loop type of hardware loop + * @return 0 on success otherwise negative error code + */ +int ste_audio_io_disable_loop_dmic12(enum AUDIOIO_CH_INDEX chnl_index, + enum AUDIOIO_HAL_HW_LOOPS hw_loop, + struct device *dev, void *cookie) +{ + int error = -EINVAL; + struct transducer_context_t *trnsdr; + trnsdr = (struct transducer_context_t *)cookie; + + switch (hw_loop) { + /* Check if HSL is active */ + case AUDIOIO_SIDETONE_LOOP: + if (!trnsdr[HS_CH].is_power_up[e_CHANNEL_1] + && !trnsdr[EAR_CH].is_power_up[e_CHANNEL_1]) { + error = -EFAULT; + dev_err(dev, + "Sidetone disable needs HS or Earpiece powered up, err = %d", + error); + return error; + } + + if (chnl_index & e_CHANNEL_1) { + /* For ch1, Power On STFIR1, data comes from AD1*/ + error = + HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG2, + 0, FIR1_FROMAD1); + if (error) + dev_err(dev, "FIR1 data comes from AD_OUT1 %d", + error); + } + + if (chnl_index & e_CHANNEL_2) { + /* For ch2, Power On STFIR1, data comes from AD2*/ + error = + HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG2, + 0, FIR1_FROMAD2); + if (error) + dev_err(dev, "FIR1 data comes from AD_OUT2 %d", + error); + } + error = HW_ACODEC_MODIFY_WRITE(FILTERS_CONTROL_REG, + 0, FIR_FILTERCONTROL); + if (error) { + dev_err(dev, + "ST FIR Filters disable failed %d", error); + return error; + } + break; + default: + dev_err(dev, "loop not supported %d", error); + } + dump_acodec_registers(__func__, dev); + return error; +} + +int ste_audio_io_power_up_dmic34(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_AD = 0; + + /* Check if DMic34 request is mono or Stereo */ + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "DMic34 does not support more than 2 channels"); + return -EINVAL; + } + + /* Setting Direction for GPIO pins on AB8500 */ + 
error = HW_REG_WRITE(AB8500_GPIO_DIR4_REG, GPIO29_DIR_OUTPUT); + if (0 != error) { + dev_err(dev, "Setting Direction for GPIO pins on AB8500 %d", + error); + return error; + } + + if (channel_index & e_CHANNEL_1) { + /* Check if DMIC3 is already powered up or used by Mic1A + or Mic1B */ + initialVal_AD = HW_REG_READ(DIGITAL_AD_CHANNELS_ENABLE_REG); + + if (initialVal_AD & (EN_AD3)) + return 0; + + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT2_3_REG, + DATA_FROM_AD_OUT3, 0); + if (0 != error) { + dev_err(dev, "Slot 02 outputs data from AD_OUT3 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, EN_AD3, + 0); + if (0 != error) { + dev_err(dev, "Enable AD3 for DMIC3 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_MUXES_REG1, + SEL_DMIC3_FOR_AD_OUT3, + 0); + if (0 != error) { + dev_err(dev, "Select DMIC3 for AD_OUT3 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DMIC_ENABLE_REG, EN_DMIC3, 0); + if (0 != error) { + dev_err(dev, "Enable DMIC3 %d", error); + return error; + } +} + + /* Enable AD4 for Dmic4 */ + if (channel_index & e_CHANNEL_2) { + /* Check if DMIC4 is already powered up */ + if (initialVal_AD & (EN_AD4)) + return 0; + + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT2_3_REG, + (DATA_FROM_AD_OUT4<<4), 0); + if (0 != error) { + dev_err(dev, "Slot 03 outputs data from AD_OUT4 %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + EN_AD4, 0); + if (0 != error) { + dev_err(dev, "Enable AD4 for DMIC4 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DMIC_ENABLE_REG, EN_DMIC4, 0); + if (0 != error) { + dev_err(dev, "Enable DMIC4 %d", error); + return error; + } + } + return error; + } + +int ste_audio_io_power_down_dmic34(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_AD = 0; + + + if (!(channel_index & (e_CHANNEL_1 | 
e_CHANNEL_2))) { + dev_err(dev, "DMic34 does not support more than 2 channels"); + return -EINVAL; + } + + /* Setting Direction for GPIO pins on AB8500 */ + error = HW_ACODEC_MODIFY_WRITE(AB8500_GPIO_DIR4_REG, 0, + GPIO29_DIR_OUTPUT); + if (0 != error) { + dev_err(dev, "Clearing Direction for GPIO pins on AB8500 %d", + error); + return error; + } + + /* Enable AD1 for Dmic1 */ + if (channel_index & e_CHANNEL_1) { + /* Check if DMIC3 is already powered Down or used by Mic1A + or Mic1B */ + initialVal_AD = HW_REG_READ(DIGITAL_AD_CHANNELS_ENABLE_REG); + if (!(initialVal_AD & EN_AD3)) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(DMIC_ENABLE_REG, 0, EN_DMIC3); + if (0 != error) { + dev_err(dev, "Enable DMIC3 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + 0, + EN_AD3); + if (0 != error) { + dev_err(dev, "Disable AD3 for DMIC3 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT2_3_REG, 0, + DATA_FROM_AD_OUT3); + if (0 != error) { + dev_err(dev, + "Slot 02 outputs data cleared from AD_OUT3 %d", + error); + return error; + } + } + + /* Enable AD4 for Dmic4 */ + if (channel_index & e_CHANNEL_2) { + /* Check if DMIC4 is already powered down */ + initialVal_AD = HW_REG_READ(DIGITAL_AD_CHANNELS_ENABLE_REG); + if (!(initialVal_AD & EN_AD4)) + return 0; + + error = HW_ACODEC_MODIFY_WRITE(DMIC_ENABLE_REG, 0, EN_DMIC4); + if (0 != error) { + dev_err(dev, "Enable DMIC4 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(DIGITAL_AD_CHANNELS_ENABLE_REG, + 0, EN_AD4); + if (0 != error) { + dev_err(dev, "Disable AD4 for DMIC4 %d", error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT2_3_REG, 0, + (DATA_FROM_AD_OUT4<<4)); + if (0 != error) { + dev_err(dev, + "Slot 03 outputs data cleared from AD_OUT4 %d", + error); + return error; + } + } + return error; +} +int ste_audio_io_set_dmic34_gain(enum AUDIOIO_CH_INDEX channel_index, + u16 gain_index, int 
gain_value, u32 linear, + struct device *dev) +{ + int error = 0; + unsigned char initial_val = 0; + + if (channel_index & e_CHANNEL_1) { + /* Set Gain Dmic3 */ + if (gain_index == 0) { + int gain = 0; + gain = 31 - gain_value; + + initial_val = HW_REG_READ(AD3_DIGITAL_GAIN_REG); + error = HW_REG_WRITE(AD3_DIGITAL_GAIN_REG, + ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + if (0 != error) { + dev_err(dev, + "Set Gain DMic3 gain index = %d %d", + gain_index, error); + return error; + } + } + } + + if (channel_index & e_CHANNEL_2) { + /* Set Gain Dmic4 */ + if (gain_index == 0) { + int gain = 0; + gain = 31 - gain_value; + + initial_val = HW_REG_READ(AD4_DIGITAL_GAIN_REG); + error = HW_REG_WRITE(AD4_DIGITAL_GAIN_REG, + ((initial_val + & (~DIGITAL_GAIN_MASK)) | (gain & DIGITAL_GAIN_MASK))); + + if (0 != error) { + dev_err(dev, + "Set Gain DMic4 gain index = %d %d", + gain_index, error); + return error; + } + } + } + + return error; +} +int ste_audio_io_mute_dmic34(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + if ((channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + error = ste_audio_io_set_dmic34_gain(channel_index, 0, -32, + 0, dev); + if (0 != error) { + dev_err(dev, "Mute dmic34 %d", error); + return error; + } + } + return error; +} +int ste_audio_io_unmute_dmic34(enum AUDIOIO_CH_INDEX channel_index, int *gain, + struct device *dev) +{ + int error = 0; + if ((channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + error = ste_audio_io_set_dmic34_gain(channel_index, + 0, gain[0], 0, dev); + if (0 != error) { + dev_err(dev, "UnMute dmic34 %d", error); + return error; + } + } + return error; +} +int ste_audio_io_enable_fade_dmic34(struct device *dev) +{ + return 0; +} + +int ste_audio_io_disable_fade_dmic34(struct device *dev) +{ + return 0; +} + +int ste_audio_io_power_up_dmic56(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + return 0; +} +int ste_audio_io_power_down_dmic56(enum AUDIOIO_CH_INDEX 
channel_index, + struct device *dev) +{ + return 0; +} +int ste_audio_io_set_dmic56_gain(enum AUDIOIO_CH_INDEX channel_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev) +{ + return 0; +} +int ste_audio_io_mute_dmic56(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + return 0; +} +int ste_audio_io_unmute_dmic56(enum AUDIOIO_CH_INDEX channel_index, int *gain, + struct device *dev) +{ + return 0; +} +int ste_audio_io_enable_fade_dmic56(struct device *dev) +{ + return 0; +} + +int ste_audio_io_disable_fade_dmic56(struct device *dev) +{ + return 0; +} + +int ste_audio_io_configure_if1(struct device *dev) +{ + int error = 0; + + error = HW_REG_WRITE(IF1_CONF_REG, IF_DELAYED | + I2S_LEFT_ALIGNED_FORMAT | WORD_LENGTH_16); + if (error != 0) { + dev_err(dev, + "Configure IF1: I2S Format 16 Bits word length error = %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(TDM_IF_BYPASS_B_FIFO_REG, IF1_MASTER, 0); + if (error != 0) { + dev_err(dev, + "Configure IF1: IF1 master error = %d", + error); + return error; + } + + error = HW_ACODEC_MODIFY_WRITE(IF0_IF1_MASTER_CONF_REG, + EN_FSYNC_BITCLK1, 0); + if (error != 0) { + dev_err(dev, + "ConfigIF1 bitclk is 32x48KHz, enable Fsync1 and Bitclk1 error = %d", + error); + return error; + } + return error; +} + +int ste_audio_io_power_up_fmrx(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal = 0; + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "FMRX should have mono or stereo channels"); + return -EINVAL; + } + + ste_audio_io_configure_if1(dev); + + if (channel_index & e_CHANNEL_1) { + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA7_REG, + SLOT24_FOR_DA_PATH, 0); + if (0 != error) { + dev_err(dev, "Data sent to DA_IN7 from Slot 24 %d", + error); + return error; + } + /* DA_IN7 to AD_OUT8 path */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA5_REG, + SEL_AD_OUT8_FROM_DAIN7, 0); + if (0 != 
error) { + dev_err(dev, "Data sent to AD_OUT5 from DA_IN7 %d", + error); + return error; + } + + initialVal = HW_REG_READ(AD_ALLOCATION_TO_SLOT6_7_REG); + error = HW_REG_WRITE(AD_ALLOCATION_TO_SLOT6_7_REG, + ((initialVal & MASK_QUARTET1)|SEL_IF6_FROM_AD_OUT5)); + if (0 != error) { + dev_err(dev, "Data sent to IF slot 6 from AD_OUT5 %d", + error); + return error; + } + } + + if (channel_index & e_CHANNEL_2) { + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA8_REG, + SLOT25_FOR_DA_PATH, 0); + if (0 != error) { + dev_err(dev, "Data sent to DA_IN8 from Slot 25 %d", + error); + return error; + } + + /* DA_IN7 to AD_OUT8 path */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA6_REG, + SEL_AD_OUT6_FROM_DAIN8, 0); + if (0 != error) { + dev_err(dev, "Data sent to AD_OUT6 from DA_IN8 %d", + error); + return error; + } + + initialVal = HW_REG_READ(AD_ALLOCATION_TO_SLOT6_7_REG); + + error = HW_REG_WRITE(AD_ALLOCATION_TO_SLOT6_7_REG, + (initialVal & MASK_QUARTET0)|SEL_IF7_FROM_AD_OUT6); + /* 5x is written */ + if (0 != error) { + dev_err(dev, "Data sent to IF7 from AD_OUT6 %d", + error); + return error; + } + } + return error; +} +int ste_audio_io_power_down_fmrx(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "FMRX should have mono or stereo channels"); + return -EINVAL; + } + if (channel_index & e_CHANNEL_1) { + /* data sent to DA7 input of DA filter form IF1 */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA7_REG, 0, + SLOT24_FOR_DA_PATH); + if (0 != error) { + dev_err(dev, "Clearing Data sent to DA_IN7 from Slot 24 %d", + error); + return error; + } + /* DA_IN7 to AD_OUT8 path */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA5_REG, 0, + SEL_AD_OUT8_FROM_DAIN7); + if (0 != error) { + dev_err(dev, "Clearing Data sent to AD_OUT5 from DA_IN7 %d", + error); + return error; + } + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT6_7_REG, 0, + 
SEL_IF6_FROM_AD_OUT5); + if (0 != error) { + dev_err(dev, + "Clearing Data sent to IF slot 6 from AD_OUT5 %d", + error); + return error; + } +} + + if (channel_index & e_CHANNEL_2) { + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA8_REG, 0, + SLOT25_FOR_DA_PATH); + if (0 != error) { + dev_err(dev, + "Clearing Data sent to DA_IN8 from Slot 25 %d", + error); + return error; + } + + /* DA_IN7 to AD_OUT8 path */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA6_REG, 0, + SEL_AD_OUT6_FROM_DAIN8); + if (0 != error) { + dev_err(dev, + "Clearing Data sent to AD_OUT6 from DA_IN8 %d", + error); + return error; + } + error = HW_ACODEC_MODIFY_WRITE(AD_ALLOCATION_TO_SLOT6_7_REG, 0, + SEL_IF7_FROM_AD_OUT6); + if (0 != error) { + dev_err(dev, + "Clearing Data sent to IF7 from AD_OUT6 %d", + error); + return error; + } + } + return error; +} + +int ste_audio_io_power_up_fmtx(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal = 0; + + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "FMTX should have mono or stereo channels"); + return -EINVAL; + } + + ste_audio_io_configure_if1(dev); + + if (channel_index & e_CHANNEL_1) { + /* data sent to DA7 input of DA filter form IF1 14 slot */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA7_REG, + SLOT14_FOR_DA_PATH, 0); + if (0 != error) { + dev_err(dev, + "Data sent to DA_IN7 from Slot 14 %d", error); + return error; + } + /* DA_IN7 to AD_OUT5 path */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA5_REG, + SEL_AD_OUT5_FROM_DAIN7, 0); + if (0 != error) { + dev_err(dev, "Data sent to AD_OUT5 from DA_IN7 %d", + error); + return error; + } + + initialVal = HW_REG_READ(AD_ALLOCATION_TO_SLOT16_17_REG); + error = HW_REG_WRITE(AD_ALLOCATION_TO_SLOT16_17_REG, + (initialVal & MASK_QUARTET1)|SEL_IF6_FROM_AD_OUT5); + if (0 != error) { + dev_err(dev, "Data sent to IF16 from AD_OUT5 %d", + error); + return error; + } + } + + if (channel_index & e_CHANNEL_2) 
{ + /* data sent to DA8 input of DA filter */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA8_REG, + SLOT15_FOR_DA_PATH, 0); + if (0 != error) { + dev_err(dev, "Data sent to DA_IN8 from Slot 15 %d", + error); + return error; + } + + /* DA_IN8 to AD_OUT6 path */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA6_REG, + SEL_AD_OUT6_FROM_DAIN8, 0); + if (0 != error) { + dev_err(dev, "Data sent to AD_OUT6 from DA_IN8 %d", + error); + return error; + } + + initialVal = HW_REG_READ(AD_ALLOCATION_TO_SLOT16_17_REG); + error = HW_REG_WRITE(AD_ALLOCATION_TO_SLOT16_17_REG, + (initialVal & MASK_QUARTET0)|SEL_IF17_FROM_AD_OUT6); + if (0 != error) { + dev_err(dev, "Data sent to IF17 from AD_OUT6 %d", + error); + return error; + } + } + return error; +} + +int ste_audio_io_power_down_fmtx(enum AUDIOIO_CH_INDEX channel_index, + struct device *dev) +{ + int error = 0; + unsigned char initialVal_AD = 0; + + if (!(channel_index & (e_CHANNEL_1 | e_CHANNEL_2))) { + dev_err(dev, "FMTX should have mono or stereo channels"); + return -EINVAL; + } + + if (channel_index & e_CHANNEL_1) { + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA7_REG, 0, + SLOT14_FOR_DA_PATH); + if (0 != error) { + dev_err(dev, + "Clearing Data sent to DA_IN7 from Slot 14 %d", + error); + return error; + } + /* DA_IN7 to AD_OUT8 path */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA5_REG, 0, + SEL_AD_OUT5_FROM_DAIN7); + if (0 != error) { + dev_err(dev, + "Clearing Data sent to AD_OUT5 from DA_IN7 %d", + error); + return error; + } + error = HW_REG_WRITE(AD_ALLOCATION_TO_SLOT16_17_REG, + SEL_IF6_FROM_AD_OUT5); + if (0 != error) { + dev_err(dev, + "Clearing Data sent to IF16 from AD_OUT8 %d", + error); + return error; + } + } + + if (channel_index & e_CHANNEL_2) { + /* data sent to DA8 input of DA filter */ + initialVal_AD = HW_REG_READ(SLOT_SELECTION_TO_DA8_REG); + + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA8_REG, 0, + SLOT15_FOR_DA_PATH); + if (0 != error) { + dev_err(dev, + "Clearing 
Data sent to DA_IN8 from Slot 15 %d", + error); + return error; + } + + /* DA_IN7 to AD_OUT8 path */ + error = HW_ACODEC_MODIFY_WRITE(SLOT_SELECTION_TO_DA6_REG, 0, + SEL_AD_OUT6_FROM_DAIN8); + if (0 != error) { + dev_err(dev, + "Clearing Data sent to AD_OUT6 from DA_IN8 %d", + error); + return error; + } + error = HW_REG_WRITE(AD_ALLOCATION_TO_SLOT16_17_REG, + SEL_IF17_FROM_AD_OUT6); + if (0 != error) { + dev_err(dev, + "Clearing Data sent to IF17 from AD_OUT6 %d", + error); + return error; + } + } + return error; +} +int ste_audio_io_power_up_bluetooth(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev) +{ + int error = 0; + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev); + if (bluetooth_power_up_count++) + return error; + + if (pdata) { + if (pdata->audio) { + error = pdata->audio->ste_gpio_altf_init(); + if (error == 0) { + clk_ptr_msp0 = clk_get_sys("msp0", NULL); + if (!IS_ERR(clk_ptr_msp0)) { + error = clk_enable(clk_ptr_msp0); + return error; + } else + return -EFAULT; + } + } + } + return error; +} + +int ste_audio_io_power_down_bluetooth(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev) +{ + int error = 0; + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev); + + if (--bluetooth_power_up_count) + return error; + + if (pdata) { + if (pdata->audio) { + error = pdata->audio->ste_gpio_altf_exit(); + if (error == 0) { + clk_disable(clk_ptr_msp0); + clk_put(clk_ptr_msp0); + } + } + } + return error; +} + +int dump_acodec_registers(const char *str, struct device *dev) +{ + int reg_count = REVISION_REG & 0xff; + if (1 == acodec_reg_dump) { + u8 i = 0; + dev_info(dev, "\n func : %s\n", str); + for (i = 0; i <= reg_count; i++) + dev_info(dev, + "block = 0x0D, adr = %x = %x\n", + i, HW_REG_READ((AB8500_AUDIO << 8) | i)); + } + str = str; /* keep compiler happy */ + return 0; +} + +int debug_audioio(int x) +{ + + 
if (1 == x) + acodec_reg_dump = 1; + else + acodec_reg_dump = 0; + return 0; +} + + + + + diff --git a/drivers/misc/audio_io_dev/ste_audio_io_func.h b/drivers/misc/audio_io_dev/ste_audio_io_func.h new file mode 100644 index 00000000000..b0f73c26225 --- /dev/null +++ b/drivers/misc/audio_io_dev/ste_audio_io_func.h @@ -0,0 +1,358 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Deepak KARDA/ deepak.karda@stericsson.com for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2. + */ + +#ifndef _AUDIOIO_FUNC_H_ +#define _AUDIOIO_FUNC_H_ + +#include <linux/string.h> +#include <linux/platform_device.h> +#include <mach/ste_audio_io_ioctl.h> +#include <linux/mfd/ab8500.h> +#include <linux/mfd/abx500.h> + +#define AB8500_REV_10 0x10 +#define AB8500_REV_11 0x11 +#define AB8500_REV_20 0x20 + +#define AB8500_CTRL3_REG 0x00000200 +#define AB8500_GPIO_DIR4_REG 0x00001013 +#define AB8500_GPIO_DIR5_REG 0x00001014 +#define AB8500_GPIO_OUT5_REG 0x00001024 + +extern struct platform_device *ste_audio_io_device; +extern struct regulator *regulator_avsource; + +int dump_acodec_registers(const char *, struct device *dev); +int debug_audioio(int x); + +#define AB8500_BLOCK_ADDR(address) ((address >> 8) & 0xff) +#define AB8500_OFFSET_ADDR(address) (address & 0xff) + +static inline unsigned char HW_REG_READ(unsigned short reg) +{ + unsigned char ret; + int err; + + err = abx500_get_register_interruptible(&ste_audio_io_device->dev, + AB8500_BLOCK_ADDR(reg), + AB8500_OFFSET_ADDR(reg), + &ret); + if (err < 0) + return err; + else + return ret; +} + +static inline int HW_REG_WRITE(unsigned short reg, unsigned char data) +{ + return abx500_set_register_interruptible(&ste_audio_io_device->dev, + AB8500_BLOCK_ADDR(reg), + AB8500_OFFSET_ADDR(reg), + data); +} + +unsigned int ab8500_acodec_modify_write(unsigned int reg, u8 mask_set, + u8 mask_clear); + +#define HW_ACODEC_MODIFY_WRITE(reg, mask_set, mask_clear)\ + ab8500_acodec_modify_write(reg, mask_set, mask_clear) + 
+unsigned int ab8500_modify_write(unsigned int reg, u8 mask_set, u8 mask_clear); + +int ste_audio_io_power_up_headset(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_headset(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_headset_query(struct device *dev); +int ste_audio_io_set_headset_gain(enum AUDIOIO_CH_INDEX chnl_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_headset_gain(int *, int *, u16, + struct device *dev); +int ste_audio_io_mute_headset(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_headset(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_headset_state(struct device *dev); +int ste_audio_io_enable_fade_headset(struct device *dev); +int ste_audio_io_disable_fade_headset(struct device *dev); +int ste_audio_io_switch_to_burst_mode_headset(int burst_fifo_switch_frame, + struct device *dev); +int ste_audio_io_switch_to_normal_mode_headset( + struct device *dev); + +int ste_audio_io_power_up_earpiece(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_earpiece(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_earpiece_query(struct device *dev); +int ste_audio_io_set_earpiece_gain(enum AUDIOIO_CH_INDEX chnl_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_earpiece_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_earpiece(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_earpiece(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_earpiece_state(struct device *dev); +int ste_audio_io_enable_fade_earpiece(struct device *dev); +int ste_audio_io_disable_fade_earpiece(struct device *dev); + +int ste_audio_io_power_up_ihf(enum AUDIOIO_CH_INDEX chnl_index, + 
struct device *dev); +int ste_audio_io_power_down_ihf(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_ihf_query(struct device *dev); +int ste_audio_io_set_ihf_gain(enum AUDIOIO_CH_INDEX chnl_index, u16 gain_index, + int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_ihf_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_ihf(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_ihf(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_ihf_state(struct device *dev); +int ste_audio_io_enable_fade_ihf(struct device *dev); +int ste_audio_io_disable_fade_ihf(struct device *dev); + +int ste_audio_io_power_up_vibl(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_vibl(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_vibl_query(struct device *dev); +int ste_audio_io_set_vibl_gain(enum AUDIOIO_CH_INDEX chnl_index, u16 gain_index, + int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_vibl_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_vibl(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_vibl(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_vibl_state(struct device *dev); +int ste_audio_io_enable_fade_vibl(struct device *dev); +int ste_audio_io_disable_fade_vibl(struct device *dev); + +int ste_audio_io_power_up_vibr(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_vibr(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_vibr_query(struct device *dev); +int ste_audio_io_set_vibr_gain(enum AUDIOIO_CH_INDEX chnl_index, u16 gain_index, + int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_vibr_gain(int*, int*, u16, + struct device *dev); +int 
ste_audio_io_mute_vibr(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_vibr(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_vibr_state(struct device *dev); +int ste_audio_io_enable_fade_vibr(struct device *dev); +int ste_audio_io_disable_fade_vibr(struct device *dev); + +int ste_audio_io_power_up_mic1a(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_mic1a(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_mic1a_query(struct device *dev); +int ste_audio_io_set_mic1a_gain(enum AUDIOIO_CH_INDEX chnl_index, + u16 gain_index, int gain_value, u32 linear, struct device *dev); +int ste_audio_io_get_mic1a_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_mic1a(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_mic1a(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_mic1a_state(struct device *dev); +int ste_audio_io_enable_fade_mic1a(struct device *dev); +int ste_audio_io_disable_fade_mic1a(struct device *dev); + +/* + *** Mic1b *** + */ +int ste_audio_io_power_up_mic1b(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_mic1b(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_mic1b_query(struct device *dev); +int ste_audio_io_set_mic1b_gain(enum AUDIOIO_CH_INDEX chnl_index, + u16 gain_index, int gain_value, u32 linear, struct device *dev); +int ste_audio_io_get_mic1b_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_mic1b(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_mic1b(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_mic1b_state(struct device *dev); +int ste_audio_io_enable_fade_mic1b(struct device *dev); +int ste_audio_io_disable_fade_mic1b(struct device *dev); +int 
ste_audio_io_enable_loop_mic1b(enum AUDIOIO_CH_INDEX chnl_index, + enum AUDIOIO_HAL_HW_LOOPS, + int loop_gain, struct device *dev, + void *cookie); +int ste_audio_io_disable_loop_mic1b(enum AUDIOIO_CH_INDEX chnl_index, + enum AUDIOIO_HAL_HW_LOOPS hw_loop, + struct device *dev, void *cookie); +/* + *** Mic2 *** + */ +int ste_audio_io_power_up_mic2(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_mic2(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_mic2_query(struct device *dev); +int ste_audio_io_set_mic2_gain(enum AUDIOIO_CH_INDEX chnl_index, u16 gain_index, + int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_mic2_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_mic2(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_mic2(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_mic2_state(struct device *dev); +int ste_audio_io_enable_fade_mic2(struct device *dev); +int ste_audio_io_disable_fade_mic2(struct device *dev); + +int ste_audio_io_power_up_lin(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_lin(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_lin_query(struct device *dev); +int ste_audio_io_set_lin_gain(enum AUDIOIO_CH_INDEX chnl_index, u16 gain_index, + int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_lin_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_lin(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_lin(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_lin_state(struct device *dev); +int ste_audio_io_enable_fade_lin(struct device *dev); +int ste_audio_io_disable_fade_lin(struct device *dev); + +int ste_audio_io_power_up_dmic12(enum AUDIOIO_CH_INDEX chnl_index, + struct device 
*dev); +int ste_audio_io_power_down_dmic12(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_dmic12_query(struct device *dev); +int ste_audio_io_set_dmic12_gain(enum AUDIOIO_CH_INDEX chnl_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_dmic12_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_dmic12(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_dmic12(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_dmic12_state(struct device *dev); +int ste_audio_io_enable_fade_dmic12(struct device *dev); +int ste_audio_io_disable_fade_dmic12(struct device *dev); +int ste_audio_io_enable_loop_dmic12(enum AUDIOIO_CH_INDEX chnl_index, + enum AUDIOIO_HAL_HW_LOOPS, + int loop_gain, struct device *dev, + void *cookie); +int ste_audio_io_disable_loop_dmic12(enum AUDIOIO_CH_INDEX chnl_index, + enum AUDIOIO_HAL_HW_LOOPS hw_loop, + struct device *dev, void *cookie); + +int ste_audio_io_power_up_dmic34(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_dmic34(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_dmic34_query(struct device *dev); +int ste_audio_io_set_dmic34_gain(enum AUDIOIO_CH_INDEX chnl_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_dmic34_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_dmic34(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_dmic34(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_dmic34_state(struct device *dev); +int ste_audio_io_enable_fade_dmic34(struct device *dev); +int ste_audio_io_disable_fade_dmic34(struct device *dev); + +int ste_audio_io_power_up_dmic56(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_dmic56(enum 
AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_dmic56_query(struct device *dev); +int ste_audio_io_set_dmic56_gain(enum AUDIOIO_CH_INDEX chnl_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_dmic56_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_dmic56(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_dmic56(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_dmic56_state(struct device *dev); +int ste_audio_io_enable_fade_dmic56(struct device *dev); +int ste_audio_io_disable_fade_dmic56(struct device *dev); + +int ste_audio_io_power_up_fmrx(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_fmrx(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_fmrx_query(struct device *dev); +int ste_audio_io_set_fmrx_gain(enum AUDIOIO_CH_INDEX chnl_index, u16 gain_index, + int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_fmrx_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_fmrx(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_fmrx(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_fmrx_state(struct device *dev); +int ste_audio_io_enable_fade_fmrx(struct device *dev); +int ste_audio_io_disable_fade_fmrx(struct device *dev); + +int ste_audio_io_power_up_fmtx(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_fmtx(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_fmtx_query(struct device *dev); +int ste_audio_io_set_fmtx_gain(enum AUDIOIO_CH_INDEX chnl_index, u16 gain_index, + int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_fmtx_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_fmtx(enum AUDIOIO_CH_INDEX 
chnl_index, + struct device *dev); +int ste_audio_io_unmute_fmtx(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_fmtx_state(struct device *dev); +int ste_audio_io_enable_fade_fmtx(struct device *dev); +int ste_audio_io_disable_fade_fmtx(struct device *dev); + +int ste_audio_io_power_up_bluetooth(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_down_bluetooth(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_power_state_bluetooth_query(struct device *dev); +int ste_audio_io_set_bluetooth_gain(enum AUDIOIO_CH_INDEX chnl_index, + u16 gain_index, int gain_value, u32 linear, + struct device *dev); +int ste_audio_io_get_bluetooth_gain(int*, int*, u16, + struct device *dev); +int ste_audio_io_mute_bluetooth(enum AUDIOIO_CH_INDEX chnl_index, + struct device *dev); +int ste_audio_io_unmute_bluetooth(enum AUDIOIO_CH_INDEX chnl_index, int *gain, + struct device *dev); +int ste_audio_io_mute_bluetooth_state(struct device *dev); +int ste_audio_io_enable_fade_bluetooth(struct device *dev); +int ste_audio_io_disable_fade_bluetooth(struct device *dev); + + +#endif + diff --git a/drivers/misc/audio_io_dev/ste_audio_io_hwctrl_common.c b/drivers/misc/audio_io_dev/ste_audio_io_hwctrl_common.c new file mode 100644 index 00000000000..3e9f54f2c6b --- /dev/null +++ b/drivers/misc/audio_io_dev/ste_audio_io_hwctrl_common.c @@ -0,0 +1,189 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Deepak KARDA/ deepak.karda@stericsson.com for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2. 
+ */ + + +#include <linux/types.h> +#include "ste_audio_io_hwctrl_common.h" + +/* Number of channels for each transducer */ +const uint transducer_no_of_channels[MAX_NO_TRANSDUCERS] = { + 1, /* Earpiece */ + 2, /* HS */ + 2, /* IHF */ + 1, /* VibL */ + 1, /* VibR */ + 1, /* Mic1A */ + 1, /* Mic1B */ + 1, /* Mic2 */ + 2, /* LinIn */ + 2, /* DMIC12 */ + 2, /* DMIC34 */ + 2, /* /DMIC56 */ + 4 /* MultiMic */ + }; + +/* Maximum number of gains in each transducer path + (all channels of a specific transducer have same max no of gains) */ +const uint transducer_no_of_gains[MAX_NO_TRANSDUCERS] = { + 2, /* Ear g3 and g4 */ + 3, /* HS g3 and g4 and analog */ + 1, /* IHF g3 */ + 1, /* VibL g3 */ + 1, /* VibR g3 */ + 2, /* Mic1A g1 and analog */ + 2, /* Mic1A g1 and analog */ + 2, /* Mic2 g1 and analog */ + 2, /* LinIn g1 and analog */ + 1, /* DMIC12 g1 */ + 1, /* DMIC34 g1 */ + 1, /* DMIC56 g1 */ + 1 /* MultiMic g1 */ + }; + +const uint transducer_no_Of_supported_loop_indexes[MAX_NO_TRANSDUCERS] = { + 0x09,/* Ear0x01|0x08*/ + 0x38770,/*{0x01|0x10|0x20|0x40|0x100*/ + /*|0x200|0x400|0x8000|0x10000|0x20000}, HS*/ + 0x86,/*IHF*/ + 0x0,/*VibL*/ + 0x0,/*VibR*/ + 0x0,/*Mic1A*/ + 0x01,/*Mic1B Sidetone is controlled*/ + 0x0,/*Mic2*/ + 0x0,/*LinIn*/ + 0x0,/*DMIC12*/ + 0x0,/*DMIC34*/ + 0x0,/*DMIC56*/ + 0x01,/*MultiMic Sidetone is controlled*/ + 0x0,/*FMRx*/ + 0x0/*FMTx*/ + }; + +const uint transducer_max_no_Of_supported_loops[MAX_NO_TRANSDUCERS] = { + 0,/*Ear Sidetone*/ + 2,/*HS SideTone LININ_HS LININR_HSR LININ_HSL*/ + 1,/*IHF TO BE DEFINED*/ + 0,/*VibL TO BE DEFINED*/ + 0,/*VibR TO BE DEFINED*/ + 1,/*Mic1A TO BE DEFINED*/ + 1,/*Mic1B SIDETONE TO BE DEFINED*/ + 1,/*Mic2 TO BE DEFINED*/ + 0, /* LinIn */ + 1,/*DMIC12-ANC*/ + 0,/*DMIC34-ANC*/ + 0, /* DMIC56 */ + 1,/*MultiMic-SIDETONE ANC*/ + 0,/*FMRx*/ + 0/*FMTx*/ + }; + +const uint max_no_of_loop_gains[MAX_NO_TRANSDUCERS] = { + 0,/*Earpiece*/ + 2,/*HS*/ + 0, + 0, + 0, + 0, + 2,/*Mic1B-Sidetone 2 gains*/ + 0, + 0, + 2,/*DMIC12*/ + 0, + 
0, + 2,/*Multimic, Sidetone max no gains = 2*/ + 0, + 0 + }; + + +struct gain_descriptor_t gain_descriptor[MAX_NO_TRANSDUCERS]\ + [MAX_NO_CHANNELS][MAX_NO_GAINS] = { + /* gainIndex=0 1 2 + EDestinationEar */ + {{{-63, 0, 1}, {-1, 8, 1}, {0, 0, 0} } ,/* channelIndex=0 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } ,/* channelIndex=3 */ + + /* EDestinationHS */ + {{{-63, 0, 1}, {-1, 8, 1}, {-32, 4, 2} } , /* channelIndex=0 */ + {{-63, 0, 1}, {-1, 8, 1}, {-32, 4, 2} } , /* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } , /* channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } , /* channelIndex=3 */ + + /* EDestinationIHF */ + {{{-63, 0, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=0 */ + {{-63, 0, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } ,/* channelIndex=3 */ + + /* EDestinationVibL */ + {{{-63, 0, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=0 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } ,/* channelIndex=3 */ + + /* EDestinationVibR */ + {{{-63, 0, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=0 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } ,/* channelIndex=3 */ + + /* ESourceMic1A */ + {{{-32, 31, 1}, {0, 31, 1}, {0, 0, 0} } ,/* channelIndex=0 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } ,/* channelIndex=3 */ + + /* ESourceMic1B */ + {{{-32, 31, 1}, {0, 31, 1}, {0, 0, 0} } ,/* channelIndex=0 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* 
channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } ,/* channelIndex=3 */ + + /* ESourceMic2 */ + {{{-32, 31, 1}, {0, 31, 1}, {0, 0, 0} } ,/* channelIndex=0 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } ,/* channelIndex=3 */ + + /* ESourceLin */ + {{{-32, 31, 1}, {-10, 20, 2}, {0, 0, 0} } ,/* channelIndex=0 */ + {{-32, 31, 1}, {-10, 20, 2}, {0, 0, 0} } ,/* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } ,/* channelIndex=3 */ + + /* ESourceDMic12 */ + {{{-32, 31, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=0 */ + {{-32, 31, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } ,/* channelIndex=3 */ + + /* ESourceDMic34 */ + {{{-32, 31, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=0 */ + {{-32, 31, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } ,/* channelIndex=3 */ + + /* ESourceDMic56 */ + {{{-32, 31, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=0 */ + {{-32, 31, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=1 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=2 */ + {{0, 0, 0}, {0, 0, 0}, {0, 0, 0} } } ,/* channelIndex=3 */ + + /* ESourceMultiMic */ + {{{-32, 31, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=0 */ + {{-32, 31, 1}, {0, 0, 0}, {0, 0, 0} } ,/* channelIndex=1 */ + {{-32, 31, 1}, {0, 0, 0}, {0, 0, 0} },/* channelIndex=2 */ + {{-32, 31, 1}, {0, 0, 0}, {0, 0, 0} } } /* channelIndex=3 */ +}; + + +const int hs_analog_gain_table[16] = {4, 2, 0, -2, -4, -6, -8, -10, + -12, -14, -16, -18, -20, -24, -28, -32}; + + + diff --git a/drivers/misc/audio_io_dev/ste_audio_io_hwctrl_common.h b/drivers/misc/audio_io_dev/ste_audio_io_hwctrl_common.h new file mode 100644 index 
00000000000..cc2bfe21d81 --- /dev/null +++ b/drivers/misc/audio_io_dev/ste_audio_io_hwctrl_common.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Deepak KARDA/ deepak.karda@stericsson.com for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2. + */ + +#ifndef __AUDIOIO_HWCTRL_COMMON_H__ +#define __AUDIOIO_HWCTRL_COMMON_H__ + +#include <linux/types.h> +#include <mach/ste_audio_io_ioctl.h> +/* + * Defines + */ + +#define MAX_GAIN 100 +#define MIN_GAIN 0 +#define MAX_NO_CHANNELS 4 +#define MAX_NO_GAINS 3 +#define MAX_NO_LOOPS 1 +#define MAX_NO_LOOP_GAINS 1 + +struct gain_descriptor_t { + int min_gain; + int max_gain; + uint gain_step; +}; + + +/* Number of channels for each transducer */ +extern const uint transducer_no_of_channels[MAX_NO_TRANSDUCERS]; + +/* + * Maximum number of gains in each transducer path + * all channels of a specific transducer have same max no of gains + */ +extern const uint transducer_no_of_gains[MAX_NO_TRANSDUCERS]; + +/* Maximum number of supported loops for each transducer */ +extern const uint transducer_no_Of_supported_loop_indexes[MAX_NO_TRANSDUCERS]; +extern const uint transducer_max_no_Of_supported_loops[MAX_NO_TRANSDUCERS]; +extern const uint max_no_of_loop_gains[MAX_NO_TRANSDUCERS]; +extern const int hs_analog_gain_table[16] ; + +extern struct gain_descriptor_t gain_descriptor[MAX_NO_TRANSDUCERS]\ + [MAX_NO_CHANNELS][MAX_NO_GAINS]; + +#endif + +/* End of audio_io_hwctrl_common.h */ diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c new file mode 100644 index 00000000000..9d0b4b9ee94 --- /dev/null +++ b/drivers/misc/bh1780gli.c @@ -0,0 +1,306 @@ +/* + * bh1780gli.c + * ROHM Ambient Light Sensor Driver + * + * Copyright (C) 2010 Texas Instruments + * Author: Hemanth V <hemanthv@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software 
Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/regulator/consumer.h> + +#define BH1780_REG_CONTROL 0x80 +#define BH1780_REG_PARTID 0x8A +#define BH1780_REG_MANFID 0x8B +#define BH1780_REG_DLOW 0x8C +#define BH1780_REG_DHIGH 0x8D + +#define BH1780_REVMASK (0xf) +#define BH1780_POWMASK (0x3) +#define BH1780_POFF (0x0) +#define BH1780_PON (0x3) + +/* power on settling time in ms */ +#define BH1780_PON_DELAY 2 + +struct bh1780_data { + struct i2c_client *client; + struct regulator *regulator; + int power_state; + /* lock for sysfs operations */ + struct mutex lock; +}; + +static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg) +{ + int ret = i2c_smbus_write_byte_data(ddata->client, reg, val); + if (ret < 0) + dev_err(&ddata->client->dev, + "i2c_smbus_write_byte_data failed error %d\ + Register (%s)\n", ret, msg); + return ret; +} + +static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg) +{ + int ret = i2c_smbus_read_byte_data(ddata->client, reg); + if (ret < 0) + dev_err(&ddata->client->dev, + "i2c_smbus_read_byte_data failed error %d\ + Register (%s)\n", ret, msg); + return ret; +} + +static ssize_t bh1780_show_lux(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bh1780_data *ddata = platform_get_drvdata(pdev); + int lsb, msb; + + if (ddata->power_state == BH1780_POFF) + return -EINVAL; + + lsb = 
bh1780_read(ddata, BH1780_REG_DLOW, "DLOW"); + if (lsb < 0) + return lsb; + + msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH"); + if (msb < 0) + return msb; + + return sprintf(buf, "%d\n", (msb << 8) | lsb); +} + +static ssize_t bh1780_show_power_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bh1780_data *ddata = platform_get_drvdata(pdev); + + /* we already maintain a sw state */ + return sprintf(buf, "%d\n", ddata->power_state); +} + +static ssize_t bh1780_store_power_state(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + struct bh1780_data *ddata = platform_get_drvdata(pdev); + long val; + int error; + + error = strict_strtoul(buf, 0, &val); + if (error) + return error; + + if (val < BH1780_POFF || val > BH1780_PON) + return -EINVAL; + + if (ddata->power_state == val) + return count; + + mutex_lock(&ddata->lock); + + if (ddata->power_state == BH1780_POFF) + regulator_enable(ddata->regulator); + + error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL"); + if (error < 0) { + mutex_unlock(&ddata->lock); + regulator_disable(ddata->regulator); + return error; + } + + if (val == BH1780_POFF) + regulator_disable(ddata->regulator); + + msleep(BH1780_PON_DELAY); + ddata->power_state = val; + mutex_unlock(&ddata->lock); + + return count; +} + +static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL); + +static DEVICE_ATTR(power_state, S_IWUGO | S_IRUGO, + bh1780_show_power_state, bh1780_store_power_state); + +static struct attribute *bh1780_attributes[] = { + &dev_attr_power_state.attr, + &dev_attr_lux.attr, + NULL +}; + +static const struct attribute_group bh1780_attr_group = { + .attrs = bh1780_attributes, +}; + +static int __devinit bh1780_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret; + struct bh1780_data *ddata = NULL; + struct 
i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) { + ret = -EIO; + return ret; + } + + ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL); + if (ddata == NULL) { + dev_err(&client->dev, "failed to alloc ddata\n"); + ret = -ENOMEM; + return ret; + } + + ddata->client = client; + i2c_set_clientdata(client, ddata); + dev_set_name(&client->dev, "bh1780"); + + ddata->regulator = regulator_get(&client->dev, "v-als"); + if (IS_ERR(ddata->regulator)) { + dev_err(&client->dev, "failed to get regulator\n"); + ret = PTR_ERR(ddata->regulator); + goto free_ddata; + } + + regulator_enable(ddata->regulator); + + ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID"); + if (ret < 0) { + dev_err(&client->dev, "failed to read part ID\n"); + goto put_regulator; + } + + regulator_disable(ddata->regulator); + ddata->power_state = BH1780_POFF; + + dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n", + (ret & BH1780_REVMASK)); + + mutex_init(&ddata->lock); + + ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group); + if (ret) + goto put_regulator; + + return 0; + +put_regulator: + regulator_disable(ddata->regulator); + regulator_put(ddata->regulator); +free_ddata: + kfree(ddata); + return ret; +} + +static int __devexit bh1780_remove(struct i2c_client *client) +{ + struct bh1780_data *ddata; + + ddata = i2c_get_clientdata(client); + sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group); + i2c_set_clientdata(client, NULL); + kfree(ddata); + + return 0; +} + +#ifdef CONFIG_PM +static int bh1780_suspend(struct device *dev) +{ + struct bh1780_data *ddata = dev_get_drvdata(dev); + int ret = 0; + + if (ddata->power_state == BH1780_POFF) + return 0; + + ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF, + "CONTROL"); + if (ret < 0) + return ret; + + regulator_disable(ddata->regulator); + + return 0; +} + +static int bh1780_resume(struct device *dev) +{ + struct bh1780_data *ddata = 
dev_get_drvdata(dev); + int ret = 0; + + if (ddata->power_state == BH1780_POFF) + return 0; + + regulator_enable(ddata->regulator); + + ret = bh1780_write(ddata, BH1780_REG_CONTROL, ddata->power_state, + "CONTROL"); + if (ret < 0) + return ret; + + return 0; +} + +static const struct dev_pm_ops bh1780_dev_pm_ops = { + .suspend = bh1780_suspend, + .resume = bh1780_resume, +}; + +#endif /* CONFIG_PM */ + +static const struct i2c_device_id bh1780_id[] = { + { "bh1780", 0 }, + { }, +}; + +static struct i2c_driver bh1780_driver = { + .probe = bh1780_probe, + .remove = bh1780_remove, + .id_table = bh1780_id, + .driver = { + .name = "bh1780", +#ifdef CONFIG_PM + .pm = &bh1780_dev_pm_ops, +#endif + }, +}; + +static int __init bh1780_init(void) +{ + return i2c_add_driver(&bh1780_driver); +} + +static void __exit bh1780_exit(void) +{ + i2c_del_driver(&bh1780_driver); +} + +module_init(bh1780_init) +module_exit(bh1780_exit) + +MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>"); diff --git a/drivers/misc/db8500-modem-trace.c b/drivers/misc/db8500-modem-trace.c new file mode 100644 index 00000000000..0d739fb4694 --- /dev/null +++ b/drivers/misc/db8500-modem-trace.c @@ -0,0 +1,273 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Authors: Michel JAOUEN <michel.jaouen@stericsson.com> + * Maxime COQUELIN <maxime.coquelin-nonst@stericsson.com> + * for ST-Ericsson + * License terms: GNU General Public License (GPL), version 2 + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/spinlock.h> +#include <linux/device.h> +#include <linux/cdev.h> +#include <linux/poll.h> +#include <linux/miscdevice.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/uaccess.h> +#include <linux/mman.h> +#include <linux/db8500-modem-trace.h> + +#include <mach/hardware.h> + +#define 
DEVICE_NAME "db8500-modem-trace" + +/* activation of this flag triggers an initialization of 2 buffers + * 4kbytes , id 0xdeadbeef + * and 16Kbytes id 0xfadafada + * we assume that platform provides minimum 20Kbytes. */ + +struct trace { + u32 start; + u32 end; + u32 mdm_base; + u32 ape_base; + void __iomem *area; + /* this spinlock to forbid concurrent access on the same trace buffer */ + spinlock_t lock; + struct device *dev; + struct miscdevice misc_dev; +}; + +struct trace_modem { + u32 phys_addr; + u8 filler; +}; + +static struct trace *trace_priv; + + +/* all this definition are linked to modem interface */ +#define MODEM_MARKER 0x88 +/* free marker is also written on filler */ +#define FREE_MARKER 0xa5 +#define FREE_MARKER_2 0xa5a5 +#define READ_MARKER 0x5a + +struct buffer_header { + u8 pattern; + u8 filler; + u16 head_size; +}; + + +static int trace_read(unsigned long arg) +{ + struct modem_trace_req req; + struct buffer_header *pt; + char tmp_char; + + if (copy_from_user(&req, (struct modem_trace_req *)arg, + sizeof(struct modem_trace_req))) + return -EFAULT; + + /* compute Modem physical address to APE physical address range */ + if (req.phys_addr < trace_priv->mdm_base) { + dev_err(trace_priv->dev, "MODEM ADDR uncorrect\n"); + return -EINVAL; + } + req.phys_addr += trace_priv->ape_base - trace_priv->mdm_base; + + /* check request is in the range and aligned */ + if ((req.phys_addr % 4 != 0) + || (req.phys_addr < trace_priv->start) + || (req.phys_addr + req.size) >= trace_priv->end) { + dev_err(trace_priv->dev, "req out of range %x %x\n", + req.phys_addr, req.size); + return -EINVAL; + } + + /* perform access to memory area */ + pt = (struct buffer_header *)((u32)trace_priv->area + + req.phys_addr - trace_priv->start); + + /* in case of several request coming on same trace buffer take a + * spinlock */ + spin_lock(&trace_priv->lock); + if (pt->pattern != MODEM_MARKER) { + /* pattern and size not matching */ + dev_err(trace_priv->dev, "req not matching 
filler %x/%x \ + or/and pattern %x\n", req.filler, pt->filler, + pt->pattern); + spin_unlock(&trace_priv->lock); + return -EINVAL; + } + /* mark pattern as read and unlock spin */ + pt->pattern = READ_MARKER; + spin_unlock(&trace_priv->lock); + + req.size -= copy_to_user(req.buff, pt, req.size); + + pt->pattern = FREE_MARKER; + pt->filler = FREE_MARKER; + tmp_char = MODEM_MARKER; + + /* Update marker for trace tool */ + if (copy_to_user(req.buff, &tmp_char, 1)) + return -EFAULT; + + /* Update effective written size */ + if (copy_to_user((struct modem_trace_req *)arg, &req, + sizeof(struct modem_trace_req))) + return -EFAULT; + + return 0; +} + +static int trace_mmapdump(struct file *file, struct vm_area_struct *vma) +{ + unsigned long vma_start = vma->vm_start; + + if (vma->vm_flags & VM_WRITE) + return -EPERM; + + if ((vma->vm_end - vma->vm_start) < + (trace_priv->end - trace_priv->start)) + return -EINVAL; + if (remap_pfn_range(vma, + vma_start, + trace_priv->start >> PAGE_SHIFT, + trace_priv->end - trace_priv->start, + vma->vm_page_prot)) + return -EAGAIN; + return 0; +} + +static long trace_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + long ret = 0; + void __user *argp = (void __user *)arg; + unsigned long size = trace_priv->end-trace_priv->start; + + switch (cmd) { + case TM_GET_DUMPINFO: + ret = put_user(size, (unsigned long *)argp); + break; + case TM_TRACE_REQ: + ret = trace_read(arg); + break; + + default: + ret = -EPERM; + break; + } + return ret; +} + +static const struct file_operations trace_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = trace_ioctl, + .mmap = trace_mmapdump +}; + +static int trace_probe(struct platform_device *pdev) +{ + int rv = 0; + struct db8500_trace_platform_data *pdata = pdev->dev.platform_data; + /* retrieve area descriptor from platform device ressource */ + struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if ((mem->start == 0) && (mem->end == 0)) { + rv = -EINVAL; + goto 
out; + } + + if ((pdata->ape_base == 0) || (pdata->modem_base == 0)) { + rv = -EINVAL; + goto out; + } + + trace_priv = kzalloc(sizeof(*trace_priv), GFP_ATOMIC); + if (!trace_priv) { + rv = -ENOMEM; + goto out; + } + + trace_priv->dev = &pdev->dev; + trace_priv->misc_dev.minor = MISC_DYNAMIC_MINOR; + trace_priv->misc_dev.name = DEVICE_NAME; + trace_priv->misc_dev.fops = &trace_fops; + trace_priv->area = (void __iomem *)ioremap_nocache(mem->start, + mem->end - mem->start); + if (!trace_priv->area) { + rv = -ENOMEM; + goto outfree; + } + + trace_priv->start = mem->start; + trace_priv->end = mem->end; + + trace_priv->mdm_base = pdata->modem_base; + trace_priv->ape_base = pdata->ape_base; + + /* spin allowing smp access for reading/writing trace buffer header */ + spin_lock_init(&trace_priv->lock); + + rv = misc_register(&trace_priv->misc_dev); + if (rv) { + dev_err(&pdev->dev, "can't misc_register\n"); + goto outunmap; + } + + return rv; + +outunmap: + iounmap(trace_priv->area); +outfree: + kfree(trace_priv); +out: + return rv; + +} + +static int trace_remove(struct platform_device *pdev) +{ + int rv = 0; + + if (trace_priv) { + rv = misc_deregister(&trace_priv->misc_dev); + iounmap(trace_priv->area); + kfree(trace_priv); + } + + return rv; +} + +static struct platform_driver trace_driver = { + .probe = trace_probe, + .remove = trace_remove, + .driver = { + .name = "db8500-modem-trace", + .owner = THIS_MODULE, + }, +}; + +static int trace_init(void) +{ + platform_driver_register(&trace_driver); + return 0; +} +static void trace_exit(void) +{ + platform_driver_unregister(&trace_driver); +} +module_init(trace_init); +module_exit(trace_exit); + +MODULE_AUTHOR("ST-Ericsson"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/dbx500-mloader.c b/drivers/misc/dbx500-mloader.c new file mode 100644 index 00000000000..c3ec8b67983 --- /dev/null +++ b/drivers/misc/dbx500-mloader.c @@ -0,0 +1,269 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Ludovic Barre 
<ludovic.barre@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/firmware.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/uaccess.h> +#include <linux/mman.h> +#include <linux/io.h> + +#include <mach/mloader-dbx500.h> +#include <linux/mloader.h> + +#define DEVICE_NAME "dbx500_mloader_fw" + +struct mloader_priv { + struct platform_device *pdev; + struct dbx500_mloader_pdata *pdata; + struct miscdevice misc_dev; + u32 aeras_size; +}; + +static struct mloader_priv *mloader_priv; + +static int mloader_fw_send(struct dbx500_ml_fw *fw_info) +{ + const struct firmware *fw; + unsigned long size; + unsigned long phys_start; + void *fw_data; + void *vaddr; + void __iomem *ioaddr; + int ret; + + ret = request_firmware(&fw, fw_info->name, &mloader_priv->pdev->dev); + if (ret) { + dev_err(&mloader_priv->pdev->dev, "request firmware failed\n"); + goto out; + } + + if (fw->size > (fw_info->area->size - fw_info->offset)) { + dev_err(&mloader_priv->pdev->dev, + "fw:%s is too big for:%s\n", + fw_info->name, fw_info->area->name); + ret = -EINVAL; + goto err_fw; + } + + size = PAGE_ALIGN(fw->size); + phys_start = fw_info->area->start + fw_info->offset; + phys_start &= PAGE_MASK; + ioaddr = ioremap(phys_start, size); + if (!ioaddr) { + dev_err(&mloader_priv->pdev->dev, + "failed remap memory region.\n"); + ret = -EINVAL; + goto err_fw; + } + + vaddr = ioaddr + (fw_info->offset & ~PAGE_MASK); + fw_data = (void *)fw->data; + memcpy_toio(vaddr, fw_data, fw->size); + iounmap(ioaddr); + +err_fw: + release_firmware(fw); +out: + return ret; +} + +static int mloader_fw_upload(void) +{ + int i, ret; + struct dbx500_mloader_pdata *pdata = mloader_priv->pdata; + + 
for (i = 0; i < pdata->nr_fws; i++) { + ret = mloader_fw_send(&pdata->fws[i]); + if (ret) + goto err; + } + + return 0; +err: + dev_err(&mloader_priv->pdev->dev, + "Failed to upload %s firmware", pdata->fws[i].name); + return ret; +} + +static int mloader_fw_mmapdump(struct file *file, struct vm_area_struct *vma) +{ + int i; + unsigned long dump_size = 0; + unsigned long vma_start = vma->vm_start; + + if (vma->vm_flags & VM_WRITE) + return -EPERM; + + for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++) + dump_size += mloader_priv->pdata->areas[i].size; + + if ((vma->vm_end - vma->vm_start) < dump_size) + return -EINVAL; + + for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++) { + if (remap_pfn_range(vma, + vma_start, + mloader_priv->pdata->areas[i].start >> PAGE_SHIFT, + mloader_priv->pdata->areas[i].size, + vma->vm_page_prot)) + return -EAGAIN; + vma_start += mloader_priv->pdata->areas[i].size; + } + return 0; +} + +static void mloader_fw_dumpinfo(struct dump_image *images) +{ + u32 offset = 0; + int i; + + for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++) { + strncpy(images[i].name, + mloader_priv->pdata->areas[i].name, MAX_NAME); + images[i].name[MAX_NAME-1] = 0; + images[i].offset = offset; + images[i].size = mloader_priv->pdata->areas[i].size; + offset += mloader_priv->pdata->areas[i].size; + } +} + +static long mloader_fw_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + long ret = 0; + void __user *argp = (void __user *)arg; + + switch (cmd) { + case ML_UPLOAD: + ret = mloader_fw_upload(); + break; + case ML_GET_NBIMAGES: + ret = put_user(mloader_priv->pdata->nr_areas, + (unsigned long __user *)argp); + break; + case ML_GET_DUMPINFO: { + struct dump_image *dump_images; + dump_images = kzalloc(mloader_priv->pdata->nr_areas + * sizeof(struct dump_image), GFP_ATOMIC); + mloader_fw_dumpinfo(dump_images); + ret = copy_to_user(argp, (void *) dump_images, + mloader_priv->pdata->nr_areas + * sizeof(struct dump_image)) ? 
-EFAULT : 0; + kfree(dump_images); + break; + } + default: + ret = -EPERM; + break; + } + + return ret; +} + +static const struct file_operations modem_fw_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = mloader_fw_ioctl, + .mmap = mloader_fw_mmapdump, +}; + +static int __devinit mloader_fw_probe(struct platform_device *pdev) +{ + int ret = 0; + int i; + + mloader_priv = kzalloc(sizeof(*mloader_priv), GFP_ATOMIC); + if (!mloader_priv) { + ret = -ENOMEM; + goto out; + } + + mloader_priv->pdev = pdev; + mloader_priv->pdata = pdev->dev.platform_data; + + mloader_priv->misc_dev.minor = MISC_DYNAMIC_MINOR; + mloader_priv->misc_dev.name = DEVICE_NAME; + mloader_priv->misc_dev.fops = &modem_fw_fops; + ret = misc_register(&mloader_priv->misc_dev); + if (ret < 0) { + dev_err(&pdev->dev, "can't misc_register\n"); + goto err_free_priv; + } + + dev_info(&mloader_priv->pdev->dev, "mloader device register\n"); + + for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++) { + dev_dbg(&mloader_priv->pdev->dev, + "Area:%d (name:%s start:%x size:%x)\n", + i, mloader_priv->pdata->areas[i].name, + mloader_priv->pdata->areas[i].start, + mloader_priv->pdata->areas[i].size); + } + + for (i = 0 ; i < mloader_priv->pdata->nr_fws ; i++) { + dev_dbg(&mloader_priv->pdev->dev, + "Firmware:%d (name:%s offset:%x " + "area_name:%s area_start:%x area_size:%x)\n", + i, mloader_priv->pdata->fws[i].name, + mloader_priv->pdata->fws[i].offset, + mloader_priv->pdata->fws[i].area->name, + mloader_priv->pdata->fws[i].area->start, + mloader_priv->pdata->fws[i].area->size); + } + + return ret; + +err_free_priv: + kfree(mloader_priv); +out: + return ret; +} + +static int __devexit mloader_fw_remove(struct platform_device *pdev) +{ + int err; + + err = misc_register(&mloader_priv->misc_dev); + if (err < 0) + dev_err(&pdev->dev, "can't misc_deregister, %d\n", err); + + kfree(mloader_priv); + + return err; +} + +static struct platform_driver mloader_fw_driver = { + .driver.name = DEVICE_NAME, + .driver.owner = 
THIS_MODULE, + .probe = mloader_fw_probe, + .remove = __devexit_p(mloader_fw_remove), +}; + +static int __init mloader_fw_init(void) +{ + return platform_driver_register(&mloader_fw_driver); +} + +static void __exit mloader_fw_exit(void) +{ + kfree(mloader_priv); + platform_driver_unregister(&mloader_fw_driver); +} + +module_init(mloader_fw_init); +module_exit(mloader_fw_exit); +MODULE_DESCRIPTION("ST-Ericsson modem loader firmware"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Ludovic Barre <ludovic.barre@stericsson.com>"); diff --git a/drivers/misc/dispdev/Makefile b/drivers/misc/dispdev/Makefile new file mode 100644 index 00000000000..11dc7611d26 --- /dev/null +++ b/drivers/misc/dispdev/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_DISPDEV) += dispdev.o diff --git a/drivers/misc/dispdev/dispdev.c b/drivers/misc/dispdev/dispdev.c new file mode 100644 index 00000000000..969bb2a1bc1 --- /dev/null +++ b/drivers/misc/dispdev/dispdev.c @@ -0,0 +1,522 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Display output device driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/wait.h> +#include <linux/sched.h> + +#include <linux/dispdev.h> +#include <linux/hwmem.h> +#include <video/mcde_dss.h> + +#define MAX_BUFFERS 4 + +static LIST_HEAD(dev_list); +static DEFINE_MUTEX(dev_list_lock); + +enum buffer_state { + BUF_UNUSED = 0, + BUF_QUEUED, + BUF_ACTIVATED, +/*TODO:waitfordone BUF_DEACTIVATED,*/ + BUF_FREE, + BUF_DEQUEUED, +}; + +struct dispdev_buffer { + struct hwmem_alloc *alloc; + u32 size; + enum buffer_state state; + u32 paddr; /* if pinned */ +}; + +struct dispdev { + bool open; + struct mutex lock; + struct miscdevice mdev; + struct list_head list; + struct mcde_display_device *ddev; + struct mcde_overlay *ovly; + struct dispdev_config config; + bool overlay; + struct dispdev_buffer buffers[MAX_BUFFERS]; + wait_queue_head_t waitq_dq; +}; + +int dispdev_open(struct inode *inode, struct file *file) +{ + int ret; + struct dispdev *dd = NULL; + + mutex_lock(&dev_list_lock); + list_for_each_entry(dd, &dev_list, list) + if (dd->mdev.minor == iminor(inode)) + break; + + if (&dd->list == &dev_list) { + mutex_unlock(&dev_list_lock); + return -ENODEV; + } + + if (dd->open) { + mutex_unlock(&dev_list_lock); + return -EBUSY; + } + + dd->open = true; + + mutex_unlock(&dev_list_lock); + + ret = mcde_dss_enable_overlay(dd->ovly); + if (ret) + return ret; + + file->private_data = dd; + + return 0; +} + +int dispdev_release(struct inode *inode, struct file *file) +{ + int i; + struct dispdev *dd = NULL; + + mutex_lock(&dev_list_lock); + list_for_each_entry(dd, &dev_list, list) + if (dd->mdev.minor == iminor(inode)) + break; + mutex_unlock(&dev_list_lock); + + if (&dd->list == &dev_list) + return -ENODEV; + + /* TODO: Make sure it waits for completion */ + mcde_dss_disable_overlay(dd->ovly); + for 
(i = 0; i < MAX_BUFFERS; i++) { + if (dd->buffers[i].paddr) + hwmem_unpin(dd->buffers[i].alloc); + if (dd->buffers[i].alloc) + hwmem_release(dd->buffers[i].alloc); + dd->buffers[i].alloc = NULL; + dd->buffers[i].state = BUF_UNUSED; + dd->buffers[i].size = 0; + dd->buffers[i].paddr = 0; + } + dd->open = false; + wake_up(&dd->waitq_dq); + + return 0; +} + +static enum mcde_ovly_pix_fmt get_ovly_fmt(enum dispdev_fmt fmt) +{ + switch (fmt) { + default: + case DISPDEV_FMT_RGB565: + return MCDE_OVLYPIXFMT_RGB565; + case DISPDEV_FMT_RGB888: + return MCDE_OVLYPIXFMT_RGB888; + case DISPDEV_FMT_RGBA8888: + return MCDE_OVLYPIXFMT_RGBA8888; + case DISPDEV_FMT_RGBX8888: + return MCDE_OVLYPIXFMT_RGBX8888; + case DISPDEV_FMT_YUV422: + return MCDE_OVLYPIXFMT_YCbCr422; + } +} + +static void get_ovly_info(struct dispdev_config *cfg, + struct mcde_overlay_info *info, bool overlay) +{ + info->paddr = 0; + info->stride = cfg->stride; + info->fmt = get_ovly_fmt(cfg->format); + info->src_x = 0; + info->src_y = 0; + info->dst_x = cfg->x; + info->dst_y = cfg->y; + info->dst_z = cfg->z; + info->w = cfg->width; + info->h = cfg->height; + info->dirty.x = 0; + info->dirty.y = 0; + info->dirty.w = cfg->width; + info->dirty.h = cfg->height; +} + +static int dispdev_set_config(struct dispdev *dd, struct dispdev_config *cfg) +{ + int ret; + struct mcde_overlay_info info; + + if (memcmp(&dd->config, cfg, sizeof(struct dispdev_config)) == 0) + return 0; + + /* Allow change of z,x and y */ + + get_ovly_info(cfg, &info, dd->overlay); + ret = mcde_dss_apply_overlay(dd->ovly, &info); + if (ret) + return ret; + + dd->config = *cfg; + + return 0; +} + +static int find_buf(struct dispdev *dd, enum buffer_state state) +{ + int i; + for (i = 0; i < MAX_BUFFERS; i++) + if (dd->buffers[i].state == state) + return i; + return -1; +} + +static int dispdev_register_buffer(struct dispdev *dd, s32 hwmem_name) +{ + int ret; + struct dispdev_buffer *buf; + enum hwmem_mem_type memtype; + enum hwmem_access access; + + 
ret = find_buf(dd, BUF_UNUSED); + if (ret < 0) + return -ENOMEM; + buf = &dd->buffers[ret]; + buf->alloc = hwmem_resolve_by_name(hwmem_name); + if (IS_ERR(buf->alloc)) { + ret = PTR_ERR(buf->alloc); + goto resolve_failed; + } + + hwmem_get_info(buf->alloc, &buf->size, &memtype, &access); + + if (!(access & HWMEM_ACCESS_READ) || + memtype != HWMEM_MEM_CONTIGUOUS_SYS) { + ret = -EACCES; + goto invalid_mem; + } + + buf->state = BUF_FREE; + goto out; +invalid_mem: + hwmem_release(buf->alloc); +resolve_failed: +out: + return ret; +} + +static int dispdev_unregister_buffer(struct dispdev *dd, u32 buf_idx) +{ + struct dispdev_buffer *buf = &dd->buffers[buf_idx]; + + if (buf_idx >= ARRAY_SIZE(dd->buffers)) + return -EINVAL; + + if (buf->state == BUF_UNUSED) + return -EINVAL; + + if (buf->state == BUF_ACTIVATED) { + struct mcde_overlay_info info; + /* TODO Wait for frame done */ + get_ovly_info(&dd->config, &info, dd->overlay); + mcde_dss_apply_overlay(dd->ovly, &info); + mcde_dss_update_overlay(dd->ovly, false); + hwmem_unpin(dd->buffers[buf_idx].alloc); + } + + hwmem_release(buf->alloc); + buf->state = BUF_UNUSED; + buf->alloc = NULL; + buf->size = 0; + + return 0; +} + +static int dispdev_queue_buffer(struct dispdev *dd, u32 buf_idx) +{ + int ret, i; + struct mcde_overlay_info info; + struct hwmem_mem_chunk mem_chunk; + size_t mem_chunk_length = 1; + struct hwmem_region rgn = { .offset = 0, .count = 1, .start = 0 }; + struct hwmem_alloc *alloc; + + if (buf_idx >= ARRAY_SIZE(dd->buffers) || + dd->buffers[buf_idx].state != BUF_DEQUEUED) + return -EINVAL; + + alloc = dd->buffers[buf_idx].alloc; + get_ovly_info(&dd->config, &info, dd->overlay); + ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length); + if (ret) { + dev_warn(dd->mdev.this_device, "Pin failed, %d\n", ret); + return -EINVAL; + } + + rgn.size = rgn.end = dd->buffers[buf_idx].size; + ret = hwmem_set_domain(alloc, HWMEM_ACCESS_READ, + HWMEM_DOMAIN_SYNC, &rgn); + if (ret) + dev_warn(dd->mdev.this_device, "Set 
domain failed, %d\n", ret); + + i = find_buf(dd, BUF_ACTIVATED); + if (i >= 0) { + /* TODO Wait for frame done */ + dd->buffers[i].state = BUF_FREE; + wake_up(&dd->waitq_dq); + } + + info.paddr = mem_chunk.paddr; + mcde_dss_apply_overlay(dd->ovly, &info); + mcde_dss_update_overlay(dd->ovly, false); + + dd->buffers[buf_idx].state = BUF_ACTIVATED; + + return 0; +} + +static int dispdev_dequeue_buffer(struct dispdev *dd) +{ + int i; + + i = find_buf(dd, BUF_FREE); + if (i < 0) { + if (find_buf(dd, BUF_ACTIVATED) < 0) + return -EINVAL; + + wait_event(dd->waitq_dq, (i = find_buf(dd, BUF_FREE)) >= 0); + } + hwmem_unpin(dd->buffers[i].alloc); + dd->buffers[i].state = BUF_DEQUEUED; + + return i; +} + +int dispdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + struct dispdev *dd = (struct dispdev *)file->private_data; + + mutex_lock(&dd->lock); + + switch (cmd) { + case DISPDEV_SET_CONFIG_IOC: + { + struct dispdev_config cfg; + if (copy_from_user(&cfg, (void __user *)arg, + sizeof(cfg))) + ret = -EFAULT; + else + ret = dispdev_set_config(dd, &cfg); + } + break; + case DISPDEV_GET_CONFIG_IOC: + ret = copy_to_user((void __user *)arg, &dd->config, + sizeof(dd->config)); + if (ret) + ret = -EFAULT; + break; + case DISPDEV_REGISTER_BUFFER_IOC: + ret = dispdev_register_buffer(dd, (s32)arg); + break; + case DISPDEV_UNREGISTER_BUFFER_IOC: + ret = dispdev_unregister_buffer(dd, (u32)arg); + break; + case DISPDEV_QUEUE_BUFFER_IOC: + ret = dispdev_queue_buffer(dd, (u32)arg); + break; + case DISPDEV_DEQUEUE_BUFFER_IOC: + ret = dispdev_dequeue_buffer(dd); + break; + default: + ret = -ENOSYS; + } + + mutex_unlock(&dd->lock); + + return ret; +} + +static const struct file_operations dispdev_fops = { + .open = dispdev_open, + .release = dispdev_release, + .ioctl = dispdev_ioctl, +}; + +static void init_dispdev(struct dispdev *dd, struct mcde_display_device *ddev, + const char *name, bool overlay) +{ + u16 w, h; + int rotation; + + 
mutex_init(&dd->lock); + INIT_LIST_HEAD(&dd->list); + dd->ddev = ddev; + dd->overlay = overlay; + mcde_dss_get_native_resolution(ddev, &w, &h); + rotation = mcde_dss_get_rotation(ddev); + + if ((rotation == MCDE_DISPLAY_ROT_90_CCW) || + (rotation == MCDE_DISPLAY_ROT_90_CW)) { + dd->config.width = h; + dd->config.height = w; + } else { + dd->config.width = w; + dd->config.height = h; + } + dd->config.format = DISPDEV_FMT_RGB565; + dd->config.stride = sizeof(u16) * w; + dd->config.z = 0; + init_waitqueue_head(&dd->waitq_dq); + dd->mdev.minor = MISC_DYNAMIC_MINOR; + dd->mdev.name = name; + dd->mdev.fops = &dispdev_fops; + pr_info("%s: name=%s w=%d, h=%d, fmt=%d, stride=%d\n", __func__, name, + dd->config.width, dd->config.height, dd->config.format, + dd->config.stride); +} + +int dispdev_create(struct mcde_display_device *ddev, bool overlay) +{ + int ret = 0; + struct dispdev *dd; + struct mcde_video_mode vmode; + struct mcde_overlay_info info; + + static int counter; + char *name = "dispdev0"; + + dd = kzalloc(sizeof(struct dispdev), GFP_KERNEL); + if (!dd) + return -ENOMEM; + + sprintf(name, "%s%d", DISPDEV_DEFAULT_DEVICE_PREFIX, counter++); + init_dispdev(dd, ddev, name, overlay); + + get_ovly_info(&dd->config, &info, overlay); + + if (!overlay) { + ret = mcde_dss_enable_display(ddev); + if (ret) + goto fail_enable_display; + mcde_dss_get_video_mode(ddev, &vmode); + mcde_dss_try_video_mode(ddev, &vmode); + ret = mcde_dss_set_video_mode(ddev, &vmode); + if (ret) + goto fail_set_video_mode; + mcde_dss_set_pixel_format(ddev, info.fmt); + mcde_dss_apply_channel(ddev); + } + + dd->ovly = mcde_dss_create_overlay(ddev, &info); + if (!dd->ovly) { + ret = -ENOMEM; + goto fail_create_ovly; + } + + ret = misc_register(&dd->mdev); + if (ret) + goto fail_register_misc; + mutex_lock(&dev_list_lock); + list_add_tail(&dd->list, &dev_list); + mutex_unlock(&dev_list_lock); + + goto out; + +fail_register_misc: + mcde_dss_destroy_overlay(dd->ovly); +fail_create_ovly: + if (!overlay) + 
mcde_dss_disable_display(ddev); +fail_set_video_mode: +fail_enable_display: + kfree(dd); +out: + return ret; +} + +void dispdev_destroy(struct mcde_display_device *ddev) +{ + struct dispdev *dd; + struct dispdev *tmp; + + mutex_lock(&dev_list_lock); + list_for_each_entry_safe(dd, tmp, &dev_list, list) { + if (dd->ddev == ddev) { + list_del(&dd->list); + misc_deregister(&dd->mdev); + mcde_dss_destroy_overlay(dd->ovly); + /* + * TODO: Uncomment when DSS has reference + * counting of enable/disable + */ + /* mcde_dss_disable_display(dd->ddev); */ + kfree(dd); + break; + } + } + mutex_unlock(&dev_list_lock); +} + +static void dispdev_destroy_all(void) +{ + struct dispdev *dd; + struct dispdev *tmp; + + mutex_lock(&dev_list_lock); + list_for_each_entry_safe(dd, tmp, &dev_list, list) { + list_del(&dd->list); + misc_deregister(&dd->mdev); + mcde_dss_destroy_overlay(dd->ovly); + /* + * TODO: Uncomment when DSS has reference + * counting of enable/disable + */ + /* mcde_dss_disable_display(dd->ddev); */ + kfree(dd); + } + mutex_unlock(&dev_list_lock); + + mutex_destroy(&dev_list_lock); +} + +static int __init dispdev_init(void) +{ + pr_info("%s\n", __func__); + + mutex_init(&dev_list_lock); + + return 0; +} +module_init(dispdev_init); + +static void __exit dispdev_exit(void) +{ + dispdev_destroy_all(); + pr_info("%s\n", __func__); +} +module_exit(dispdev_exit); + +MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Display output device driver"); + diff --git a/drivers/misc/hwmem/Makefile b/drivers/misc/hwmem/Makefile new file mode 100644 index 00000000000..c307616a181 --- /dev/null +++ b/drivers/misc/hwmem/Makefile @@ -0,0 +1,3 @@ +hwmem-objs := hwmem-main.o hwmem-ioctl.o cache_handler.o contig_alloc.o + +obj-$(CONFIG_HWMEM) += hwmem.o diff --git a/drivers/misc/hwmem/cache_handler.c b/drivers/misc/hwmem/cache_handler.c new file mode 100644 index 00000000000..e0ab4ee6cf8 --- /dev/null +++ 
b/drivers/misc/hwmem/cache_handler.c @@ -0,0 +1,510 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Cache handler + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include <linux/hwmem.h> + +#include <asm/pgtable.h> + +#include <mach/dcache.h> + +#include "cache_handler.h" + +#define U32_MAX (~(u32)0) + +enum hwmem_alloc_flags cachi_get_cache_settings( + enum hwmem_alloc_flags requested_cache_settings); +void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings, + pgprot_t *pgprot); + +static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access, + struct hwmem_region *region); +static void sync_buf_post_cpu(struct cach_buf *buf, + enum hwmem_access next_access, struct hwmem_region *next_region); + +static void invalidate_cpu_cache(struct cach_buf *buf, + struct cach_range *range_2b_used); +static void clean_cpu_cache(struct cach_buf *buf, + struct cach_range *range_2b_used); +static void flush_cpu_cache(struct cach_buf *buf, + struct cach_range *range_2b_used); + +static void null_range(struct cach_range *range); +static void expand_range(struct cach_range *range, + struct cach_range *range_2_add); +/* + * Expands range to one of enclosing_range's two edges. The function will + * choose which of enclosing_range's edges to expand range to in such a + * way that the size of range is minimized. range must be located inside + * enclosing_range. 
+ */ +static void expand_range_2_edge(struct cach_range *range, + struct cach_range *enclosing_range); +static void shrink_range(struct cach_range *range, + struct cach_range *range_2_remove); +static bool is_non_empty_range(struct cach_range *range); +static void intersect_range(struct cach_range *range_1, + struct cach_range *range_2, struct cach_range *intersection); +/* Align_up restrictions apply here to */ +static void align_range_up(struct cach_range *range, u32 alignment); +static u32 range_length(struct cach_range *range); +static void region_2_range(struct hwmem_region *region, u32 buffer_size, + struct cach_range *range); + +static void *offset_2_vaddr(struct cach_buf *buf, u32 offset); +static u32 offset_2_paddr(struct cach_buf *buf, u32 offset); + +/* Saturates, might return unaligned values when that happens */ +static u32 align_up(u32 value, u32 alignment); +static u32 align_down(u32 value, u32 alignment); + +/* + * Exported functions + */ + +void cach_init_buf(struct cach_buf *buf, enum hwmem_alloc_flags cache_settings, + u32 size) +{ + buf->vstart = NULL; + buf->pstart = 0; + buf->size = size; + + buf->cache_settings = cachi_get_cache_settings(cache_settings); +} + +void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr) +{ + bool tmp; + + buf->vstart = vaddr; + buf->pstart = paddr; + + if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) { + /* + * Keep whatever is in the cache. This way we avoid an + * unnecessary synch if CPU is the first user. 
+ */ + buf->range_in_cpu_cache.start = 0; + buf->range_in_cpu_cache.end = buf->size; + align_range_up(&buf->range_in_cpu_cache, + get_dcache_granularity()); + buf->range_dirty_in_cpu_cache.start = 0; + buf->range_dirty_in_cpu_cache.end = buf->size; + align_range_up(&buf->range_dirty_in_cpu_cache, + get_dcache_granularity()); + } else { + flush_cpu_dcache(buf->vstart, buf->pstart, buf->size, false, + &tmp); + drain_cpu_write_buf(); + + null_range(&buf->range_in_cpu_cache); + null_range(&buf->range_dirty_in_cpu_cache); + } + null_range(&buf->range_invalid_in_cpu_cache); +} + +void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot) +{ + cachi_set_pgprot_cache_options(buf->cache_settings, pgprot); +} + +void cach_set_domain(struct cach_buf *buf, enum hwmem_access access, + enum hwmem_domain domain, struct hwmem_region *region) +{ + struct hwmem_region *__region; + struct hwmem_region full_region; + + if (region != NULL) { + __region = region; + } else { + full_region.offset = 0; + full_region.count = 1; + full_region.start = 0; + full_region.end = buf->size; + full_region.size = buf->size; + + __region = &full_region; + } + + switch (domain) { + case HWMEM_DOMAIN_SYNC: + sync_buf_post_cpu(buf, access, __region); + + break; + + case HWMEM_DOMAIN_CPU: + sync_buf_pre_cpu(buf, access, __region); + + break; + } +} + +/* + * Local functions + */ + +enum hwmem_alloc_flags __attribute__((weak)) cachi_get_cache_settings( + enum hwmem_alloc_flags requested_cache_settings) +{ + static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED | + HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT | + HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW | + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE | + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY; + /* We don't know the cache setting so we assume worst case. 
*/ + static const u32 CACHE_SETTING = HWMEM_ALLOC_HINT_WRITE_COMBINE | + HWMEM_ALLOC_HINT_CACHED | HWMEM_ALLOC_HINT_CACHE_WB | + HWMEM_ALLOC_HINT_CACHE_AOW | + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE; + + if (requested_cache_settings & CACHE_ON_FLAGS_MASK) + return CACHE_SETTING; + else if (requested_cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE || + (requested_cache_settings & HWMEM_ALLOC_HINT_UNCACHED && + !(requested_cache_settings & + HWMEM_ALLOC_HINT_NO_WRITE_COMBINE))) + return HWMEM_ALLOC_HINT_WRITE_COMBINE; + else if (requested_cache_settings & + (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE | + HWMEM_ALLOC_HINT_UNCACHED)) + return 0; + else + /* Nothing specified, use cached */ + return CACHE_SETTING; +} + +void __attribute__((weak)) cachi_set_pgprot_cache_options( + enum hwmem_alloc_flags cache_settings, pgprot_t *pgprot) +{ + if (cache_settings & HWMEM_ALLOC_HINT_CACHED) + *pgprot = *pgprot; /* To silence compiler and checkpatch */ + else if (cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) + *pgprot = pgprot_writecombine(*pgprot); + else + *pgprot = pgprot_noncached(*pgprot); +} + +bool __attribute__((weak)) speculative_data_prefetch(void) +{ + /* We don't know so we go with the safe alternative */ + return true; +} + +static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access, + struct hwmem_region *region) +{ + bool write = access & HWMEM_ACCESS_WRITE; + bool read = access & HWMEM_ACCESS_READ; + + if (!write && !read) + return; + + if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) { + struct cach_range region_range; + + region_2_range(region, buf->size, ®ion_range); + + if (read || (write && buf->cache_settings & + HWMEM_ALLOC_HINT_CACHE_WB)) + /* Perform defered invalidates */ + invalidate_cpu_cache(buf, ®ion_range); + if (read || (write && buf->cache_settings & + HWMEM_ALLOC_HINT_CACHE_AOW)) + expand_range(&buf->range_in_cpu_cache, ®ion_range); + if (write && buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_WB) { + struct cach_range 
dirty_range_addition; + + if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW) + dirty_range_addition = region_range; + else + intersect_range(&buf->range_in_cpu_cache, + ®ion_range, &dirty_range_addition); + + expand_range(&buf->range_dirty_in_cpu_cache, + &dirty_range_addition); + } + } + if (buf->cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) { + if (write) + buf->in_cpu_write_buf = true; + } +} + +static void sync_buf_post_cpu(struct cach_buf *buf, + enum hwmem_access next_access, struct hwmem_region *next_region) +{ + bool write = next_access & HWMEM_ACCESS_WRITE; + bool read = next_access & HWMEM_ACCESS_READ; + struct cach_range region_range; + + if (!write && !read) + return; + + region_2_range(next_region, buf->size, ®ion_range); + + if (write) { + if (speculative_data_prefetch()) { + /* Defer invalidate */ + struct cach_range intersection; + + intersect_range(&buf->range_in_cpu_cache, + ®ion_range, &intersection); + + expand_range(&buf->range_invalid_in_cpu_cache, + &intersection); + + clean_cpu_cache(buf, ®ion_range); + } else { + flush_cpu_cache(buf, ®ion_range); + } + } + if (read) + clean_cpu_cache(buf, ®ion_range); + + if (buf->in_cpu_write_buf) { + drain_cpu_write_buf(); + + buf->in_cpu_write_buf = false; + } +} + +static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range) +{ + struct cach_range intersection; + + intersect_range(&buf->range_invalid_in_cpu_cache, range, + &intersection); + if (is_non_empty_range(&intersection)) { + bool flushed_everything; + + expand_range_2_edge(&intersection, + &buf->range_invalid_in_cpu_cache); + + /* + * Cache handler never uses invalidate to discard data in the + * cache so we can use flush instead which is considerably + * faster for large buffers. 
+ */ + flush_cpu_dcache( + offset_2_vaddr(buf, intersection.start), + offset_2_paddr(buf, intersection.start), + range_length(&intersection), + buf->cache_settings & + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY, + &flushed_everything); + + if (flushed_everything) { + null_range(&buf->range_invalid_in_cpu_cache); + null_range(&buf->range_dirty_in_cpu_cache); + } else { + /* + * No need to shrink range_in_cpu_cache as invalidate + * is only used when we can't keep track of what's in + * the CPU cache. + */ + shrink_range(&buf->range_invalid_in_cpu_cache, + &intersection); + } + } +} + +static void clean_cpu_cache(struct cach_buf *buf, struct cach_range *range) +{ + struct cach_range intersection; + + intersect_range(&buf->range_dirty_in_cpu_cache, range, &intersection); + if (is_non_empty_range(&intersection)) { + bool cleaned_everything; + + expand_range_2_edge(&intersection, + &buf->range_dirty_in_cpu_cache); + + clean_cpu_dcache( + offset_2_vaddr(buf, intersection.start), + offset_2_paddr(buf, intersection.start), + range_length(&intersection), + buf->cache_settings & + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY, + &cleaned_everything); + + if (cleaned_everything) + null_range(&buf->range_dirty_in_cpu_cache); + else + shrink_range(&buf->range_dirty_in_cpu_cache, + &intersection); + } +} + +static void flush_cpu_cache(struct cach_buf *buf, struct cach_range *range) +{ + struct cach_range intersection; + + intersect_range(&buf->range_in_cpu_cache, range, &intersection); + if (is_non_empty_range(&intersection)) { + bool flushed_everything; + + expand_range_2_edge(&intersection, &buf->range_in_cpu_cache); + + flush_cpu_dcache( + offset_2_vaddr(buf, intersection.start), + offset_2_paddr(buf, intersection.start), + range_length(&intersection), + buf->cache_settings & + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY, + &flushed_everything); + + if (flushed_everything) { + if (!speculative_data_prefetch()) + null_range(&buf->range_in_cpu_cache); + null_range(&buf->range_dirty_in_cpu_cache); + 
null_range(&buf->range_invalid_in_cpu_cache); + } else { + if (!speculative_data_prefetch()) + shrink_range(&buf->range_in_cpu_cache, + &intersection); + shrink_range(&buf->range_dirty_in_cpu_cache, + &intersection); + shrink_range(&buf->range_invalid_in_cpu_cache, + &intersection); + } + } +} + +static void null_range(struct cach_range *range) +{ + range->start = U32_MAX; + range->end = 0; +} + +static void expand_range(struct cach_range *range, + struct cach_range *range_2_add) +{ + range->start = min(range->start, range_2_add->start); + range->end = max(range->end, range_2_add->end); +} + +/* + * Expands range to one of enclosing_range's two edges. The function will + * choose which of enclosing_range's edges to expand range to in such a + * way that the size of range is minimized. range must be located inside + * enclosing_range. + */ +static void expand_range_2_edge(struct cach_range *range, + struct cach_range *enclosing_range) +{ + u32 space_on_low_side = range->start - enclosing_range->start; + u32 space_on_high_side = enclosing_range->end - range->end; + + if (space_on_low_side < space_on_high_side) + range->start = enclosing_range->start; + else + range->end = enclosing_range->end; +} + +static void shrink_range(struct cach_range *range, + struct cach_range *range_2_remove) +{ + if (range_2_remove->start > range->start) + range->end = min(range->end, range_2_remove->start); + else + range->start = max(range->start, range_2_remove->end); + + if (range->start >= range->end) + null_range(range); +} + +static bool is_non_empty_range(struct cach_range *range) +{ + return range->end > range->start; +} + +static void intersect_range(struct cach_range *range_1, + struct cach_range *range_2, struct cach_range *intersection) +{ + intersection->start = max(range_1->start, range_2->start); + intersection->end = min(range_1->end, range_2->end); + + if (intersection->start >= intersection->end) + null_range(intersection); +} + +/* Align_up restrictions apply here to */ 
+static void align_range_up(struct cach_range *range, u32 alignment) +{ + if (!is_non_empty_range(range)) + return; + + range->start = align_down(range->start, alignment); + range->end = align_up(range->end, alignment); +} + +static u32 range_length(struct cach_range *range) +{ + if (is_non_empty_range(range)) + return range->end - range->start; + else + return 0; +} + +static void region_2_range(struct hwmem_region *region, u32 buffer_size, + struct cach_range *range) +{ + /* + * We don't care about invalid regions, instead we limit the region's + * range to the buffer's range. This should work good enough, worst + * case we synch the entire buffer when we get an invalid region which + * is acceptable. + */ + range->start = region->offset + region->start; + range->end = min(region->offset + (region->count * region->size) - + (region->size - region->end), buffer_size); + if (range->start >= range->end) { + null_range(range); + return; + } + + align_range_up(range, get_dcache_granularity()); +} + +static void *offset_2_vaddr(struct cach_buf *buf, u32 offset) +{ + return (void *)((u32)buf->vstart + offset); +} + +static u32 offset_2_paddr(struct cach_buf *buf, u32 offset) +{ + return buf->pstart + offset; +} + +/* Saturates, might return unaligned values when that happens */ +static u32 align_up(u32 value, u32 alignment) +{ + u32 remainder = value % alignment; + u32 value_2_add; + + if (remainder == 0) + return value; + + value_2_add = alignment - remainder; + + if (value_2_add > U32_MAX - value) /* Will overflow */ + return U32_MAX; + + return value + value_2_add; +} + +static u32 align_down(u32 value, u32 alignment) +{ + u32 remainder = value % alignment; + if (remainder == 0) + return value; + + return value - remainder; +} diff --git a/drivers/misc/hwmem/cache_handler.h b/drivers/misc/hwmem/cache_handler.h new file mode 100644 index 00000000000..792105196fa --- /dev/null +++ b/drivers/misc/hwmem/cache_handler.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) ST-Ericsson 
SA 2010 + * + * Cache handler + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +/* + * Cache handler can not handle simultaneous execution! The caller has to + * ensure such a situation does not occur. + */ + +#ifndef _CACHE_HANDLER_H_ +#define _CACHE_HANDLER_H_ + +#include <linux/types.h> +#include <linux/hwmem.h> + +/* + * To not have to double all datatypes we've used hwmem datatypes. If someone + * want's to use cache handler but not hwmem then we'll have to define our own + * datatypes. + */ + +struct cach_range { + u32 start; /* Inclusive */ + u32 end; /* Exclusive */ +}; + +/* + * Internal, do not touch! + */ +struct cach_buf { + void *vstart; + u32 pstart; + u32 size; + + /* Remaining hints are active */ + enum hwmem_alloc_flags cache_settings; + + bool in_cpu_write_buf; + struct cach_range range_in_cpu_cache; + struct cach_range range_dirty_in_cpu_cache; + struct cach_range range_invalid_in_cpu_cache; +}; + +void cach_init_buf(struct cach_buf *buf, + enum hwmem_alloc_flags cache_settings, u32 size); + +void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr); + +void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot); + +void cach_set_domain(struct cach_buf *buf, enum hwmem_access access, + enum hwmem_domain domain, struct hwmem_region *region); + +#endif /* _CACHE_HANDLER_H_ */ diff --git a/drivers/misc/hwmem/contig_alloc.c b/drivers/misc/hwmem/contig_alloc.c new file mode 100644 index 00000000000..bc71ca08f0f --- /dev/null +++ b/drivers/misc/hwmem/contig_alloc.c @@ -0,0 +1,468 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Contiguous memory allocator + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>, + * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/debugfs.h> +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <asm/sizes.h> + +#define MAX_INSTANCE_NAME_LENGTH 31 + +struct alloc { + struct list_head list; + + bool in_use; + phys_addr_t paddr; + size_t size; +}; + +struct instance { + struct list_head list; + + char name[MAX_INSTANCE_NAME_LENGTH + 1]; + + phys_addr_t region_paddr; + void *region_kaddr; + size_t region_size; + + struct list_head alloc_list; + +#ifdef CONFIG_DEBUG_FS + struct inode *debugfs_inode; +#endif /* #ifdef CONFIG_DEBUG_FS */ +}; + +static LIST_HEAD(instance_list); + +static DEFINE_MUTEX(lock); + +void *cona_create(const char *name, phys_addr_t region_paddr, + size_t region_size); +void *cona_alloc(void *instance, size_t size); +void cona_free(void *instance, void *alloc); +phys_addr_t cona_get_alloc_paddr(void *alloc); +void *cona_get_alloc_kaddr(void *instance, void *alloc); +size_t cona_get_alloc_size(void *alloc); + +static int init_alloc_list(struct instance *instance); +static void clean_alloc_list(struct instance *instance); +static struct alloc *find_free_alloc_bestfit(struct instance *instance, + size_t size); +static struct alloc *split_allocation(struct alloc *alloc, + size_t new_alloc_size); +static phys_addr_t get_alloc_offset(struct instance *instance, + struct alloc *alloc); + +void *cona_create(const char *name, phys_addr_t region_paddr, + size_t region_size) +{ + int ret; + struct instance *instance; + struct vm_struct *vm_area; + + if (region_size == 0) + return ERR_PTR(-EINVAL); + + instance = kzalloc(sizeof(*instance), GFP_KERNEL); + if (instance == NULL) + return ERR_PTR(-ENOMEM); + + memcpy(instance->name, name, MAX_INSTANCE_NAME_LENGTH + 1); + /* Truncate name if necessary */ + instance->name[MAX_INSTANCE_NAME_LENGTH] = '\0'; + instance->region_paddr = region_paddr; + 
instance->region_size = region_size; + + vm_area = get_vm_area(region_size, VM_IOREMAP); + if (vm_area == NULL) { + printk(KERN_WARNING "CONA: Failed to allocate %u bytes" + " kernel virtual memory", region_size); + ret = -ENOMSG; + goto vmem_alloc_failed; + } + instance->region_kaddr = vm_area->addr; + + INIT_LIST_HEAD(&instance->alloc_list); + ret = init_alloc_list(instance); + if (ret < 0) + goto init_alloc_list_failed; + + mutex_lock(&lock); + list_add_tail(&instance->list, &instance_list); + mutex_unlock(&lock); + + return instance; + +init_alloc_list_failed: + vm_area = remove_vm_area(instance->region_kaddr); + if (vm_area == NULL) + printk(KERN_ERR "CONA: Failed to free kernel virtual memory," + " resource leak!\n"); + + kfree(vm_area); +vmem_alloc_failed: + kfree(instance); + + return ERR_PTR(ret); +} + +void *cona_alloc(void *instance, size_t size) +{ + struct instance *instance_l = (struct instance *)instance; + struct alloc *alloc; + + if (size == 0) + return ERR_PTR(-EINVAL); + + mutex_lock(&lock); + + alloc = find_free_alloc_bestfit(instance_l, size); + if (IS_ERR(alloc)) + goto out; + if (size < alloc->size) { + alloc = split_allocation(alloc, size); + if (IS_ERR(alloc)) + goto out; + } else { + alloc->in_use = true; + } + +out: + mutex_unlock(&lock); + + return alloc; +} + +void cona_free(void *instance, void *alloc) +{ + struct instance *instance_l = (struct instance *)instance; + struct alloc *alloc_l = (struct alloc *)alloc; + struct alloc *other; + + mutex_lock(&lock); + + alloc_l->in_use = false; + + other = list_entry(alloc_l->list.prev, struct alloc, list); + if ((alloc_l->list.prev != &instance_l->alloc_list) && + !other->in_use) { + other->size += alloc_l->size; + list_del(&alloc_l->list); + kfree(alloc_l); + alloc_l = other; + } + other = list_entry(alloc_l->list.next, struct alloc, list); + if ((alloc_l->list.next != &instance_l->alloc_list) && + !other->in_use) { + alloc_l->size += other->size; + list_del(&other->list); + kfree(other); + 
} + + mutex_unlock(&lock); +} + +phys_addr_t cona_get_alloc_paddr(void *alloc) +{ + return ((struct alloc *)alloc)->paddr; +} + +void *cona_get_alloc_kaddr(void *instance, void *alloc) +{ + struct instance *instance_l = (struct instance *)instance; + + return instance_l->region_kaddr + get_alloc_offset(instance_l, + (struct alloc *)alloc); +} + +size_t cona_get_alloc_size(void *alloc) +{ + return ((struct alloc *)alloc)->size; +} + +static int init_alloc_list(struct instance *instance) +{ + /* + * Hack to not get any allocs that cross a 64MiB boundary as B2R2 can't + * handle that. + */ + int ret; + u32 curr_pos = instance->region_paddr; + u32 region_end = instance->region_paddr + instance->region_size; + u32 next_64mib_boundary = (curr_pos + SZ_64M) & ~(SZ_64M - 1); + struct alloc *alloc; + + if (PAGE_SIZE >= SZ_64M) { + printk(KERN_WARNING "CONA: PAGE_SIZE >= 64MiB\n"); + return -ENOMSG; + } + + while (next_64mib_boundary < region_end) { + if (next_64mib_boundary - curr_pos > PAGE_SIZE) { + alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = next_64mib_boundary - curr_pos - + PAGE_SIZE; + alloc->in_use = false; + list_add_tail(&alloc->list, &instance->alloc_list); + curr_pos = alloc->paddr + alloc->size; + } + + alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = PAGE_SIZE; + alloc->in_use = true; + list_add_tail(&alloc->list, &instance->alloc_list); + curr_pos = alloc->paddr + alloc->size; + + next_64mib_boundary += SZ_64M; + } + + alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = region_end - curr_pos; + alloc->in_use = false; + list_add_tail(&alloc->list, &instance->alloc_list); + + return 0; + +error: + clean_alloc_list(instance); + + return ret; +} + +static 
void clean_alloc_list(struct instance *instance) +{ + while (list_empty(&instance->alloc_list) == 0) { + struct alloc *i = list_first_entry(&instance->alloc_list, + struct alloc, list); + + list_del(&i->list); + + kfree(i); + } +} + +static struct alloc *find_free_alloc_bestfit(struct instance *instance, + size_t size) +{ + size_t best_diff = ~(size_t)0; + struct alloc *alloc = NULL, *i; + + list_for_each_entry(i, &instance->alloc_list, list) { + size_t diff = i->size - size; + if (i->in_use || i->size < size) + continue; + if (diff < best_diff) { + alloc = i; + best_diff = diff; + } + } + + return alloc != NULL ? alloc : ERR_PTR(-ENOMEM); +} + +static struct alloc *split_allocation(struct alloc *alloc, + size_t new_alloc_size) +{ + struct alloc *new_alloc; + + new_alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (new_alloc == NULL) + return ERR_PTR(-ENOMEM); + + new_alloc->in_use = true; + new_alloc->paddr = alloc->paddr; + new_alloc->size = new_alloc_size; + alloc->size -= new_alloc_size; + alloc->paddr += new_alloc_size; + + list_add_tail(&new_alloc->list, &alloc->list); + + return new_alloc; +} + +static phys_addr_t get_alloc_offset(struct instance *instance, + struct alloc *alloc) +{ + return alloc->paddr - instance->region_paddr; +} + +/* Debug */ + +#ifdef CONFIG_DEBUG_FS + +static int print_alloc(struct alloc *alloc, char **buf, size_t buf_size); +static struct instance *get_instance_from_file(struct file *file); +static int debugfs_allocs_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); + +static const struct file_operations debugfs_allocs_fops = { + .owner = THIS_MODULE, + .read = debugfs_allocs_read, +}; + +static int print_alloc(struct alloc *alloc, char **buf, size_t buf_size) +{ + int ret; + int i; + + for (i = 0; i < 2; i++) { + size_t buf_size_l; + if (i == 0) + buf_size_l = 0; + else + buf_size_l = buf_size; + + ret = snprintf(*buf, buf_size_l, "paddr: %10x\tsize: %10u\t" + "in use: %1u\n", alloc->paddr, alloc->size, 
+ alloc->in_use); + if (ret < 0) + return -ENOMSG; + else if (ret + 1 > buf_size) + return -EINVAL; + } + + *buf += ret; + + return 0; +} + +static struct instance *get_instance_from_file(struct file *file) +{ + struct instance *curr_instance; + + list_for_each_entry(curr_instance, &instance_list, list) { + if (file->f_dentry->d_inode == curr_instance->debugfs_inode) + return curr_instance; + } + + return ERR_PTR(-ENOENT); +} + +static int debugfs_allocs_read(struct file *file, char __user *buf, + size_t count, loff_t *f_pos) +{ + /* + * We assume the supplied buffer and PAGE_SIZE is large enough to hold + * information about at least one alloc, if not no data will be + * returned. + */ + + int ret; + struct instance *instance; + struct alloc *curr_alloc; + char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + char *local_buf_pos = local_buf; + size_t available_space = min((size_t)PAGE_SIZE, count); + /* private_data is intialized to NULL in open which I assume is 0. */ + void **curr_pos = &file->private_data; + size_t bytes_read; + + if (local_buf == NULL) + return -ENOMEM; + + mutex_lock(&lock); + + instance = get_instance_from_file(file); + if (IS_ERR(instance)) { + ret = PTR_ERR(instance); + goto out; + } + + list_for_each_entry(curr_alloc, &instance->alloc_list, list) { + phys_addr_t alloc_offset = get_alloc_offset(instance, + curr_alloc); + if (alloc_offset < (phys_addr_t)*curr_pos) + continue; + + ret = print_alloc(curr_alloc, &local_buf_pos, available_space - + (size_t)(local_buf_pos - local_buf)); + if (ret == -EINVAL) /* No more room */ + break; + else if (ret < 0) + goto out; + + /* + * There could be an overflow issue here in the unlikely case + * where the region is placed at the end of the address range + * and the last alloc is 1 byte large. Since this is debug code + * and that case most likely never will happen I've chosen to + * defer fixing it till it happens. 
+ */ + *curr_pos = (void *)(alloc_offset + 1); + } + + bytes_read = (size_t)(local_buf_pos - local_buf); + + ret = copy_to_user(buf, local_buf, bytes_read); + if (ret < 0) + goto out; + + ret = bytes_read; + +out: + kfree(local_buf); + + mutex_unlock(&lock); + + return ret; +} + +static int __init init_debugfs(void) +{ + struct instance *curr_instance; + struct dentry *debugfs_root_dir = debugfs_create_dir("cona", NULL); + + mutex_lock(&lock); + + list_for_each_entry(curr_instance, &instance_list, list) { + struct dentry *file_dentry; + char tmp_str[MAX_INSTANCE_NAME_LENGTH + 7 + 1]; + tmp_str[0] = '\0'; + strcat(tmp_str, curr_instance->name); + strcat(tmp_str, "_allocs"); + file_dentry = debugfs_create_file(tmp_str, 0444, + debugfs_root_dir, 0, &debugfs_allocs_fops); + if (file_dentry != NULL) + curr_instance->debugfs_inode = file_dentry->d_inode; + } + + mutex_unlock(&lock); + + return 0; +} +/* + * Must be executed after all instances have been created, hence the + * late_initcall. + */ +late_initcall(init_debugfs); + +#endif /* #ifdef CONFIG_DEBUG_FS */ diff --git a/drivers/misc/hwmem/hwmem-ioctl.c b/drivers/misc/hwmem/hwmem-ioctl.c new file mode 100644 index 00000000000..e9e50de78bd --- /dev/null +++ b/drivers/misc/hwmem/hwmem-ioctl.c @@ -0,0 +1,532 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Hardware memory driver, hwmem + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/mm_types.h> +#include <linux/hwmem.h> +#include <linux/device.h> +#include <linux/sched.h> + +static int hwmem_open(struct inode *inode, struct file *file); +static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma); +static int hwmem_release_fop(struct inode *inode, struct file *file); +static long hwmem_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +static unsigned long hwmem_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags); + +static const struct file_operations hwmem_fops = { + .open = hwmem_open, + .mmap = hwmem_ioctl_mmap, + .unlocked_ioctl = hwmem_ioctl, + .release = hwmem_release_fop, + .get_unmapped_area = hwmem_get_unmapped_area, +}; + +static struct miscdevice hwmem_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hwmem", + .fops = &hwmem_fops, +}; + +struct hwmem_file { + struct mutex lock; + struct idr idr; /* id -> struct hwmem_alloc*, ref counted */ + struct hwmem_alloc *fd_alloc; /* Ref counted */ +}; + +static s32 create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc) +{ + int id, ret; + + while (true) { + if (idr_pre_get(&hwfile->idr, GFP_KERNEL) == 0) + return -ENOMEM; + + ret = idr_get_new_above(&hwfile->idr, alloc, 1, &id); + if (ret == 0) + break; + else if (ret != -EAGAIN) + return -ENOMEM; + } + + /* + * IDR always returns the lowest free id so there is no wrapping issue + * because of this. 
+ */ + if (id >= (s32)1 << (31 - PAGE_SHIFT)) { + dev_err(hwmem_device.this_device, "Out of IDs!\n"); + idr_remove(&hwfile->idr, id); + return -ENOMSG; + } + + return (s32)id << PAGE_SHIFT; +} + +static void remove_id(struct hwmem_file *hwfile, s32 id) +{ + idr_remove(&hwfile->idr, id >> PAGE_SHIFT); +} + +static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, s32 id) +{ + struct hwmem_alloc *alloc; + + alloc = id ? idr_find(&hwfile->idr, id >> PAGE_SHIFT) : + hwfile->fd_alloc; + if (alloc == NULL) + alloc = ERR_PTR(-EINVAL); + + return alloc; +} + +static s32 alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req) +{ + s32 ret = 0; + struct hwmem_alloc *alloc; + + alloc = hwmem_alloc(req->size, req->flags, req->default_access, + req->mem_type); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + ret = create_id(hwfile, alloc); + if (ret < 0) + hwmem_release(alloc); + + return ret; +} + +static int alloc_fd(struct hwmem_file *hwfile, struct hwmem_alloc_request *req) +{ + struct hwmem_alloc *alloc; + + if (hwfile->fd_alloc) + return -EINVAL; + + alloc = hwmem_alloc(req->size, req->flags, req->default_access, + req->mem_type); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwfile->fd_alloc = alloc; + + return 0; +} + +static int release(struct hwmem_file *hwfile, s32 id) +{ + struct hwmem_alloc *alloc; + + if (id == 0) + return -EINVAL; + + alloc = resolve_id(hwfile, id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + remove_id(hwfile, id); + hwmem_release(alloc); + + return 0; +} + +static int set_cpu_domain(struct hwmem_file *hwfile, + struct hwmem_set_domain_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_CPU, + (struct hwmem_region *)&req->region); +} + +static int set_sync_domain(struct hwmem_file *hwfile, + struct hwmem_set_domain_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = 
resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_SYNC, + (struct hwmem_region *)&req->region); +} + +static int pin(struct hwmem_file *hwfile, struct hwmem_pin_request *req) +{ + int ret; + struct hwmem_alloc *alloc; + enum hwmem_mem_type mem_type; + struct hwmem_mem_chunk mem_chunk; + size_t mem_chunk_length = 1; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwmem_get_info(alloc, NULL, &mem_type, NULL); + if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS) + return -EINVAL; + + ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length); + if (ret < 0) + return ret; + + req->phys_addr = mem_chunk.paddr; + + return 0; +} + +static int unpin(struct hwmem_file *hwfile, s32 id) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwmem_unpin(alloc); + + return 0; +} + +static int set_access(struct hwmem_file *hwfile, + struct hwmem_set_access_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + return hwmem_set_access(alloc, req->access, req->pid); +} + +static int get_info(struct hwmem_file *hwfile, + struct hwmem_get_info_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwmem_get_info(alloc, &req->size, &req->mem_type, &req->access); + + return 0; +} + +static s32 export(struct hwmem_file *hwfile, s32 id) +{ + s32 ret; + struct hwmem_alloc *alloc; + enum hwmem_access access; + + alloc = resolve_id(hwfile, id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + /* + * The user could be about to send the buffer to a driver but + * there is a chance the current thread group don't have import rights + * if it gained access to the buffer via a inter-process fd transfer + * (fork, Android binder), if this is the case the 
driver will not be + * able to resolve the buffer name. To avoid this situation we give the + * current thread group import rights. This will not breach the + * security as the process already has access to the buffer (otherwise + * it would not be able to get here). + */ + hwmem_get_info(alloc, NULL, NULL, &access); + + ret = hwmem_set_access(alloc, (access | HWMEM_ACCESS_IMPORT), + task_tgid_nr(current)); + if (ret < 0) + return ret; + + return hwmem_get_name(alloc); +} + +static s32 import(struct hwmem_file *hwfile, s32 name) +{ + s32 ret = 0; + struct hwmem_alloc *alloc; + enum hwmem_access access; + + alloc = hwmem_resolve_by_name(name); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + /* Check access permissions for process */ + hwmem_get_info(alloc, NULL, NULL, &access); + if (!(access & HWMEM_ACCESS_IMPORT)) { + ret = -EPERM; + goto error; + } + + ret = create_id(hwfile, alloc); + if (ret < 0) + goto error; + + return ret; + +error: + hwmem_release(alloc); + + return ret; +} + +static int import_fd(struct hwmem_file *hwfile, s32 name) +{ + int ret; + struct hwmem_alloc *alloc; + enum hwmem_access access; + + if (hwfile->fd_alloc) + return -EINVAL; + + alloc = hwmem_resolve_by_name(name); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + /* Check access permissions for process */ + hwmem_get_info(alloc, NULL, NULL, &access); + if (!(access & HWMEM_ACCESS_IMPORT)) { + ret = -EPERM; + goto error; + } + + hwfile->fd_alloc = alloc; + + return 0; + +error: + hwmem_release(alloc); + + return ret; +} + +static int hwmem_open(struct inode *inode, struct file *file) +{ + struct hwmem_file *hwfile; + + hwfile = kzalloc(sizeof(struct hwmem_file), GFP_KERNEL); + if (hwfile == NULL) + return -ENOMEM; + + idr_init(&hwfile->idr); + mutex_init(&hwfile->lock); + file->private_data = hwfile; + + return 0; +} + +static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma) +{ + int ret; + struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data; + 
struct hwmem_alloc *alloc; + + mutex_lock(&hwfile->lock); + + alloc = resolve_id(hwfile, (s32)vma->vm_pgoff << PAGE_SHIFT); + if (IS_ERR(alloc)) { + ret = PTR_ERR(alloc); + goto out; + } + + ret = hwmem_mmap(alloc, vma); + +out: + mutex_unlock(&hwfile->lock); + + return ret; +} + +static int hwmem_release_idr_for_each_wrapper(int id, void *ptr, void *data) +{ + hwmem_release((struct hwmem_alloc *)ptr); + + return 0; +} + +static int hwmem_release_fop(struct inode *inode, struct file *file) +{ + struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data; + + idr_for_each(&hwfile->idr, hwmem_release_idr_for_each_wrapper, NULL); + idr_remove_all(&hwfile->idr); + idr_destroy(&hwfile->idr); + + if (hwfile->fd_alloc) + hwmem_release(hwfile->fd_alloc); + + mutex_destroy(&hwfile->lock); + + kfree(hwfile); + + return 0; +} + +static long hwmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret = -ENOSYS; + struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data; + + mutex_lock(&hwfile->lock); + + switch (cmd) { + case HWMEM_ALLOC_IOC: + { + struct hwmem_alloc_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_alloc_request))) + ret = -EFAULT; + else + ret = alloc(hwfile, &req); + } + break; + case HWMEM_ALLOC_FD_IOC: + { + struct hwmem_alloc_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_alloc_request))) + ret = -EFAULT; + else + ret = alloc_fd(hwfile, &req); + } + break; + case HWMEM_RELEASE_IOC: + ret = release(hwfile, (s32)arg); + break; + case HWMEM_SET_CPU_DOMAIN_IOC: + { + struct hwmem_set_domain_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_set_domain_request))) + ret = -EFAULT; + else + ret = set_cpu_domain(hwfile, &req); + } + break; + case HWMEM_SET_SYNC_DOMAIN_IOC: + { + struct hwmem_set_domain_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_set_domain_request))) + ret = -EFAULT; + 
else + ret = set_sync_domain(hwfile, &req); + } + break; + case HWMEM_PIN_IOC: + { + struct hwmem_pin_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_pin_request))) + ret = -EFAULT; + else + ret = pin(hwfile, &req); + if (ret == 0 && copy_to_user((void __user *)arg, &req, + sizeof(struct hwmem_pin_request))) + ret = -EFAULT; + } + break; + case HWMEM_UNPIN_IOC: + ret = unpin(hwfile, (s32)arg); + break; + case HWMEM_SET_ACCESS_IOC: + { + struct hwmem_set_access_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_set_access_request))) + ret = -EFAULT; + else + ret = set_access(hwfile, &req); + } + break; + case HWMEM_GET_INFO_IOC: + { + struct hwmem_get_info_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_get_info_request))) + ret = -EFAULT; + else + ret = get_info(hwfile, &req); + if (ret == 0 && copy_to_user((void __user *)arg, &req, + sizeof(struct hwmem_get_info_request))) + ret = -EFAULT; + } + break; + case HWMEM_EXPORT_IOC: + ret = export(hwfile, (s32)arg); + break; + case HWMEM_IMPORT_IOC: + ret = import(hwfile, (s32)arg); + break; + case HWMEM_IMPORT_FD_IOC: + ret = import_fd(hwfile, (s32)arg); + break; + } + + mutex_unlock(&hwfile->lock); + + return ret; +} + +static unsigned long hwmem_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + /* + * pgoff will not be valid as it contains a buffer id (right shifted + * PAGE_SHIFT bits). To not confuse get_unmapped_area we'll not pass + * on file or pgoff. 
+ */ + return current->mm->get_unmapped_area(NULL, addr, len, 0, flags); +} + +int __init hwmem_ioctl_init(void) +{ + if (PAGE_SHIFT < 1 || PAGE_SHIFT > 30 || sizeof(size_t) != 4 || + sizeof(int) > 4 || sizeof(enum hwmem_alloc_flags) != 4 || + sizeof(enum hwmem_access) != 4 || + sizeof(enum hwmem_mem_type) != 4) { + dev_err(hwmem_device.this_device, "PAGE_SHIFT < 1 || PAGE_SHIFT" + " > 30 || sizeof(size_t) != 4 || sizeof(int) > 4 ||" + " sizeof(enum hwmem_alloc_flags) != 4 || sizeof(enum" + " hwmem_access) != 4 || sizeof(enum hwmem_mem_type)" + " != 4\n"); + return -ENOMSG; + } + if (PAGE_SHIFT > 15) + dev_warn(hwmem_device.this_device, "Due to the page size only" + " %u id:s per file instance are available\n", + ((u32)1 << (31 - PAGE_SHIFT)) - 1); + + return misc_register(&hwmem_device); +} + +void __exit hwmem_ioctl_exit(void) +{ + misc_deregister(&hwmem_device); +} diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c new file mode 100644 index 00000000000..9162ff4fc91 --- /dev/null +++ b/drivers/misc/hwmem/hwmem-main.c @@ -0,0 +1,725 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Hardware memory driver, hwmem + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>, + * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/idr.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/pid.h> +#include <linux/list.h> +#include <linux/hwmem.h> +#include <linux/debugfs.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/kallsyms.h> +#include <linux/vmalloc.h> +#include "cache_handler.h" + +#define S32_MAX 2147483647 + +struct hwmem_alloc_threadg_info { + struct list_head list; + + struct pid *threadg_pid; /* Ref counted */ + + enum hwmem_access access; +}; + +struct hwmem_alloc { + struct list_head list; + + atomic_t ref_cnt; + + enum hwmem_alloc_flags flags; + struct hwmem_mem_type_struct *mem_type; + + void *allocator_hndl; + phys_addr_t paddr; + void *kaddr; + size_t size; + s32 name; + + /* Access control */ + enum hwmem_access default_access; + struct list_head threadg_info_list; + + /* Cache handling */ + struct cach_buf cach_buf; + +#ifdef CONFIG_DEBUG_FS + /* Debug */ + void *creator; + pid_t creator_tgid; +#endif /* #ifdef CONFIG_DEBUG_FS */ +}; + +static struct platform_device *hwdev; + +static LIST_HEAD(alloc_list); +static DEFINE_IDR(global_idr); +static DEFINE_MUTEX(lock); + +static void vm_open(struct vm_area_struct *vma); +static void vm_close(struct vm_area_struct *vma); +static struct vm_operations_struct vm_ops = { + .open = vm_open, + .close = vm_close, +}; + +static void kunmap_alloc(struct hwmem_alloc *alloc); + +/* Helpers */ + +static void destroy_alloc_threadg_info( + struct hwmem_alloc_threadg_info *info) +{ + if (info->threadg_pid) + put_pid(info->threadg_pid); + + kfree(info); +} + +static void clean_alloc_threadg_info_list(struct hwmem_alloc *alloc) +{ + struct hwmem_alloc_threadg_info *info; + struct hwmem_alloc_threadg_info *tmp; + + list_for_each_entry_safe(info, tmp, 
&(alloc->threadg_info_list), + list) { + list_del(&info->list); + destroy_alloc_threadg_info(info); + } +} + +static enum hwmem_access get_access(struct hwmem_alloc *alloc) +{ + struct hwmem_alloc_threadg_info *info; + struct pid *my_pid; + bool found = false; + + my_pid = find_get_pid(task_tgid_nr(current)); + if (!my_pid) + return 0; + + list_for_each_entry(info, &(alloc->threadg_info_list), list) { + if (info->threadg_pid == my_pid) { + found = true; + break; + } + } + + put_pid(my_pid); + + if (found) + return info->access; + else + return alloc->default_access; +} + +static void clear_alloc_mem(struct hwmem_alloc *alloc) +{ + cach_set_domain(&alloc->cach_buf, HWMEM_ACCESS_WRITE, + HWMEM_DOMAIN_CPU, NULL); + + memset(alloc->kaddr, 0, alloc->size); +} + +static void destroy_alloc(struct hwmem_alloc *alloc) +{ + list_del(&alloc->list); + + if (alloc->name != 0) { + idr_remove(&global_idr, alloc->name); + alloc->name = 0; + } + + clean_alloc_threadg_info_list(alloc); + + kunmap_alloc(alloc); + + if (!IS_ERR_OR_NULL(alloc->allocator_hndl)) + alloc->mem_type->allocator_api.free( + alloc->mem_type->allocator_instance, + alloc->allocator_hndl); + + kfree(alloc); +} + +static int kmap_alloc(struct hwmem_alloc *alloc) +{ + int ret; + pgprot_t pgprot; + void *alloc_kaddr; + + alloc_kaddr = alloc->mem_type->allocator_api.get_alloc_kaddr( + alloc->mem_type->allocator_instance, alloc->allocator_hndl); + if (IS_ERR(alloc_kaddr)) + return PTR_ERR(alloc_kaddr); + + pgprot = PAGE_KERNEL; + cach_set_pgprot_cache_options(&alloc->cach_buf, &pgprot); + + ret = ioremap_page_range((unsigned long)alloc_kaddr, + (unsigned long)alloc_kaddr + alloc->size, alloc->paddr, pgprot); + if (ret < 0) { + dev_warn(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr, + alloc->paddr + alloc->size); + return ret; + } + + alloc->kaddr = alloc_kaddr; + + return 0; +} + +static void kunmap_alloc(struct hwmem_alloc *alloc) +{ + if (alloc->kaddr == NULL) + return; + + unmap_kernel_range((unsigned 
long)alloc->kaddr, alloc->size); + + alloc->kaddr = NULL; +} + +static struct hwmem_mem_type_struct *resolve_mem_type( + enum hwmem_mem_type mem_type) +{ + unsigned int i; + for (i = 0; i < hwmem_num_mem_types; i++) { + if (hwmem_mem_types[i].id == mem_type) + return &hwmem_mem_types[i]; + } + + return ERR_PTR(-ENOENT); +} + +/* HWMEM API */ + +struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags, + enum hwmem_access def_access, enum hwmem_mem_type mem_type) +{ + int ret; + struct hwmem_alloc *alloc; + + if (hwdev == NULL) { + printk(KERN_ERR "HWMEM: Badly configured\n"); + return ERR_PTR(-ENOMSG); + } + + if (size == 0) + return ERR_PTR(-EINVAL); + + mutex_lock(&lock); + + size = PAGE_ALIGN(size); + + alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto alloc_alloc_failed; + } + + INIT_LIST_HEAD(&alloc->list); + atomic_inc(&alloc->ref_cnt); + alloc->flags = flags; + alloc->default_access = def_access; + INIT_LIST_HEAD(&alloc->threadg_info_list); + alloc->creator = __builtin_return_address(0); + alloc->creator_tgid = task_tgid_nr(current); + + alloc->mem_type = resolve_mem_type(mem_type); + if (IS_ERR(alloc->mem_type)) { + ret = PTR_ERR(alloc->mem_type); + goto resolve_mem_type_failed; + } + + alloc->allocator_hndl = alloc->mem_type->allocator_api.alloc( + alloc->mem_type->allocator_instance, size); + if (IS_ERR(alloc->allocator_hndl)) { + ret = PTR_ERR(alloc->allocator_hndl); + goto allocator_failed; + } + + alloc->paddr = alloc->mem_type->allocator_api.get_alloc_paddr( + alloc->allocator_hndl); + alloc->size = alloc->mem_type->allocator_api.get_alloc_size( + alloc->allocator_hndl); + + cach_init_buf(&alloc->cach_buf, alloc->flags, alloc->size); + ret = kmap_alloc(alloc); + if (ret < 0) + goto kmap_alloc_failed; + cach_set_buf_addrs(&alloc->cach_buf, alloc->kaddr, alloc->paddr); + + list_add_tail(&alloc->list, &alloc_list); + + clear_alloc_mem(alloc); + + goto out; + +kmap_alloc_failed: 
+allocator_failed: +resolve_mem_type_failed: + destroy_alloc(alloc); +alloc_alloc_failed: + alloc = ERR_PTR(ret); + +out: + mutex_unlock(&lock); + + return alloc; +} +EXPORT_SYMBOL(hwmem_alloc); + +void hwmem_release(struct hwmem_alloc *alloc) +{ + mutex_lock(&lock); + + if (atomic_dec_and_test(&alloc->ref_cnt)) + destroy_alloc(alloc); + + mutex_unlock(&lock); +} +EXPORT_SYMBOL(hwmem_release); + +int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access, + enum hwmem_domain domain, struct hwmem_region *region) +{ + mutex_lock(&lock); + + cach_set_domain(&alloc->cach_buf, access, domain, region); + + mutex_unlock(&lock); + + return 0; +} +EXPORT_SYMBOL(hwmem_set_domain); + +int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks, + u32 *mem_chunks_length) +{ + if (*mem_chunks_length < 1) { + *mem_chunks_length = 1; + return -ENOSPC; + } + + mutex_lock(&lock); + + mem_chunks[0].paddr = alloc->paddr; + mem_chunks[0].size = alloc->size; + *mem_chunks_length = 1; + + mutex_unlock(&lock); + + return 0; +} +EXPORT_SYMBOL(hwmem_pin); + +void hwmem_unpin(struct hwmem_alloc *alloc) +{ +} +EXPORT_SYMBOL(hwmem_unpin); + +static void vm_open(struct vm_area_struct *vma) +{ + atomic_inc(&((struct hwmem_alloc *)vma->vm_private_data)->ref_cnt); +} + +static void vm_close(struct vm_area_struct *vma) +{ + hwmem_release((struct hwmem_alloc *)vma->vm_private_data); +} + +int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma) +{ + int ret = 0; + unsigned long vma_size = vma->vm_end - vma->vm_start; + enum hwmem_access access; + mutex_lock(&lock); + + access = get_access(alloc); + + /* Check permissions */ + if ((!(access & HWMEM_ACCESS_WRITE) && + (vma->vm_flags & VM_WRITE)) || + (!(access & HWMEM_ACCESS_READ) && + (vma->vm_flags & VM_READ))) { + ret = -EPERM; + goto illegal_access; + } + + if (vma_size > alloc->size) { + ret = -EINVAL; + goto illegal_size; + } + + /* + * We don't want Linux to do anything (merging etc) with our VMAs as 
+ * the offset is not necessarily valid + */ + vma->vm_flags |= VM_SPECIAL; + cach_set_pgprot_cache_options(&alloc->cach_buf, &vma->vm_page_prot); + vma->vm_private_data = (void *)alloc; + atomic_inc(&alloc->ref_cnt); + vma->vm_ops = &vm_ops; + + ret = remap_pfn_range(vma, vma->vm_start, alloc->paddr >> PAGE_SHIFT, + min(vma_size, (unsigned long)alloc->size), vma->vm_page_prot); + if (ret < 0) + goto map_failed; + + goto out; + +map_failed: + atomic_dec(&alloc->ref_cnt); +illegal_size: +illegal_access: + +out: + mutex_unlock(&lock); + + return ret; +} +EXPORT_SYMBOL(hwmem_mmap); + +void *hwmem_kmap(struct hwmem_alloc *alloc) +{ + void *ret; + + mutex_lock(&lock); + + ret = alloc->kaddr; + + mutex_unlock(&lock); + + return ret; +} +EXPORT_SYMBOL(hwmem_kmap); + +void hwmem_kunmap(struct hwmem_alloc *alloc) +{ +} +EXPORT_SYMBOL(hwmem_kunmap); + +int hwmem_set_access(struct hwmem_alloc *alloc, + enum hwmem_access access, pid_t pid_nr) +{ + int ret; + struct hwmem_alloc_threadg_info *info; + struct pid *pid; + bool found = false; + + pid = find_get_pid(pid_nr); + if (!pid) { + ret = -EINVAL; + goto error_get_pid; + } + + list_for_each_entry(info, &(alloc->threadg_info_list), list) { + if (info->threadg_pid == pid) { + found = true; + break; + } + } + + if (!found) { + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto error_alloc_info; + } + + info->threadg_pid = pid; + info->access = access; + + list_add_tail(&(info->list), &(alloc->threadg_info_list)); + } else { + info->access = access; + } + + return 0; + +error_alloc_info: + put_pid(pid); +error_get_pid: + return ret; +} +EXPORT_SYMBOL(hwmem_set_access); + +void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size, + enum hwmem_mem_type *mem_type, enum hwmem_access *access) +{ + mutex_lock(&lock); + + if (size != NULL) + *size = alloc->size; + if (mem_type != NULL) + *mem_type = alloc->mem_type->id; + if (access != NULL) + *access = get_access(alloc); + + mutex_unlock(&lock); +} 
+EXPORT_SYMBOL(hwmem_get_info); + +s32 hwmem_get_name(struct hwmem_alloc *alloc) +{ + int ret = 0, name; + + mutex_lock(&lock); + + if (alloc->name != 0) { + ret = alloc->name; + goto out; + } + + while (true) { + if (idr_pre_get(&global_idr, GFP_KERNEL) == 0) { + ret = -ENOMEM; + goto pre_get_id_failed; + } + + ret = idr_get_new_above(&global_idr, alloc, 1, &name); + if (ret == 0) + break; + else if (ret != -EAGAIN) + goto get_id_failed; + } + + if (name > S32_MAX) { + ret = -ENOMSG; + goto overflow; + } + + alloc->name = name; + + ret = name; + goto out; + +overflow: + idr_remove(&global_idr, name); +get_id_failed: +pre_get_id_failed: + +out: + mutex_unlock(&lock); + + return ret; +} +EXPORT_SYMBOL(hwmem_get_name); + +struct hwmem_alloc *hwmem_resolve_by_name(s32 name) +{ + struct hwmem_alloc *alloc; + + mutex_lock(&lock); + + alloc = idr_find(&global_idr, name); + if (alloc == NULL) { + alloc = ERR_PTR(-EINVAL); + goto find_failed; + } + atomic_inc(&alloc->ref_cnt); + + goto out; + +find_failed: + +out: + mutex_unlock(&lock); + + return alloc; +} +EXPORT_SYMBOL(hwmem_resolve_by_name); + +/* Debug */ + +#ifdef CONFIG_DEBUG_FS + +static int debugfs_allocs_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); + +static const struct file_operations debugfs_allocs_fops = { + .owner = THIS_MODULE, + .read = debugfs_allocs_read, +}; + +static int print_alloc(struct hwmem_alloc *alloc, char **buf, size_t buf_size) +{ + int ret; + char creator[KSYM_SYMBOL_LEN]; + int i; + + if (sprint_symbol(creator, (unsigned long)alloc->creator) < 0) + creator[0] = '\0'; + + for (i = 0; i < 2; i++) { + size_t buf_size_l; + if (i == 0) + buf_size_l = 0; + else + buf_size_l = buf_size; + + ret = snprintf(*buf, buf_size_l, + "%#x\n" + "\tSize: %u\n" + "\tMemory type: %u\n" + "\tName: %#x\n" + "\tReference count: %i\n" + "\tAllocation flags: %#x\n" + "\t$ settings: %#x\n" + "\tDefault access: %#x\n" + "\tPhysical address: %#x\n" + "\tKernel virtual address: %#x\n" + 
"\tCreator: %s\n" + "\tCreator thread group id: %u\n", + (unsigned int)alloc, alloc->size, alloc->mem_type->id, + alloc->name, atomic_read(&alloc->ref_cnt), + alloc->flags, alloc->cach_buf.cache_settings, + alloc->default_access, alloc->paddr, + (unsigned int)alloc->kaddr, creator, + alloc->creator_tgid); + if (ret < 0) + return -ENOMSG; + else if (ret + 1 > buf_size) + return -EINVAL; + } + + *buf += ret; + + return 0; +} + +static int debugfs_allocs_read(struct file *file, char __user *buf, + size_t count, loff_t *f_pos) +{ + /* + * We assume the supplied buffer and PAGE_SIZE is large enough to hold + * information about at least one alloc, if not no data will be + * returned. + */ + + int ret; + size_t i = 0; + struct hwmem_alloc *curr_alloc; + char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + char *local_buf_pos = local_buf; + size_t available_space = min((size_t)PAGE_SIZE, count); + /* private_data is intialized to NULL in open which I assume is 0. */ + void **curr_pos = &file->private_data; + size_t bytes_read; + + if (local_buf == NULL) + return -ENOMEM; + + mutex_lock(&lock); + + list_for_each_entry(curr_alloc, &alloc_list, list) { + if (i++ < (size_t)*curr_pos) + continue; + + ret = print_alloc(curr_alloc, &local_buf_pos, available_space - + (size_t)(local_buf_pos - local_buf)); + if (ret == -EINVAL) /* No more room */ + break; + else if (ret < 0) + goto out; + + *curr_pos = (void *)i; + } + + bytes_read = (size_t)(local_buf_pos - local_buf); + + ret = copy_to_user(buf, local_buf, bytes_read); + if (ret < 0) + goto out; + + ret = bytes_read; + +out: + kfree(local_buf); + + mutex_unlock(&lock); + + return ret; +} + +static void init_debugfs(void) +{ + /* Hwmem is never unloaded so dropping the dentrys is ok. 
*/ + struct dentry *debugfs_root_dir = debugfs_create_dir("hwmem", NULL); + (void)debugfs_create_file("allocs", 0444, debugfs_root_dir, 0, + &debugfs_allocs_fops); +} + +#endif /* #ifdef CONFIG_DEBUG_FS */ + +/* Module */ + +extern int hwmem_ioctl_init(void); + +static int __devinit hwmem_probe(struct platform_device *pdev) +{ + int ret; + + if (hwdev) { + dev_err(&pdev->dev, "Probed multiple times\n"); + return -EINVAL; + } + + hwdev = pdev; + + /* + * No need to flush the caches here. If we can keep track of the cache + * content then none of our memory will be in the caches, if we can't + * keep track of the cache content we always assume all our memory is + * in the caches. + */ + + ret = hwmem_ioctl_init(); + if (ret < 0) + dev_warn(&pdev->dev, "Failed to start hwmem-ioctl, continuing" + " anyway\n"); + +#ifdef CONFIG_DEBUG_FS + init_debugfs(); +#endif + + dev_info(&pdev->dev, "Probed OK\n"); + + return 0; +} + +static struct platform_driver hwmem_driver = { + .probe = hwmem_probe, + .driver = { + .name = "hwmem", + }, +}; + +static int __init hwmem_init(void) +{ + return platform_driver_register(&hwmem_driver); +} +subsys_initcall(hwmem_init); + +MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Hardware memory driver"); + diff --git a/drivers/misc/i2s/Kconfig b/drivers/misc/i2s/Kconfig new file mode 100644 index 00000000000..569818caa5d --- /dev/null +++ b/drivers/misc/i2s/Kconfig @@ -0,0 +1,19 @@ +# +# U8500 I2S HW kernel configuration +# +config STM_I2S + bool "U8500 I2S hardware driver" + depends on ARCH_U8500 && STE_DMA40 + default y + ---help--- + If you say Y here, you will enable the U8500 I2S hardware driver. + + If unsure, say N. +config STM_MSP_I2S + tristate "U8500 MSP_I2S hardware driver" + depends on ARCH_U8500 && STE_DMA40 && STM_I2S + default y + ---help--- + If you say Y here, you will enable the U8500 MSP_I2S hardware driver. + + If unsure, say N. 
diff --git a/drivers/misc/i2s/Makefile b/drivers/misc/i2s/Makefile new file mode 100644 index 00000000000..75d361d5deb --- /dev/null +++ b/drivers/misc/i2s/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for I2S drivers +# + +nmdk_i2s-objs := i2s.o +obj-$(CONFIG_STM_I2S) += nmdk_i2s.o +obj-$(CONFIG_STM_MSP_I2S) += msp_i2s.o diff --git a/drivers/misc/i2s/i2s.c b/drivers/misc/i2s/i2s.c new file mode 100644 index 00000000000..b4c243b7cb2 --- /dev/null +++ b/drivers/misc/i2s/i2s.c @@ -0,0 +1,631 @@ +/*----------------------------------------------------------------------------*/ +/* copyright STMicroelectronics, 2007. */ +/* */ +/* This program is free software; you can redistribute it and/or modify it */ +/* under the terms of the GNU General Public License as published by the Free */ +/* Software Foundation; either version 2.1 of the License, or (at your option)*/ +/* any later version. */ +/* */ +/* This program is distributed in the hope that it will be useful, but */ +/* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY */ +/* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License */ +/* for more details. */ +/* */ +/* You should have received a copy of the GNU General Public License */ +/* along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +/*----------------------------------------------------------------------------*/ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/cache.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/idr.h> +#include <linux/i2s/i2s.h> +#include <linux/platform_device.h> + +/*******************************************************************************/ +static DEFINE_MUTEX(core_lock); + +static void i2sdev_release(struct device *dev) +{ + struct i2s_device *i2s = to_i2s_device(dev); + + if (i2s->controller) + put_device(&(i2s->controller->dev)); + kfree(dev); +} +static ssize_t +modalias_show(struct device *dev, struct device_attribute *a, char *buf) +{ + const struct i2s_device *i2s = to_i2s_device(dev); + return sprintf(buf, "%s\n", i2s->modalias); +} + +static struct device_attribute i2s_dev_attrs[] = { + __ATTR_RO(modalias), + __ATTR_NULL, +}; + +/* modalias support makes "modprobe $MODALIAS" new-style hotplug work, + * and the sysfs version makes coldplug work too. 
+ */ +static const struct i2s_device_id *i2s_match_id(const struct i2s_device_id *id, + const struct i2s_device *device) +{ + while (id->name[0]) { + if (strcmp(device->modalias, id->name) == 0) + return id; + id++; + } + return NULL; +} + +static int i2s_match_device(struct device *dev, struct device_driver *drv) +{ + const struct i2s_device *device = to_i2s_device(dev); + struct i2s_driver *driver = to_i2s_driver(drv); + if (driver->id_table) + return i2s_match_id(driver->id_table, device) != NULL; + return 0; +} + +static int i2s_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + const struct i2s_device *i2s = to_i2s_device(dev); + + add_uevent_var(env, "MODALIAS=%s", i2s->modalias); + return 0; +} + +#ifdef CONFIG_PM +static int i2s_suspend(struct device *dev, pm_message_t message) +{ + int value = 0; + struct i2s_driver *drv = to_i2s_driver(dev->driver); + + /* suspend will stop irqs and dma; no more i/o */ + if (drv) { + if (drv->suspend) + value = drv->suspend(to_i2s_device(dev), message); + else + dev_dbg(dev, "... can't suspend\n"); + } + return value; +} + +static int i2s_resume(struct device *dev) +{ + int value = 0; + struct i2s_driver *drv = to_i2s_driver(dev->driver); + + /* resume may restart the i/o queue */ + if (drv) { + if (drv->resume) + value = drv->resume(to_i2s_device(dev)); + else + dev_dbg(dev, "... can't resume\n"); + } + return value; +} + +#else +#define i2s_suspend NULL +#define i2s_resume NULL +#endif + +/*This bus is designed to handle various protocols supported by the MSP- ARM Primecell IP + * such as + * I2s, PCM, AC97, TDM .... (refer to the data sheet for the complete list. + * Current MSP driver has the above ones coded. 
+ * */ +struct bus_type i2s_bus_type = { + .name = "i2s", + .dev_attrs = i2s_dev_attrs, + .match = i2s_match_device, + .uevent = i2s_uevent, + .suspend = i2s_suspend, + .resume = i2s_resume, +}; + +EXPORT_SYMBOL_GPL(i2s_bus_type); + +static int i2s_drv_probe(struct device *dev) +{ + const struct i2s_driver *sdrv = to_i2s_driver(dev->driver); + + return sdrv->probe(to_i2s_device(dev)); +} + +static int i2s_drv_remove(struct device *dev) +{ + const struct i2s_driver *sdrv = to_i2s_driver(dev->driver); + + return sdrv->remove(to_i2s_device(dev)); +} + +static void i2s_drv_shutdown(struct device *dev) +{ + const struct i2s_driver *sdrv = to_i2s_driver(dev->driver); + + sdrv->shutdown(to_i2s_device(dev)); +} + +/** + * i2s_register_driver - register a I2S driver + * @sdrv: the driver to register + * Context: can sleep + */ +int i2s_register_driver(struct i2s_driver *sdrv) +{ + sdrv->driver.bus = &i2s_bus_type; + if (sdrv->probe) + sdrv->driver.probe = i2s_drv_probe; + if (sdrv->remove) + sdrv->driver.remove = i2s_drv_remove; + if (sdrv->shutdown) + sdrv->driver.shutdown = i2s_drv_shutdown; + return driver_register(&sdrv->driver); +} + +EXPORT_SYMBOL_GPL(i2s_register_driver); + +/******************************************************************************/ +struct board_i2s_combined_info { + struct i2s_board_info board_info; + struct i2s_device *i2s_dev_p; +}; +struct boardinfo { + struct list_head list; + unsigned n_board_info; + struct board_i2s_combined_info board_i2s_info[0]; +}; + +static LIST_HEAD(board_list); +static DEFINE_MUTEX(board_lock); + +/* + * Get an i2s device. Used in MSP LTP tests. 
+ */ +struct i2s_device *i2s_get_device_from_boardinfo(int chip_select) +{ + struct boardinfo *bi; + struct i2s_device *i2s_dev_p = NULL; + + mutex_lock(&board_lock); + list_for_each_entry(bi, &board_list, list) { + struct board_i2s_combined_info *chip = bi->board_i2s_info; + unsigned n; + + for (n = bi->n_board_info; n > 0; n--, chip++) + if (chip->board_info.chip_select == chip_select) { + i2s_dev_p = chip->i2s_dev_p; + break; + } + if (i2s_dev_p != NULL) + break; + } + mutex_unlock(&board_lock); + + return i2s_dev_p; +} + +EXPORT_SYMBOL_GPL(i2s_get_device_from_boardinfo); + +/* I2S devices should normally not be created by I2S device drivers; that + * would make them board-specific. Similarly with I2S master drivers. + * Device registration normally goes into like arch/.../mach.../board-YYY.c + * with other readonly (flashable) information about mainboard devices. + */ +struct i2s_device *i2s_alloc_device(struct device *device) +{ + struct i2s_device *i2s; + struct device *dev = device->parent; + + get_device(device); + i2s = kzalloc(sizeof *i2s, GFP_KERNEL); + if (!i2s) { + dev_err(dev, "cannot alloc i2s_device\n"); + return NULL; + } + + i2s->dev.parent = dev; + i2s->dev.bus = &i2s_bus_type; + i2s->dev.release = i2sdev_release; + device_initialize(&i2s->dev); + return i2s; +} + +EXPORT_SYMBOL_GPL(i2s_alloc_device); + +/** + * i2s_add_device - Add i2s_device allocated with i2s_alloc_device + * @i2s: i2s_device to register + * + * Companion function to i2s_alloc_device. Devices allocated with + * i2s_alloc_device can be added onto the i2s bus with this function. 
+ * + * Returns 0 on success; negative errno on failure + */ +int i2s_add_device(struct i2s_device *i2s) +{ + static DEFINE_MUTEX(i2s_add_lock); + struct device *dev = i2s->dev.parent; + int status; + + dev_set_name(&i2s->dev, "%s.%u", "i2s", i2s->chip_select); + + mutex_lock(&i2s_add_lock); + + if (bus_find_device_by_name(&i2s_bus_type, NULL, dev_name(&i2s->dev)) + != NULL) { + dev_err(dev, "chipselect %d already in use\n", + i2s->chip_select); + status = -EBUSY; + goto done; + } + + /* Device may be bound to an active driver when this returns */ + status = device_add(&i2s->dev); + if (status < 0) + dev_err(dev, "can't %s %s, status %d\n", + "add", dev_name(&i2s->dev), status); + else + dev_dbg(dev, "registered child %s\n", dev_name(&i2s->dev)); + + done: + mutex_unlock(&i2s_add_lock); + return status; +} + +EXPORT_SYMBOL_GPL(i2s_add_device); + +/** + * i2s_new_device - instantiate one new I2S device + * @i2s_cont: Controller to which device is connected + * @chip: Describes the I2S device + * Context: can sleep + * + * On typical mainboards, this is purely internal; and it's not needed + * after board init creates the hard-wired devices. Some development + * platforms may not be able to use i2s_register_board_info though, and + * this is exported so that driver could add devices (which it would + * learn about out-of-band). + * + * Returns the new device, or NULL. + */ +struct i2s_device *i2s_new_device(struct i2s_controller *i2s_cont, + struct i2s_board_info *chip) +{ + struct i2s_device *proxy; + int status; + + /* NOTE: caller did any chip->bus_num checks necessary. + * + * Also, unless we change the return value convention to use + * error-or-pointer (not NULL-or-pointer), troubleshootability + * suggests syslogged diagnostics are best here (ugh). 
+ */ + + proxy = i2s_alloc_device(&i2s_cont->dev); + if (!proxy) + return NULL; + + WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); + + proxy->chip_select = chip->chip_select; + strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); + proxy->dev.platform_data = (void *)chip->platform_data; + proxy->controller = i2s_cont; + + status = i2s_add_device(proxy); + if (status < 0) { + kfree(proxy); + return NULL; + } + + return proxy; +} + +EXPORT_SYMBOL_GPL(i2s_new_device); + +/** + * i2s_register_board_info - register I2S devices for a given board + * @info: array of chip descriptors + * @n: how many descriptors are provided + * Context: can sleep + * + * Board-specific early init code calls this (probably during arch_initcall) + * with segments of the I2S device table. Any device nodes are created later, + * after the relevant parent I2S controller (id) is defined. We keep + * this table of devices forever, so that reloading a controller driver will + * not make Linux forget about these hard-wired devices. + * + */ +int __init +i2s_register_board_info(struct i2s_board_info const *info, unsigned n) +{ + int i; + struct boardinfo *bi; + + bi = kmalloc(sizeof(*bi) + (n * sizeof(struct board_i2s_combined_info)), GFP_KERNEL); + if (!bi) + return -ENOMEM; + bi->n_board_info = n; + + for (i = 0; i < n; i++) + memcpy(&bi->board_i2s_info[i].board_info, &info[i], sizeof *info); + + mutex_lock(&board_lock); + list_add_tail(&bi->list, &board_list); + mutex_unlock(&board_lock); + return 0; +} + +/** + * scan_boardinfo - Scan, creates and registered new i2s device structure. + * @i2s_cont: i2s controller structure + * Context: process + * + * It will scan the device list that may be registered statically using + * register_board_info func in arch specific directory and call + * i2s_new_device to create and registered i2s device over i2s bus. It is + * called by i2s_add_controller function. + * + * Returns void. 
+ */ +static void scan_boardinfo(struct i2s_controller *i2s_cont) +{ + struct boardinfo *bi; + + mutex_lock(&board_lock); + list_for_each_entry(bi, &board_list, list) { + struct board_i2s_combined_info *chip = bi->board_i2s_info; + unsigned n; + + for (n = bi->n_board_info; n > 0; n--, chip++) { + if (chip->board_info.chip_select != i2s_cont->id) + continue; + /* NOTE: this relies on i2s_new_device to + * issue diagnostics when given bogus inputs + */ + chip->i2s_dev_p = i2s_new_device(i2s_cont, &chip->board_info); + } + } + mutex_unlock(&board_lock); +} + +/******************************************************************************/ +/**I2S Controller inittialization*/ +static void i2s_controller_dev_release(struct device *dev) +{ + struct i2s_controller *i2s_cont; + i2s_cont = container_of(dev, struct i2s_controller, dev); + kfree(i2s_cont); +} + +static ssize_t +show_controller_name(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct i2s_controller *cont = to_i2s_controller(dev); + return sprintf(buf, "%s\n", cont->name); +} + +static struct device_attribute i2s_controller_attrs[] = { + __ATTR(name, S_IRUGO, show_controller_name, NULL), + {}, +}; + +static struct class i2s_controller_class = { + .owner = THIS_MODULE, + .name = "i2s-controller", + .dev_attrs = i2s_controller_attrs, +}; + +static int i2s_register_controller(struct i2s_controller *cont) +{ + int res = 0; + mutex_init(&cont->bus_lock); + + mutex_lock(&core_lock); + + /* Add the controller to the driver core. + * If the parent pointer is not set up, + * we add this controller to the host bus. 
+ */ + if (cont->dev.parent == NULL) { + cont->dev.parent = &platform_bus; + pr_debug("I2S controller driver [%s] forgot to specify " + "physical device\n", cont->name); + } + dev_set_name(&cont->dev, "I2Scrlr-%d", cont->id); + cont->dev.release = &i2s_controller_dev_release; + cont->dev.class = &i2s_controller_class; + res = device_register(&cont->dev); + if (res) + goto out_unlock; + + dev_dbg(&cont->dev, "controller [%s] registered\n", cont->name); + scan_boardinfo(cont); + out_unlock: + mutex_unlock(&core_lock); + return res; +} + +/** + * i2s_add_controller - declare i2s controller, use dynamic bus number + * @controller: the controller to add + * Context: can sleep + * + */ +int i2s_add_controller(struct i2s_controller *controller) +{ + return i2s_register_controller(controller); +} + +EXPORT_SYMBOL(i2s_add_controller); + +static int __unregister(struct device *dev, void *controller_dev) +{ + /* note: before about 2.6.14-rc1 this would corrupt memory: */ + if (dev != controller_dev) + i2s_unregister_device(to_i2s_device(dev)); + return 0; +} + +/** + * i2s_del_controller - unregister I2S controller + * @cont: the controller being unregistered + * Context: can sleep + * + * This unregisters an I2S controller which was previously registered + * by @i2s_add_controller. + */ +int i2s_del_controller(struct i2s_controller *cont) +{ + int res = 0; + int dummy; + mutex_lock(&core_lock); + + dummy = device_for_each_child(cont->dev.parent, &cont->dev, + __unregister); + device_unregister(&cont->dev); + mutex_unlock(&core_lock); + return res; +} + +EXPORT_SYMBOL(i2s_del_controller); + +/******************************************************************************/ +/*I2S interface apis*/ + +/** + * i2s_transfer - Main i2s transfer function. + * @i2s_cont: i2s controller structure passed by client driver. + * @message: i2s message structure contains transceive info. + * Context: process or interrupt. + * + * This API is called by client i2s driver as i2s_xfer funtion. 
It will handle + * main i2s transfer over i2s bus. The controller should registered its own + * functions using i2s algorithm structure. + * + * Returns error(-1) in case of failure or success(0). + */ +int i2s_transfer(struct i2s_controller *i2s_cont, struct i2s_message *message) +{ + return i2s_cont->algo->cont_transfer(i2s_cont, message); + +} + +EXPORT_SYMBOL(i2s_transfer); + +/** + * i2s_cleanup - Close the current i2s connection btw controller and client. + * @i2s_cont: i2s controller structure + * @flag: It indicates the functionality that needs to be disabled. + * Context: process + * + * This API will disable and reset the controller's configuration. Reset the + * controller so that i2s client driver can reconfigure with new configuration. + * Controller should release all the necessary resources which was acquired + * during setup. + * + * Returns error(-1) in case of failure or success(0). + */ +int i2s_cleanup(struct i2s_controller *i2s_cont, i2s_flag flag) +{ + int status = 0; + status = i2s_cont->algo->cont_cleanup(i2s_cont, flag); + if (status) + return -1; + else + return 0; +} + +EXPORT_SYMBOL(i2s_cleanup); + +/** + * i2s_setup - configures and enables the I2S controller. + * @i2s_cont: i2s controller sent by i2s device. + * @config: specifies the configuration parameters. + * + * This function configures the I2S controller with the client configuration. + * Controller was already registered on I2S bus by some master controller + * driver. + * + * Returns error(-1) in case of failure else success(0) + */ +int i2s_setup(struct i2s_controller *i2s_cont, void *config) +{ + return i2s_cont->algo->cont_setup(i2s_cont, config); +} + +EXPORT_SYMBOL(i2s_setup); + +/** + * i2s_hw_status - Get the current hw status for the i2s controller. + * @i2s_cont: i2s controller structure passed by client driver. + * Context: process or interrupt. + * + * This API is called by client i2s driver to find out current hw status. 
+ * The controller should registered its own functions using i2s algorithm structure. + * + * Returns current hw status register. + */ +int i2s_hw_status(struct i2s_controller *i2s_cont) +{ + return i2s_cont->algo->cont_hw_status(i2s_cont); +} + +/** + * i2s_get_pointer - Get the current dma_addr_t for the i2s controller. + * @i2s_cont: i2s controller structure passed by client driver. + * @i2s_direction: Specifies TX or RX direction. + * Context: process or interrupt. + * + * This API is called by client i2s driver to return a dma_addr_t corresponding + * to the position of the DMA-controller. + * The controller should registered its own functions using i2s algorithm structure. + * + * Returns current hw status register. + */ +dma_addr_t i2s_get_pointer(struct i2s_controller *i2s_cont, + enum i2s_direction_t i2s_direction) +{ + return i2s_cont->algo->cont_get_pointer(i2s_cont, i2s_direction); +} + +/******************************************************************************/ + +static int __init i2s_init(void) +{ + int status; + + status = bus_register(&i2s_bus_type); + if (status < 0) + goto err0; + + status = class_register(&i2s_controller_class); + if (status < 0) + goto err1; + return 0; + + err1: + bus_unregister(&i2s_bus_type); + err0: + return status; +} + +static void __exit i2s_exit(void) +{ + class_unregister(&i2s_controller_class); + bus_unregister(&i2s_bus_type); +} + +subsys_initcall(i2s_init); +module_exit(i2s_exit); + +MODULE_AUTHOR("Sandeep Kaushik, <sandeep-mmc.kaushik@st.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/i2s/msp_i2s.c b/drivers/misc/i2s/msp_i2s.c new file mode 100644 index 00000000000..6cd04a0b699 --- /dev/null +++ b/drivers/misc/i2s/msp_i2s.c @@ -0,0 +1,2046 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License terms: + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/pfn.h> + +#include <linux/regulator/consumer.h> +#include <mach/prcmu-fw-api.h> +#include <mach/hardware.h> +#include <asm/io.h> +#include <asm/delay.h> +#include <asm/irq.h> +#include <linux/dmaengine.h> + +#include <mach/hardware.h> +#include <mach/irqs.h> +#include <linux/i2s/i2s.h> +#include <mach/msp.h> +#include <linux/dma-mapping.h> + +struct regulator *msp_vape_supply; + +#define STM_MSP_NAME "STM_MSP" +#define MSP_NAME "msp" +#define DRIVER_DEBUG_PFX "MSP" +#define DRIVER_DEBUG CONFIG_STM_MSP_DEBUG +#define DRIVER_DBG "MSP" +#define NMDK_DBG /* message level */ + +extern struct driver_debug_st DBG_ST; + /* Protocol desciptors */ +static const struct msp_protocol_desc protocol_desc_tab[] = { + I2S_PROTOCOL_DESC, + PCM_PROTOCOL_DESC, + PCM_COMPAND_PROTOCOL_DESC, + AC97_PROTOCOL_DESC, + SPI_MASTER_PROTOCOL_DESC, + SPI_SLAVE_PROTOCOL_DESC, +}; + +/* Local static functions */ +static int msp_dma_xfer(struct msp *msp, struct i2s_message *msg); +static int msp_polling_xfer(struct msp *msp, struct i2s_message *msg); +static int msp_interrupt_xfer(struct msp *msp, struct i2s_message *msg); +static int msp_start_dma(struct msp *msp, int transmit, dma_addr_t data, + size_t bytes); +static int configure_protocol(struct msp *msp, + struct msp_config *config); +static int configure_clock(struct msp *msp, + struct msp_config *config); +static int configure_multichannel(struct msp *msp, + struct msp_config *config); +static int stm_msp_configure_enable(struct i2s_controller *i2s_cont, + void *configuration); +static int stm_msp_transceive_data(struct i2s_controller *i2s_cont, + struct 
i2s_message *message); + +static int stm_msp_disable(struct msp *msp, int direction, + i2s_flag flag); +static int stm_msp_close(struct i2s_controller *i2s_cont, i2s_flag flag); +static int stm_msp_hw_status(struct i2s_controller *i2s_cont); +static dma_addr_t stm_msp_get_pointer(struct i2s_controller *i2s_cont, + enum i2s_direction_t i2s_direction); + +#define I2S_DEVICE "i2s_device" +static struct i2s_algorithm i2s_algo = { + .cont_setup = stm_msp_configure_enable, + .cont_transfer = stm_msp_transceive_data, + .cont_cleanup = stm_msp_close, + .cont_hw_status = stm_msp_hw_status, + .cont_get_pointer = stm_msp_get_pointer, +}; + +/** + * stm_msp_write - writel a value to specified register + * @value: value + * @reg: pointer to register' address + * Context: atomic(can be both process and interrupt) + * Returns void. + */ +static inline void stm_msp_write(u32 value, void __iomem *reg) +{ + writel(value, reg); +} + +/** + * stm_msp_read - readl a value to specified register + * @reg: pointer to register' address + * Context: atomic(can be both process and interrupt) + * Returns u32 register's value. 
+ */ +static inline u32 stm_msp_read(void __iomem *reg) +{ + return readl(reg); +} + +static void u8_msp_read(struct trans_data *xfer_data) +{ + struct i2s_message *message = &xfer_data->message; + while ((message->rx_offset < message->rxbytes) && + !((stm_msp_read(xfer_data->msp->registers + MSP_FLR)) & + RX_FIFO_EMPTY)) { + message->rx_offset += 1; + *(u8 *) message->rxdata = + (u8) stm_msp_read(xfer_data->msp->registers + MSP_DR); + message->rxdata += 1; + } +} + +static void u16_msp_read(struct trans_data *xfer_data) +{ + struct i2s_message *message = &xfer_data->message; + while ((message->rx_offset < message->rxbytes) && + !((stm_msp_read(xfer_data->msp->registers + MSP_FLR)) & + RX_FIFO_EMPTY)) { + message->rx_offset += 2; + *(u16 *) message->rxdata = + (u16) stm_msp_read(xfer_data->msp->registers + MSP_DR); + message->rxdata += 2; + } +} + +/** + * u32_msp_read - Msp 32bit read function. + * @xfer_data: transfer data structure. + * + * It reads 32bit data from msp receive fifo until it gets empty. + * + * Returns void. 
+ */ +static void u32_msp_read(struct trans_data *xfer_data) +{ + struct i2s_message *message = &xfer_data->message; + while ((message->rx_offset < message->rxbytes) && + !((stm_msp_read(xfer_data->msp->registers + MSP_FLR)) & + RX_FIFO_EMPTY)) { + *(u32 *) message->rxdata = + (u32) stm_msp_read(xfer_data->msp->registers + MSP_DR); + message->rx_offset += 4; + message->rxdata += 4; + } +} +static void u8_msp_write(struct trans_data *xfer_data) +{ + struct i2s_message *message = &xfer_data->message; + while ((message->tx_offset < message->txbytes) && + !((stm_msp_read(xfer_data->msp->registers + MSP_FLR)) & + TX_FIFO_FULL)) { + message->tx_offset += 1; + stm_msp_write(*(u8 *) message->txdata, + xfer_data->msp->registers + MSP_DR); + message->txdata += 1; + } +} + +static void u16_msp_write(struct trans_data *xfer_data) +{ + struct i2s_message *message = &xfer_data->message; + while ((message->tx_offset < message->txbytes) && + !((stm_msp_read(xfer_data->msp->registers + MSP_FLR)) & + TX_FIFO_FULL)) { + message->tx_offset += 2; + stm_msp_write(*(u16 *) message->txdata, + xfer_data->msp->registers + MSP_DR); + message->txdata += 2; + } +} + +/** + * u32_msp_write - Msp 32bit write function. + * @xfer_data: transfer data structure. + * + * It writes 32bit data to msp transmit fifo until it gets full. + * + * Returns void. + */ +static void u32_msp_write(struct trans_data *xfer_data) +{ + struct i2s_message *message = &xfer_data->message; + while ((message->tx_offset < message->txbytes) && + !((stm_msp_read(xfer_data->msp->registers + MSP_FLR)) & + TX_FIFO_FULL)) { + message->tx_offset += 4; + stm_msp_write(*(u32 *) message->txdata, + xfer_data->msp->registers + MSP_DR); + message->txdata += 4; + } +} + +/** + * set_transmit_protocol_descriptor - Set the Transmit Configuration register. + * @msp: main msp controller structure. + * @protocol_desc: pointer to protocol descriptor structure. + * @data_size: Run time configurable element length. 
+ * + * It will setup transmit configuration register of msp. + * Various values related to a particular protocol can be set like, elemnet + * length, frame length, endianess etc. + * + * Returns void. + */ +static void set_transmit_protocol_descriptor(struct msp *msp, + struct msp_protocol_desc + *protocol_desc, + enum msp_data_size data_size) +{ + u32 temp_reg = 0; + + temp_reg |= MSP_P2_ENABLE_BIT(protocol_desc->tx_phase_mode); + temp_reg |= MSP_P2_START_MODE_BIT(protocol_desc->tx_phase2_start_mode); + temp_reg |= MSP_P1_FRAME_LEN_BITS(protocol_desc->tx_frame_length_1); + temp_reg |= MSP_P2_FRAME_LEN_BITS(protocol_desc->tx_frame_length_2); + if (msp->def_elem_len) { + temp_reg |= + MSP_P1_ELEM_LEN_BITS(protocol_desc->tx_element_length_1); + temp_reg |= + MSP_P2_ELEM_LEN_BITS(protocol_desc->tx_element_length_2); + if (protocol_desc->tx_element_length_1 == + protocol_desc->tx_element_length_2) { + msp->actual_data_size = + protocol_desc->tx_element_length_1; + } else { + msp->actual_data_size = data_size; + } + } else { + temp_reg |= MSP_P1_ELEM_LEN_BITS(data_size); + temp_reg |= MSP_P2_ELEM_LEN_BITS(data_size); + msp->actual_data_size = data_size; + } + temp_reg |= MSP_DATA_DELAY_BITS(protocol_desc->tx_data_delay); + temp_reg |= + MSP_SET_ENDIANNES_BIT(protocol_desc->tx_bit_transfer_format); + temp_reg |= MSP_FRAME_SYNC_POL(protocol_desc->tx_frame_sync_pol); + temp_reg |= MSP_DATA_WORD_SWAP(protocol_desc->tx_half_word_swap); + temp_reg |= MSP_SET_COMPANDING_MODE(protocol_desc->compression_mode); + temp_reg |= MSP_SET_FRAME_SYNC_IGNORE(protocol_desc->frame_sync_ignore); + + stm_msp_write(temp_reg, msp->registers + MSP_TCF); +} + +/** + * set_receive_protocol_descriptor - Set the Receive Configuration register. + * @msp: main msp controller structure. + * @protocol_desc: pointer to protocol descriptor structure. + * @data_size: Run time configurable element length. + * + * It will setup receive configuration register of msp. 
+ * Various values related to a particular protocol can be set like, elemnet + * length, frame length, endianess etc. + * + * Returns void. + */ +static void set_receive_protocol_descriptor(struct msp *msp, + struct msp_protocol_desc + *protocol_desc, + enum msp_data_size + data_size) +{ + u32 temp_reg = 0; + + temp_reg |= MSP_P2_ENABLE_BIT(protocol_desc->rx_phase_mode); + temp_reg |= MSP_P2_START_MODE_BIT(protocol_desc->rx_phase2_start_mode); + temp_reg |= MSP_P1_FRAME_LEN_BITS(protocol_desc->rx_frame_length_1); + temp_reg |= MSP_P2_FRAME_LEN_BITS(protocol_desc->rx_frame_length_2); + if (msp->def_elem_len) { + temp_reg |= + MSP_P1_ELEM_LEN_BITS(protocol_desc->rx_element_length_1); + temp_reg |= + MSP_P2_ELEM_LEN_BITS(protocol_desc->rx_element_length_2); + if (protocol_desc->rx_element_length_1 == + protocol_desc->rx_element_length_2) { + msp->actual_data_size = + protocol_desc->rx_element_length_1; + } else { + msp->actual_data_size = data_size; + } + } else { + temp_reg |= MSP_P1_ELEM_LEN_BITS(data_size); + temp_reg |= MSP_P2_ELEM_LEN_BITS(data_size); + msp->actual_data_size = data_size; + } + + temp_reg |= MSP_DATA_DELAY_BITS(protocol_desc->rx_data_delay); + temp_reg |= + MSP_SET_ENDIANNES_BIT(protocol_desc->rx_bit_transfer_format); + temp_reg |= MSP_FRAME_SYNC_POL(protocol_desc->rx_frame_sync_pol); + temp_reg |= MSP_DATA_WORD_SWAP(protocol_desc->rx_half_word_swap); + temp_reg |= MSP_SET_COMPANDING_MODE(protocol_desc->expansion_mode); + temp_reg |= MSP_SET_FRAME_SYNC_IGNORE(protocol_desc->frame_sync_ignore); + + stm_msp_write(temp_reg, msp->registers + MSP_RCF); + +} + +/** + * configure_protocol - Configures transmit and receive protocol. + * @msp: main msp controller structure. + * @config: configuration structure passed by client driver + * + * This will configure transmit and receive protocol decriptors. + * + * Returns error(-1) on failure else success(0). 
+ */ +static int configure_protocol(struct msp *msp, + struct msp_config *config) +{ + int direction; + struct msp_protocol_desc *protocol_desc; + enum msp_data_size data_size; + u32 temp_reg = 0; + + data_size = config->data_size; + msp->def_elem_len = config->def_elem_len; + direction = config->direction; + if (config->default_protocol_desc == 1) { + if (config->protocol >= MSP_INVALID_PROTOCOL) { + printk(KERN_ERR + "invalid protocol in configure_protocol()\n"); + return -EINVAL; + } + protocol_desc = + (struct msp_protocol_desc *)&protocol_desc_tab[config-> + protocol]; + } else { + protocol_desc = + (struct msp_protocol_desc *)&config->protocol_desc; + } + + if (data_size < MSP_DATA_BITS_DEFAULT + || data_size > MSP_DATA_BITS_32) { + printk(KERN_ERR + "invalid data size requested in configure_protocol()\n"); + return -EINVAL; + } + + switch (direction) { + case MSP_TRANSMIT_MODE: + set_transmit_protocol_descriptor(msp, protocol_desc, data_size); + break; + case MSP_RECEIVE_MODE: + set_receive_protocol_descriptor(msp, protocol_desc, data_size); + break; + case MSP_BOTH_T_R_MODE: + set_transmit_protocol_descriptor(msp, protocol_desc, data_size); + set_receive_protocol_descriptor(msp, protocol_desc, data_size); + break; + default: + printk(KERN_ERR "Invalid direction given\n"); + return -EINVAL; + } + /* The below code is needed for both Rx and Tx path can't separate + * them. + */ + temp_reg = stm_msp_read(msp->registers + MSP_GCR) & ~TX_CLK_POL_RISING; + temp_reg |= MSP_TX_CLKPOL_BIT(protocol_desc->tx_clock_pol); + stm_msp_write(temp_reg, msp->registers + MSP_GCR); + temp_reg = stm_msp_read(msp->registers + MSP_GCR) & ~RX_CLK_POL_RISING; + temp_reg |= MSP_RX_CLKPOL_BIT(protocol_desc->rx_clock_pol); + stm_msp_write(temp_reg, msp->registers + MSP_GCR); + + return 0; +} + +/** + * configure_clock - Set clock in sample rate generator. + * @msp: main msp controller structure. 
+ * @config: configuration structure passed by client driver + * + * This will set the frame width and period. Also enable sample rate generator + * + * Returns error(-1) on failure else success(0). + */ +static int configure_clock(struct msp *msp, + struct msp_config *config) +{ + + u32 dummy; + u32 frame_per = 0; + u32 sck_div = 0; + u32 frame_width = 0; + u32 temp_reg = 0; + u32 bit_clock = 0; + struct msp_protocol_desc *protocol_desc = NULL; + stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) & + (~(SRG_ENABLE))), msp->registers + MSP_GCR); + + if (config->default_protocol_desc) { + protocol_desc = + (struct msp_protocol_desc *)&protocol_desc_tab[config-> + protocol]; + } else { + protocol_desc = + (struct msp_protocol_desc *)&config->protocol_desc; + } + + switch (config->protocol) { + case MSP_PCM_PROTOCOL: + case MSP_PCM_COMPAND_PROTOCOL: + frame_width = protocol_desc->frame_width; + sck_div = + config->input_clock_freq / (config->frame_freq * + (protocol_desc-> + total_clocks_for_one_frame)); + frame_per = protocol_desc->frame_period; + break; + case MSP_I2S_PROTOCOL: + frame_width = protocol_desc->frame_width; + sck_div = + config->input_clock_freq / (config->frame_freq * + (protocol_desc-> + total_clocks_for_one_frame)); + frame_per = protocol_desc->frame_period; + + break; + case MSP_AC97_PROTOCOL: + /* Not supported */ + printk(KERN_WARNING "AC97 protocol not supported\n"); + return -ENOSYS; + default: + printk(KERN_ERR "Invalid mode attempted for setting clocks\n"); + return -EINVAL; + } + + temp_reg = (sck_div - 1) & SCK_DIV_MASK; + temp_reg |= FRAME_WIDTH_BITS(frame_width); + temp_reg |= FRAME_PERIOD_BITS(frame_per); + stm_msp_write(temp_reg, msp->registers + MSP_SRG); + + /* Input clock frequency value configured is in MHz/1000 */ + bit_clock = (config->input_clock_freq * 1000)/(sck_div + 1); + + /* If the bit clock is higher than 19.2MHz, Vape should be run in 100% OPP */ + /* Only consider OPP 100% when bit-clock is used, i.e. 
MSP master mode */ + if ((bit_clock > 19200000) && ((config->tx_clock_sel != 0) || (config->rx_clock_sel != 0))) { + prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "msp_i2s", 100); + msp->vape_opp_constraint = 1; + } else { + prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "msp_i2s", 50); + msp->vape_opp_constraint = 0; + } + + /* Wait a bit */ + dummy = ((stm_msp_read(msp->registers + MSP_SRG)) >> FRWID_SHIFT) & 0x0000003F; + + /* Enable clock */ + stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) | ((SRG_ENABLE))), + msp->registers + MSP_GCR); + + /* Another wait */ + dummy = + ((stm_msp_read(msp->registers + MSP_SRG)) >> FRWID_SHIFT) & + 0x0000003F; + return 0; +} + +/** + * configure_multichannel - Enable multichannel support for transmit & receive. + * @msp: main msp controller structure. + * @config: configuration structure passed by client driver + * + * This will enable multichannel support for transmit and receive. + * It will set Receive comparator also if configured. + * + * Returns error(-1) on failure else success(0). 
+ */ +static int configure_multichannel(struct msp *msp, + struct msp_config *config) +{ + struct msp_protocol_desc *protocol_desc; + struct msp_multichannel_config *mult_config; + if (config->default_protocol_desc == 1) { + if (config->protocol >= MSP_INVALID_PROTOCOL) { + printk(KERN_ERR + "invalid protocol in configure_protocol()\n"); + return -EINVAL; + } + protocol_desc = + (struct msp_protocol_desc *)&protocol_desc_tab[config-> + protocol]; + } else { + protocol_desc = + (struct msp_protocol_desc *)&config->protocol_desc; + } + mult_config = &config->multichannel_config; + if (true == mult_config->tx_multichannel_enable) { + if (MSP_SINGLE_PHASE == protocol_desc->tx_phase_mode) { + stm_msp_write((stm_msp_read(msp->registers + MSP_MCR) | + ((mult_config-> + tx_multichannel_enable << TMCEN_BIT) & + (0x0000020))), + msp->registers + MSP_MCR); + stm_msp_write(mult_config->tx_channel_0_enable, + msp->registers + MSP_TCE0); + stm_msp_write(mult_config->tx_channel_1_enable, + msp->registers + MSP_TCE1); + stm_msp_write(mult_config->tx_channel_2_enable, + msp->registers + MSP_TCE2); + stm_msp_write(mult_config->tx_channel_3_enable, + msp->registers + MSP_TCE3); + } else { + printk(KERN_ERR "Not in authorised mode\n"); + return -1; + } + } + if (true == mult_config->rx_multichannel_enable) { + if (MSP_SINGLE_PHASE == protocol_desc->rx_phase_mode) { + stm_msp_write((stm_msp_read(msp->registers + MSP_MCR) | + ((mult_config-> + rx_multichannel_enable << RMCEN_BIT) & + (0x0000001))), + msp->registers + MSP_MCR); + stm_msp_write(mult_config->rx_channel_0_enable, + msp->registers + MSP_RCE0); + stm_msp_write(mult_config->rx_channel_1_enable, + msp->registers + MSP_RCE1); + stm_msp_write(mult_config->rx_channel_2_enable, + msp->registers + MSP_RCE2); + stm_msp_write(mult_config->rx_channel_3_enable, + msp->registers + MSP_RCE3); + } else { + printk(KERN_ERR "Not in authorised mode\n"); + return -1; + } + if (mult_config->rx_comparison_enable_mode) { + 
stm_msp_write((stm_msp_read(msp->registers + MSP_MCR) | + ((mult_config-> + rx_comparison_enable_mode << RCMPM_BIT) + & (0x0000018))), + msp->registers + MSP_MCR); + + stm_msp_write(mult_config->comparison_mask, + msp->registers + MSP_RCM); + stm_msp_write(mult_config->comparison_value, + msp->registers + MSP_RCV); + + } + } + return 0; + +} + +/** + * configure_dma - configure dma channel for transmit or receive. + * @msp: msp structure + * @config: configuration structure. + * Context: process + * + * It will configure dma channels and request them in Logical mode for both + * transmit and recevie modes.It also register the respective callback handlers + * for DMA. + * + * Returns void. + */ +void configure_dma(struct msp *msp, struct msp_config *config) +{ + struct stedma40_chan_cfg *rx_dma_info = msp->dma_cfg_rx; + struct stedma40_chan_cfg *tx_dma_info = msp->dma_cfg_tx; + dma_cap_mask_t mask; + + if (config->direction == MSP_TRANSMIT_MODE + || config->direction == MSP_BOTH_T_R_MODE) { + + if (msp->tx_pipeid != NULL) { + dma_release_channel(msp->tx_pipeid); + msp->tx_pipeid = NULL; + } + + if (config->data_size == MSP_DATA_BITS_32) + tx_dma_info->src_info.data_width = STEDMA40_WORD_WIDTH; + else if (config->data_size == MSP_DATA_BITS_16) + tx_dma_info->src_info.data_width + = STEDMA40_HALFWORD_WIDTH; + else if (config->data_size == MSP_DATA_BITS_8) + tx_dma_info->src_info.data_width + = STEDMA40_BYTE_WIDTH; + else + printk(KERN_ERR "Wrong data size\n"); + + if (config->data_size == MSP_DATA_BITS_32) + tx_dma_info->dst_info.data_width = STEDMA40_WORD_WIDTH; + else if (config->data_size == MSP_DATA_BITS_16) + tx_dma_info->dst_info.data_width + = STEDMA40_HALFWORD_WIDTH; + else if (config->data_size == MSP_DATA_BITS_8) + tx_dma_info->dst_info.data_width + = STEDMA40_BYTE_WIDTH; + else + printk(KERN_ERR "Wrong data size\n"); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + msp->tx_pipeid = dma_request_channel(mask, stedma40_filter, + tx_dma_info); + } + if 
(config->direction == MSP_RECEIVE_MODE + || config->direction == MSP_BOTH_T_R_MODE) { + + if (msp->rx_pipeid != NULL) { + dma_release_channel(msp->rx_pipeid); + msp->rx_pipeid = NULL; + } + + if (config->data_size == MSP_DATA_BITS_32) + rx_dma_info->src_info.data_width = STEDMA40_WORD_WIDTH; + else if (config->data_size == MSP_DATA_BITS_16) + rx_dma_info->src_info.data_width + = STEDMA40_HALFWORD_WIDTH; + else if (config->data_size == MSP_DATA_BITS_8) + rx_dma_info->src_info.data_width = STEDMA40_BYTE_WIDTH; + else + printk(KERN_ERR "Wrong data size\n"); + + if (config->data_size == MSP_DATA_BITS_32) + rx_dma_info->dst_info.data_width = STEDMA40_WORD_WIDTH; + else if (config->data_size == MSP_DATA_BITS_16) + rx_dma_info->dst_info.data_width + = STEDMA40_HALFWORD_WIDTH; + else if (config->data_size == MSP_DATA_BITS_8) + rx_dma_info->dst_info.data_width = STEDMA40_BYTE_WIDTH; + else + printk(KERN_ERR "Wrong data size\n"); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + msp->rx_pipeid = dma_request_channel(mask, stedma40_filter, + rx_dma_info); + } + +} + +/** + * msp_enable - Setup the msp configuration. + * @msp: msp data contains main msp structure. + * @config: configuration structure sent by i2s client driver. + * Context: process + * + * Main msp configuring functions to configure msp in accordance with msp + * protocol descriptor, configuring msp clock,setup transfer mode selected by + * user like DMA, interrupt or polling and in the end enable RX and Tx path. + * + * Returns error(-1) in case of failure or success(0). 
 */
static int msp_enable(struct msp *msp, struct msp_config *config)
{
	int status = 0;
	int state;

	/* Check msp state whether in RUN or CONFIGURED Mode */
	state = msp->msp_state;
	if (state == MSP_STATE_IDLE) {
		/* First configuration after idle: run the board-specific
		 * init hook (pin muxing etc.), if one was provided. */
		if (msp->plat_init) {
			status = msp->plat_init();
			if (status) {
				printk(KERN_ERR "Error in msp_i2s_init,"
				       " status is %d\n", status);
				return status;
			}
		}
	}

	/* Configure msp with protocol dependent settings */
	configure_protocol(msp, config);
	configure_clock(msp, config);
	if (config->multichannel_configured == 1) {
		status = configure_multichannel(msp, config);
		if (status)
			printk(KERN_ERR "multichannel can't be configured\n");
	}
	msp->work_mode = config->work_mode;

	/* DMA mode requires a platform-supplied channel configuration for
	 * each requested direction; reject the request early if missing. */
	if (msp->work_mode == MSP_DMA_MODE && !msp->dma_cfg_rx) {
		switch (config->direction) {
		case MSP_RECEIVE_MODE:
		case MSP_BOTH_T_R_MODE:
			dev_err(&msp->i2s_cont->dev, "RX DMA not available");
			return -EINVAL;
		}
	}

	if (msp->work_mode == MSP_DMA_MODE && !msp->dma_cfg_tx) {
		switch (config->direction) {
		case MSP_TRANSMIT_MODE:
		case MSP_BOTH_T_R_MODE:
			dev_err(&msp->i2s_cont->dev, "TX DMA not available");
			return -EINVAL;
		}
	}

	switch (config->direction) {
	case MSP_TRANSMIT_MODE:
		/*Currently they are ignored
		stm_msp_write((stm_msp_read(msp->registers + MSP_IMSC) |
			TRANSMIT_UNDERRUN_ERR_INT |
			TRANSMIT_FRAME_SYNC_ERR_INT),
			msp->registers + MSP_IMSC); */
		if (config->work_mode == MSP_DMA_MODE) {
			stm_msp_write(stm_msp_read(msp->registers + MSP_DMACR) |
				      TX_DMA_ENABLE,
				      msp->registers + MSP_DMACR);

			msp->xfer_data.tx_handler = config->handler;
			msp->xfer_data.tx_callback_data =
				config->tx_callback_data;
			configure_dma(msp, config);
		}
		if (config->work_mode == MSP_POLLING_MODE) {
			stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) |
				       (TX_ENABLE)), msp->registers + MSP_GCR);
		}
		if (msp->work_mode != MSP_DMA_MODE) {
			/* Non-DMA modes use PIO; pick the write routine that
			 * matches the element size (10..16 bit elements use
			 * 16-bit accesses, larger ones 32-bit accesses). */
			switch (msp->actual_data_size) {
			case MSP_DATA_BITS_8:
				msp->write = u8_msp_write;
				break;
			case MSP_DATA_BITS_10:
			case MSP_DATA_BITS_12:
			case MSP_DATA_BITS_14:
			case MSP_DATA_BITS_16:
				msp->write = u16_msp_write;
				break;
			case MSP_DATA_BITS_20:
			case MSP_DATA_BITS_24:
			case MSP_DATA_BITS_32:
			default:
				msp->write = u32_msp_write;
				break;
			}
			msp->xfer_data.tx_handler = config->handler;
			msp->xfer_data.tx_callback_data =
				config->tx_callback_data;
			msp->xfer_data.rx_callback_data =
				config->rx_callback_data;
			msp->xfer_data.msp = msp;
		}
		break;
	case MSP_RECEIVE_MODE:
		/*Currently they are ignored
		stm_msp_write(stm_msp_read(msp->registers + MSP_IMSC) |
		RECEIVE_OVERRUN_ERROR_INT | RECEIVE_FRAME_SYNC_ERR_INT,
		msp->registers + MSP_IMSC); */
		if (config->work_mode == MSP_DMA_MODE) {
			stm_msp_write(stm_msp_read(msp->registers + MSP_DMACR) |
				      RX_DMA_ENABLE,
				      msp->registers + MSP_DMACR);

			msp->xfer_data.rx_handler = config->handler;
			msp->xfer_data.rx_callback_data =
				config->rx_callback_data;

			configure_dma(msp, config);
		}
		if (config->work_mode == MSP_POLLING_MODE) {
			stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) |
				       (RX_ENABLE)), msp->registers + MSP_GCR);
		}
		if (msp->work_mode != MSP_DMA_MODE) {
			/* PIO read routine selection, mirroring the TX case. */
			switch (msp->actual_data_size) {
			case MSP_DATA_BITS_8:
				msp->read = u8_msp_read;
				break;
			case MSP_DATA_BITS_10:
			case MSP_DATA_BITS_12:
			case MSP_DATA_BITS_14:
			case MSP_DATA_BITS_16:
				msp->read = u16_msp_read;
				break;
			case MSP_DATA_BITS_20:
			case MSP_DATA_BITS_24:
			case MSP_DATA_BITS_32:
			default:
				msp->read = u32_msp_read;
				break;
			}
			msp->xfer_data.rx_handler = config->handler;
			msp->xfer_data.tx_callback_data =
				config->tx_callback_data;
			msp->xfer_data.rx_callback_data =
				config->rx_callback_data;
			msp->xfer_data.msp = msp;
		}

		break;
	case MSP_BOTH_T_R_MODE:
		/*Currently they are ignored
		stm_msp_write(stm_msp_read(msp->registers + MSP_IMSC) |
		RECEIVE_OVERRUN_ERROR_INT | RECEIVE_FRAME_SYNC_ERR_INT |
		TRANSMIT_UNDERRUN_ERR_INT |
		TRANSMIT_FRAME_SYNC_ERR_INT ,
		msp->registers + MSP_IMSC); */
		if (config->work_mode == MSP_DMA_MODE) {
			stm_msp_write(stm_msp_read(msp->registers + MSP_DMACR) |
				      RX_DMA_ENABLE | TX_DMA_ENABLE,
				      msp->registers + MSP_DMACR);

			/* Same handler serves both directions; the per-
			 * direction callback data tells them apart. */
			msp->xfer_data.tx_handler = config->handler;
			msp->xfer_data.rx_handler = config->handler;
			msp->xfer_data.tx_callback_data =
				config->tx_callback_data;
			msp->xfer_data.rx_callback_data =
				config->rx_callback_data;

			configure_dma(msp, config);
		}
		if (config->work_mode == MSP_POLLING_MODE) {
			stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) |
				       (TX_ENABLE)), msp->registers + MSP_GCR);
			stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) |
				       (RX_ENABLE)), msp->registers + MSP_GCR);
		}
		if (msp->work_mode != MSP_DMA_MODE) {
			switch (msp->actual_data_size) {
			case MSP_DATA_BITS_8:
				msp->read = u8_msp_read;
				msp->write = u8_msp_write;
				break;
			case MSP_DATA_BITS_10:
			case MSP_DATA_BITS_12:
			case MSP_DATA_BITS_14:
			case MSP_DATA_BITS_16:
				msp->read = u16_msp_read;
				msp->write = u16_msp_write;
				break;
			case MSP_DATA_BITS_20:
			case MSP_DATA_BITS_24:
			case MSP_DATA_BITS_32:
			default:
				msp->read = u32_msp_read;
				msp->write = u32_msp_write;
				break;
			}
			msp->xfer_data.tx_handler = config->handler;
			msp->xfer_data.rx_handler = config->handler;
			msp->xfer_data.tx_callback_data =
				config->tx_callback_data;
			msp->xfer_data.rx_callback_data =
				config->rx_callback_data;
			msp->xfer_data.msp = msp;
		}

		break;
	default:
		printk(KERN_ERR "Invalid direction parameter\n");
		if (msp->plat_exit)
			msp->plat_exit();
		status = -EINVAL;
		return status;
	}

	/* Register the transfer strategy matching the chosen work mode. */
	switch (config->work_mode) {
	case MSP_DMA_MODE:
		msp->transfer = msp_dma_xfer;
		break;
	case MSP_POLLING_MODE:
		msp->transfer = msp_polling_xfer;
		break;
	case MSP_INTERRUPT_MODE:
		msp->transfer = msp_interrupt_xfer;
		break;
	default:
		msp->transfer = NULL;
	}

	stm_msp_write(config->iodelay, msp->registers + MSP_IODLY);

	/* enable
frame generation logic */ + stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) | + (FRAME_GEN_ENABLE)), msp->registers + MSP_GCR); + + return status; +} + +/** + * flush_rx_fifo - Flush Rx fifo MSP controller. + * @msp: msp structure. + * + * This function flush the rx fifo of msp controller. + * + * Returns error(-1) in case of failure else success(0) + */ +static void flush_rx_fifo(struct msp *msp) +{ + u32 dummy = 0; + u32 limit = 32; + u32 cur = stm_msp_read(msp->registers + MSP_GCR); + stm_msp_write(cur | RX_ENABLE, msp->registers + MSP_GCR); + while (!(stm_msp_read(msp->registers + MSP_FLR) & RX_FIFO_EMPTY) + && limit--) { + dummy = stm_msp_read(msp->registers + MSP_DR); + } + stm_msp_write(cur, msp->registers + MSP_GCR); +} + +/** + * flush_tx_fifo - Flush Tx fifo MSP controller. + * @msp: msp structure. + * + * This function flush the tx fifo using test intergration register to read data + * from tx fifo directly. + * + * Returns error(-1) in case of failure else success(0) + */ +static void flush_tx_fifo(struct msp *msp) +{ + u32 dummy = 0; + u32 limit = 32; + u32 cur = stm_msp_read(msp->registers + MSP_GCR); + stm_msp_write(cur | TX_ENABLE, msp->registers + MSP_GCR); + stm_msp_write(0x3, msp->registers + MSP_ITCR); + while (!(stm_msp_read(msp->registers + MSP_FLR) & TX_FIFO_EMPTY) + && limit--) { + dummy = stm_msp_read(msp->registers + MSP_TSTDR); + } + stm_msp_write(0x0, msp->registers + MSP_ITCR); + stm_msp_write(cur, msp->registers + MSP_GCR); +} + +/** + * stm_msp_configure_enable - configures and enables the MSP controller. + * @i2s_cont: i2s controller sent by i2s device. + * @configuration: specifies the configuration parameters. + * + * This function configures the msp controller with the client configuration. 
+ * + * Returns error(-1) in case of failure else success(0) + */ +static int stm_msp_configure_enable(struct i2s_controller *i2s_cont, + void *configuration) +{ + u32 old_reg; + u32 new_reg; + u32 mask; + int res; + struct msp_config *config = + (struct msp_config *)configuration; + struct msp *msp = (struct msp *)i2s_cont->data; + + if (in_interrupt()) { + printk(KERN_ERR + "can't call configure_enable in interrupt context\n"); + return -1; + } + + /* Two simultanous configuring msp is avoidable */ + down(&msp->lock); + switch (msp->users) { + case 0: + res = regulator_enable(msp_vape_supply); + if (res != 0) { + dev_err(&msp->i2s_cont->dev, + "Failed to enable regulator\n"); + up(&msp->lock); + return res; + } + + clk_enable(msp->clk); + msp->direction = config->direction; + break; + case 1: + if (msp->direction == MSP_BOTH_T_R_MODE || + config->direction == msp->direction || + config->direction == MSP_BOTH_T_R_MODE) { + dev_notice(&i2s_cont->dev, "%s: MSP in use in the " + "desired direction.\n", __func__); + up(&msp->lock); + return -EBUSY; + } + msp->direction = MSP_BOTH_T_R_MODE; + break; + default: + dev_notice(&i2s_cont->dev, "%s: MSP in use in both " + "directions.\n", __func__); + up(&msp->lock); + return -EBUSY; + } + msp->users++; + + /* First do the global config register */ + mask = + RX_CLK_SEL_MASK | TX_CLK_SEL_MASK | RX_FRAME_SYNC_MASK | + TX_FRAME_SYNC_MASK | RX_SYNC_SEL_MASK | TX_SYNC_SEL_MASK | + RX_FIFO_ENABLE_MASK | TX_FIFO_ENABLE_MASK | SRG_CLK_SEL_MASK | + LOOPBACK_MASK | TX_EXTRA_DELAY_MASK; + + new_reg = + (config->tx_clock_sel | config->rx_clock_sel | config-> + rx_frame_sync_pol | config->tx_frame_sync_pol | config-> + rx_frame_sync_sel | config->tx_frame_sync_sel | config-> + rx_fifo_config | config->tx_fifo_config | config-> + srg_clock_sel | config->loopback_enable | config->tx_data_enable); + + old_reg = stm_msp_read(msp->registers + MSP_GCR); + old_reg &= ~mask; + new_reg |= old_reg; + stm_msp_write(new_reg, msp->registers + 
MSP_GCR); + + if (msp_enable(msp, config) != 0) { + printk(KERN_ERR "error enabling MSP\n"); + return -EBUSY; + } + if (config->loopback_enable & 0x80) + msp->loopback_enable = 1; + /*Sometimes FIFO doesn't gets empty hence limit is provided */ + flush_tx_fifo(msp); + /*This has been added in order to fix fifo flush problem + When last xfer occurs some data remains in fifo. In order to + flush that data delay is needed */ + msleep(10); + /* wait for fifo to flush */ + flush_rx_fifo(msp); + + /* RX_BUSY take a while to clear */ + msleep(10); + + msp->msp_state = MSP_STATE_CONFIGURED; + up(&msp->lock); + return 0; +} + +static int msp_start_dma(struct msp *msp, int transmit, dma_addr_t data, + size_t bytes) +{ + struct dma_async_tx_descriptor *desc; + struct scatterlist sg; + + sg_init_table(&sg, 1); + sg_set_page(&sg, pfn_to_page(PFN_DOWN(data)), bytes, + offset_in_page(data)); + sg_dma_address(&sg) = data; + sg_dma_len(&sg) = bytes; + + if (transmit) { + if (!msp->tx_pipeid) + return -EINVAL; + + desc = msp->tx_pipeid->device-> + device_prep_slave_sg(msp->tx_pipeid, + &sg, 1, DMA_TO_DEVICE, + DMA_PREP_INTERRUPT + | DMA_CTRL_ACK); + if (!desc) + return -ENOMEM; + + desc->callback = msp->xfer_data.tx_handler; + desc->callback_param = msp->xfer_data.tx_callback_data; + desc->tx_submit(desc); + dma_async_issue_pending(msp->tx_pipeid); + } else { + if (!msp->rx_pipeid) + return -EINVAL; + + desc = msp->rx_pipeid->device-> + device_prep_slave_sg(msp->rx_pipeid, + &sg, 1, DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT + | DMA_CTRL_ACK); + if (!desc) + return -EBUSY; + + desc->callback = msp->xfer_data.rx_handler; + desc->callback_param = msp->xfer_data.rx_callback_data; + desc->tx_submit(desc); + dma_async_issue_pending(msp->rx_pipeid); + } + + return 0; +} + +static int msp_single_dma_tx(struct msp *msp, dma_addr_t data, size_t bytes) +{ + int status; + status = msp_start_dma(msp, 1, data, bytes); + stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) | (TX_ENABLE)), + 
		      msp->registers + MSP_GCR);
	return status;
}

/* Kick off a single RX DMA transfer and enable the receiver. */
static int msp_single_dma_rx(struct msp *msp, dma_addr_t data, size_t bytes)
{
	int status;
	status = msp_start_dma(msp, 0, data, bytes);
	stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) | (RX_ENABLE)),
		      msp->registers + MSP_GCR);
	return status;
}

/*
 * msp_cyclic_dma_start - start an endless (cyclic) DMA transfer.
 * Preps the caller's scatterlist as a cyclic descriptor on the channel
 * matching @direction, installs the registered per-period callback, starts
 * the transfer and marks the controller as running an infinite transfer.
 * Errors are logged and swallowed (the function returns void).
 */
static void msp_cyclic_dma_start(struct msp *msp,
				 struct scatterlist *sg,
				 int sg_len,
				 enum dma_data_direction direction)
{
	struct stedma40_cyclic_desc *cdesc;
	int ret;
	struct dma_chan *pipeid = (direction == DMA_TO_DEVICE) ?
		msp->tx_pipeid :
		msp->rx_pipeid;

	cdesc = stedma40_cyclic_prep_sg(pipeid,
					sg,
					sg_len,
					direction,
					DMA_PREP_INTERRUPT);
	if (IS_ERR(cdesc)) {
		pr_err("%s: Error: stedma40_cyclic_prep_sg failed (%ld)!\n",
		       __func__,
		       PTR_ERR(cdesc));
		return;
	}

	cdesc->period_callback = (direction == DMA_TO_DEVICE) ?
		msp->xfer_data.tx_handler :
		msp->xfer_data.rx_handler;
	cdesc->period_callback_param = (direction == DMA_TO_DEVICE) ?
		msp->xfer_data.tx_callback_data :
		msp->xfer_data.rx_callback_data;

	ret = stedma40_cyclic_start(pipeid);
	if (ret) {
		pr_err("%s: stedma40_cyclic_start failed (%d)!\n", __func__, ret);
		goto free;
	}

	msp->infinite = true;

	return;

free:
	stedma40_cyclic_free(pipeid);
}

/* Legacy function. Used by HATS driver.
 */
/*
 * Starts an infinite loopback: the buffer is split in two halves and fed to
 * RX and TX cyclic channels with the halves swapped between the two
 * scatterlists, so received data is retransmitted.  On partial failure the
 * already-prepared/started channels are unwound via the goto chain.
 */
static void msp_loopback_inf_start_dma(struct msp *msp,
				       dma_addr_t data,
				       size_t bytes)
{
	struct stedma40_cyclic_desc *rxcdesc;
	struct stedma40_cyclic_desc *txcdesc;
	struct scatterlist rxsg[2];
	struct scatterlist txsg[2];
	size_t len = bytes >> 1;
	int ret;

	sg_init_table(rxsg, ARRAY_SIZE(rxsg));
	sg_init_table(txsg, ARRAY_SIZE(txsg));

	sg_dma_len(&rxsg[0]) = len;
	sg_dma_len(&rxsg[1]) = len;
	sg_dma_len(&txsg[0]) = len;
	sg_dma_len(&txsg[1]) = len;

	sg_dma_address(&rxsg[0]) = data;
	sg_dma_address(&rxsg[1]) = data + len;

	/* TX starts on the second half while RX fills the first. */
	sg_dma_address(&txsg[0]) = data + len;
	sg_dma_address(&txsg[1]) = data;

	rxcdesc = stedma40_cyclic_prep_sg(msp->rx_pipeid,
					  rxsg, ARRAY_SIZE(rxsg),
					  DMA_FROM_DEVICE, 0);
	if (IS_ERR(rxcdesc))
		return;

	txcdesc = stedma40_cyclic_prep_sg(msp->tx_pipeid,
					  txsg, ARRAY_SIZE(txsg),
					  DMA_TO_DEVICE, 0);
	if (IS_ERR(txcdesc))
		goto free_rx;

	ret = stedma40_cyclic_start(msp->rx_pipeid);
	if (ret)
		goto free_tx;

	ret = stedma40_cyclic_start(msp->tx_pipeid);
	if (ret)
		goto stop_rx;

	msp->infinite = true;

	return;

stop_rx:
	stedma40_cyclic_stop(msp->rx_pipeid);
free_tx:
	stedma40_cyclic_free(msp->tx_pipeid);
free_rx:
	stedma40_cyclic_free(msp->rx_pipeid);
}

/**
 * msp_dma_xfer - Handles DMA transfers over i2s bus.
 * @msp: main msp structure.
 * @msg: i2s_message contains info about transmit and receive data.
 * Context: process
 *
 * This will first check whether data buffer is dmaable or not.
 * Call dma_map_single apis etc to make it dmaable dma. Starts the dma transfer
 * for TX and RX parallely and wait for it to get completed.
 *
 * Returns error(-1) in case of failure or success(0).
 */
static int msp_dma_xfer(struct msp *msp, struct i2s_message *msg)
{
	int status = 0;

	switch (msg->i2s_transfer_mode) {
	default:
	case I2S_TRANSFER_MODE_SINGLE_DMA:
		/* NOTE(review): dma_map_single() is called with a NULL
		 * device here; presumably relies on the platform's DMA ops
		 * tolerating that - confirm against the kernel version. */
		if (msg->i2s_direction == I2S_DIRECTION_RX ||
		    msg->i2s_direction == I2S_DIRECTION_BOTH)
			if (msg->rxdata && (msg->rxbytes > 0)) {
				if (!msg->dma_flag)
					msg->rxdata =
					    (void *)dma_map_single(NULL,
								   msg->rxdata,
								   msg->rxbytes,
								   DMA_FROM_DEVICE
								   );
				status = msp_single_dma_rx(msp,
						(dma_addr_t)msg->rxdata,
						msg->rxbytes);
			}
		if (msg->i2s_direction == I2S_DIRECTION_TX ||
		    msg->i2s_direction == I2S_DIRECTION_BOTH)
			if (msg->txdata && (msg->txbytes > 0)) {
				if (!msg->dma_flag)
					msg->txdata =
					    (void *)dma_map_single(NULL,
								   msg->txdata,
								   msg->txbytes,
								   DMA_TO_DEVICE);
				status = msp_single_dma_tx(msp,
						(dma_addr_t)msg->txdata,
						msg->txbytes);
			}
		break;

	case I2S_TRANSFER_MODE_CYCLIC_DMA:
		/* Endless transfer in exactly one direction; enable the
		 * matching data path in GCR after the DMA is armed. */
		if (msg->i2s_direction == I2S_DIRECTION_TX) {
			msp_cyclic_dma_start(msp,
					     msg->sg,
					     msg->sg_len,
					     DMA_TO_DEVICE);
			stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) |
				       (TX_ENABLE)),
				      msp->registers + MSP_GCR);
		} else {
			msp_cyclic_dma_start(msp,
					     msg->sg,
					     msg->sg_len,
					     DMA_FROM_DEVICE);
			stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) |
				       (RX_ENABLE)),
				      msp->registers + MSP_GCR);
		}
		break;

	case I2S_TRANSFER_MODE_INF_LOOPBACK:
		msp_loopback_inf_start_dma(msp,
					   (dma_addr_t)msg->rxdata,
					   msg->rxbytes);
		stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) |
			       (RX_ENABLE)),
			      msp->registers + MSP_GCR);
		stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) |
			       (TX_ENABLE)),
			      msp->registers + MSP_GCR);
		break;
	}

	return status;
}

#if 0
/**
 * msp_handle_irq - Interrupt handler routine.
 * @irq: irq no.
 * @dev_id: device structure registered in request irq.
 *
 * Returns error(-1) on failure else success(0).
 */
static irqreturn_t msp_handle_irq(int irq, void *dev_id)
{
	u32 irq_status;
	struct msp *msp = (struct msp *)dev_id;
	struct i2s_message *message = &msp->xfer_data.message;
	u32 irq_mask = 0;
	irq_status = stm_msp_read(msp->registers + MSP_MIS);
	irq_mask = stm_msp_read(msp->registers + MSP_IMSC);
/* Disable the interrupt to prevent immediate recurrence */
	stm_msp_write(stm_msp_read(msp->registers + MSP_IMSC) & ~irq_status,
		      msp->registers + MSP_IMSC);

/* Clear the interrupt */
	stm_msp_write(irq_status, msp->registers + MSP_ICR);
/* Check for an error condition */
	msp->msp_io_error = irq_status & (RECEIVE_OVERRUN_ERROR_INT |
					  RECEIVE_FRAME_SYNC_ERR_INT |
					  TRANSMIT_UNDERRUN_ERR_INT |
					  TRANSMIT_FRAME_SYNC_ERR_INT);

	/*Currently they are ignored */
	if (irq_status & RECEIVE_OVERRUN_ERROR_INT)
		;
	if (irq_status & TRANSMIT_UNDERRUN_ERR_INT)
		;

	/* This code has been added basically to support loopback mode
	 * Basically Transmit interrupt is not disabled even after its
	 * completion so that receive fifo gets an additional interrupt
	 */
	if (irq_mask & (RECEIVE_SERVICE_INT)
	    && (irq_mask & (TRANSMIT_SERVICE_INT)) && (msp->loopback_enable)) {
		if (msp->read)
			msp->read(&msp->xfer_data);
		if (msp->write)
			msp->write(&msp->xfer_data);
		if (message->rx_offset >= message->rxbytes) {
			if (msp->xfer_data.rx_handler)
				msp->xfer_data.rx_handler(msp->
							  xfer_data.
							  rx_callback_data,
							  message->rx_offset);
			msp->xfer_data.rx_handler = NULL;
			return IRQ_HANDLED;
		}

		if (message->tx_offset >= message->txbytes) {
			if (msp->xfer_data.tx_handler)
				msp->xfer_data.tx_handler(msp->xfer_data.
							  tx_callback_data,
							  message->tx_offset);
			msp->xfer_data.tx_handler = NULL;
		}
		stm_msp_write(irq_mask, msp->registers + MSP_IMSC);
		return IRQ_HANDLED;
	}

	if (irq_status & RECEIVE_SERVICE_INT) {
		if (msp->read)
			msp->read(&msp->xfer_data);
		if (message->rx_offset >= message->rxbytes) {
			irq_mask &= ~RECEIVE_SERVICE_INT;
			stm_msp_write(irq_mask, msp->registers + MSP_IMSC);
			if (msp->xfer_data.rx_handler)
				msp->xfer_data.rx_handler(msp->
							  xfer_data.
							  rx_callback_data,
							  message->rx_offset);
			if (!(irq_status & TRANSMIT_SERVICE_INT))
				return IRQ_HANDLED;
		}
	}
	if (irq_status & TRANSMIT_SERVICE_INT) {
		if (msp->write)
			msp->write(&msp->xfer_data);
		if (message->tx_offset >= message->txbytes) {
			irq_mask &= ~TRANSMIT_SERVICE_INT;
			stm_msp_write(irq_mask, msp->registers + MSP_IMSC);
			if (msp->xfer_data.tx_handler)
				msp->xfer_data.tx_handler(msp->xfer_data.
							  tx_callback_data,
							  message->tx_offset);
			return IRQ_HANDLED;
		}
	}
	stm_msp_write(irq_mask, msp->registers + MSP_IMSC);
	return IRQ_HANDLED;

}
#endif

/**
 * msp_interrupt_xfer - Handles Interrupt transfers over i2s bus.
 * @msp: main msp structure.
 * @msg: i2s_message contains info about transmit and receive data.
 * Context: Process or interrupt.
 *
 * This implements transfer and receive functions used in interrupt mode.
 * This can be used in interrupt context if a callback handler is registered
 * by client driver. This has been to improve performance in interrupt mode.
 * Hence can't use sleep in this function.
 *
 * Returns error(-1) in case of failure or success(0).
+ */ +static int msp_interrupt_xfer(struct msp *msp, struct i2s_message *msg) +{ + struct i2s_message *message; + u32 irq_mask = 0; + + if (msg->i2s_transfer_mode != I2S_TRANSFER_MODE_NON_DMA) + return -EINVAL; + + if (msg->txbytes) { + msp->xfer_data.message.txbytes = msg->txbytes; + msp->xfer_data.message.txdata = msg->txdata; + msp->xfer_data.message.tx_offset = 0; + } + if (msg->rxbytes) { + msp->xfer_data.message.rxbytes = msg->rxbytes; + msp->xfer_data.message.rxdata = msg->rxdata; + msp->xfer_data.message.rx_offset = 0; + } + message = &msp->xfer_data.message; + if ((message->txdata == NULL || message->txbytes == 0) + && (message->rxdata == NULL || message->rxbytes == 0)) { + printk(KERN_ERR + "transmit_receive_data is NULL with bytes > 0\n"); + return -EINVAL; + } + + msp->msp_io_error = 0; + + if (message->tx_offset < message->txbytes) { + irq_mask |= TRANSMIT_SERVICE_INT; + stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) | + (TX_ENABLE)), msp->registers + MSP_GCR); + } + if (message->rx_offset < message->rxbytes) { + irq_mask |= RECEIVE_SERVICE_INT; + stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) | + (RX_ENABLE)), msp->registers + MSP_GCR); + } + stm_msp_write((stm_msp_read(msp->registers + MSP_IMSC) | + irq_mask), msp->registers + MSP_IMSC); + return 0; +} + +/** + * func_notify_timer - Handles Polling hang issue over i2s bus. + * @data: main msp data address + * Context: Interrupt. + * + * This is used to handle error condition in transfer and receive function used + * in polling mode. + * Sometimes due to passing wrong protocol desc , polling transfer may hang. + * To prevent this, timer is added. + * + * Returns void. 
+ */ +static void func_notify_timer(unsigned long data) +{ + struct msp *msp = (struct msp *)data; + if (msp->polling_flag) { + msp->msp_io_error = 1; + printk(KERN_ERR + "Polling is taking two much time, may be it got hang\n"); + del_timer(&msp->notify_timer); + } +} + +/** + * msp_polling_xfer - Handles Polling transfers over i2s bus. + * @msp: main msp structure. + * @msg: i2s_message contains info about transmit and receive data. + * Context: Process. + * + * This implements transfer and receive functions used in polling mode. This is + * blocking fucntion. + * It is recommended to use interrupt or dma mode for better performance rather + * than the polling mode. + * + * Returns error(-1) in case of failure or success(0). + */ +static int msp_polling_xfer(struct msp *msp, struct i2s_message *msg) +{ + struct i2s_message *message; + u32 time_expire = 0; + u32 tr_ex = 0, rr_ex = 0; + u32 msec_jiffies = 0; + + if (msg->i2s_transfer_mode != I2S_TRANSFER_MODE_NON_DMA) + return -EINVAL; + + if (msg->txbytes) { + msp->xfer_data.message.txbytes = msg->txbytes; + msp->xfer_data.message.txdata = msg->txdata; + msp->xfer_data.message.tx_offset = 0; + tr_ex = msg->txbytes; + } + if (msg->rxbytes) { + msp->xfer_data.message.rxbytes = msg->rxbytes; + msp->xfer_data.message.rxdata = msg->rxdata; + msp->xfer_data.message.rx_offset = 0; + rr_ex = msg->rxbytes; + } + message = &msp->xfer_data.message; + time_expire = (tr_ex + rr_ex) / 1024; + if (!time_expire) + msec_jiffies = 500; + else + msec_jiffies = time_expire * 500; + msp->notify_timer.expires = jiffies + msecs_to_jiffies(msec_jiffies); + down(&msp->lock); + if (message->txdata == NULL && message->txbytes > 0) { + printk(KERN_ERR + "transmit_receive_data is NULL with bytes > 0\n"); + return -EINVAL; + } + + if (message->rxdata == NULL && message->rxbytes > 0) { + printk(KERN_ERR + "transmit_receive_data is NULL with bytes > 0\n"); + return -EINVAL; + } + msp->msp_io_error = 0; + msp->polling_flag = 1; + 
add_timer(&msp->notify_timer); + while (message->tx_offset < message->txbytes + || message->rx_offset < message->rxbytes) { + if (msp->msp_io_error) + break; + if (msp->read) + msp->read(&msp->xfer_data); + if (msp->write) + msp->write(&msp->xfer_data); + } + msp->polling_flag = 0; + del_timer(&msp->notify_timer); + up(&msp->lock); + return message->txbytes + message->rxbytes; +} + +/** + * stm_msp_transceive_data - Main i2s transfer function. + * @i2s_cont: i2s controller structure passed by client driver. + * @message: i2s message structure contains transceive info. + * Context: process or interrupt. + * + * This function is registered over i2s_xfer funtions. It will handle main i2s + * transfer over i2s bus in various modes.It call msp transfer function on which + * suitable transfer function is already registered i.e dma ,interrupt or + * polling function. + * + * Returns error(-1) in case of failure or success(0). + */ +static int stm_msp_transceive_data(struct i2s_controller *i2s_cont, + struct i2s_message *message) +{ + int status = 0; + struct msp *msp = (struct msp *)i2s_cont->data; + + if (!message || (msp->msp_state == MSP_STATE_IDLE)) { + printk(KERN_ERR "Message is NULL\n"); + return -EPERM; + } + + msp->msp_state = MSP_STATE_RUN; + if (msp->transfer) + status = msp->transfer(msp, message); + + if (msp->msp_state == MSP_STATE_RUN) + msp->msp_state = MSP_STATE_CONFIGURED; + + return status; +} + +/** + * msp_disable_receive - Disable receive functionality. + * @msp: main msp structure. + * Context: process. + * + * This function will disable msp controller's receive functionality like dma, + * interrupt receive data buffer all are disabled. + * + * Returns void. 
 */
static void msp_disable_receive(struct msp *msp)
{
	/* Turn off the RX path, RX DMA requests and RX interrupts, then
	 * forget the client's receive buffer and PIO read routine. */
	stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) &
		       (~RX_ENABLE)), msp->registers + MSP_GCR);
	stm_msp_write((stm_msp_read(msp->registers + MSP_DMACR) &
		       (~RX_DMA_ENABLE)), msp->registers + MSP_DMACR);
	stm_msp_write((stm_msp_read(msp->registers + MSP_IMSC) &
		       (~
			(RECEIVE_SERVICE_INT |
			 RECEIVE_OVERRUN_ERROR_INT))),
		      msp->registers + MSP_IMSC);
	msp->xfer_data.message.rxbytes = 0;
	msp->xfer_data.message.rx_offset = 0;
	msp->xfer_data.message.rxdata = NULL;
	msp->read = NULL;

}

/**
 * msp_disable_transmit - Disable transmit functionality.
 * @msp: main msp structure.
 * Context: process.
 *
 * This function will disable msp controller's transmit functionality like dma,
 * interrupt transmit data buffer all are disabled.
 *
 * Returns void.
 */
static void msp_disable_transmit(struct msp *msp)
{

	/* Mirror of msp_disable_receive() for the TX path. */
	stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) &
		       (~TX_ENABLE)), msp->registers + MSP_GCR);
	stm_msp_write((stm_msp_read(msp->registers + MSP_DMACR) &
		       (~TX_DMA_ENABLE)), msp->registers + MSP_DMACR);
	stm_msp_write((stm_msp_read(msp->registers + MSP_IMSC) &
		       (~
			(TRANSMIT_SERVICE_INT |
			 TRANSMIT_UNDERRUN_ERR_INT))),
		      msp->registers + MSP_IMSC);
	msp->xfer_data.message.txbytes = 0;
	msp->xfer_data.message.tx_offset = 0;
	msp->xfer_data.message.txdata = NULL;
	msp->write = NULL;

}

/**
 * stm_msp_disable - disable the given msp controller
 * @msp: specifies the msp contoller data
 * @direction: specifies the transmit/receive direction
 * @flag: It indicates the functionality that needs to be disabled.
 *
 * Tears down DMA channels (stopping any infinite/cyclic transfer first),
 * disables the requested direction(s), and for DISABLE_ALL also drains both
 * fifos through a temporary loopback, resets all MSP registers and runs the
 * board-specific exit hook.
 *
 * Returns error(-1) in case of failure else success(0)
 */
static int stm_msp_disable(struct msp *msp, int direction, i2s_flag flag)
{
	int limit = 32;
	u32 dummy = 0;
	int status = 0;
	/* Nothing to do if neither direction is currently enabled. */
	if (!
	    (stm_msp_read(msp->registers + MSP_GCR) &
	     ((TX_ENABLE | RX_ENABLE)))) {
		return 0;
	}
	if (msp->work_mode == MSP_DMA_MODE) {
		if (flag == DISABLE_ALL || flag == DISABLE_TRANSMIT) {
			if (msp->tx_pipeid != NULL) {
				if (msp->infinite) {
					stedma40_cyclic_stop(msp->tx_pipeid);
					stedma40_cyclic_free(msp->tx_pipeid);
				}
				msp->tx_pipeid->device->
					device_control(msp->tx_pipeid,
						       DMA_TERMINATE_ALL, 0);
				dma_release_channel(msp->tx_pipeid);
				msp->tx_pipeid = NULL;
			}
		}
		if ((flag == DISABLE_ALL || flag == DISABLE_RECEIVE)) {
			if (msp->rx_pipeid != NULL) {
				if (msp->infinite) {
					stedma40_cyclic_stop(msp->rx_pipeid);
					stedma40_cyclic_free(msp->rx_pipeid);
				}

				msp->rx_pipeid->device->
					device_control(msp->rx_pipeid,
						       DMA_TERMINATE_ALL, 0);
				dma_release_channel(msp->rx_pipeid);
				msp->rx_pipeid = NULL;
			}
		}

		msp->infinite = false;
	}
	if (flag == DISABLE_TRANSMIT)
		msp_disable_transmit(msp);
	else if (flag == DISABLE_RECEIVE)
		msp_disable_receive(msp);
	else {
		/* DISABLE_ALL: enable loopback so the TX fifo can drain into
		 * the RX fifo, then flush both, then disable everything. */
		stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) |
			       (LOOPBACK_MASK)), msp->registers + MSP_GCR);
		/* Flush Tx fifo */
		while ((!
			(stm_msp_read(msp->registers + MSP_FLR) &
			 TX_FIFO_EMPTY)) && limit--)
			dummy = stm_msp_read(msp->registers + MSP_DR);

		/* Disable Transmit channel */
		stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) &
			       (~TX_ENABLE)), msp->registers + MSP_GCR);
		limit = 32;
		/* Flush Rx Fifo */
		while ((!
			(stm_msp_read(msp->registers + MSP_FLR) &
			 RX_FIFO_EMPTY)) && limit--)
			dummy = stm_msp_read(msp->registers + MSP_DR);
		/* Disable Loopback and Receive channel */
		stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) &
			       (~(RX_ENABLE | LOOPBACK_MASK))),
			      msp->registers + MSP_GCR);
		/*This has been added in order to fix fifo flush problem
		   When last xfer occurs some data remains in fifo. In order to
		   flush that data delay is needed */
		msleep(10);
		msp_disable_transmit(msp);
		msp_disable_receive(msp);

	}

	/* disable sample rate and frame generators */
	if (flag == DISABLE_ALL) {
		msp->msp_state = MSP_STATE_IDLE;
		stm_msp_write((stm_msp_read(msp->registers + MSP_GCR) &
			       (~(FRAME_GEN_ENABLE | SRG_ENABLE))),
			      msp->registers + MSP_GCR);
		memset(&msp->xfer_data, 0, sizeof(struct trans_data));
		if (msp->plat_exit)
			status = msp->plat_exit();
		if (status)
			printk(KERN_ERR "Error in msp_i2s_exit\n");
		/* NOTE(review): msp_state was set to MSP_STATE_IDLE just
		 * above, so this RUN check can never be true - the up() here
		 * looks like dead code; confirm intent before removing. */
		if (msp->work_mode == MSP_POLLING_MODE
		    && msp->msp_state == MSP_STATE_RUN) {
			up(&msp->lock);
		}
		msp->transfer = NULL;
		/* Full register reset back to power-on defaults. */
		stm_msp_write(0, msp->registers + MSP_GCR);
		stm_msp_write(0, msp->registers + MSP_TCF);
		stm_msp_write(0, msp->registers + MSP_RCF);
		stm_msp_write(0, msp->registers + MSP_DMACR);
		stm_msp_write(0, msp->registers + MSP_SRG);
		stm_msp_write(0, msp->registers + MSP_MCR);
		stm_msp_write(0, msp->registers + MSP_RCM);
		stm_msp_write(0, msp->registers + MSP_RCV);
		stm_msp_write(0, msp->registers + MSP_TCE0);
		stm_msp_write(0, msp->registers + MSP_TCE1);
		stm_msp_write(0, msp->registers + MSP_TCE2);
		stm_msp_write(0, msp->registers + MSP_TCE3);
		stm_msp_write(0, msp->registers + MSP_RCE0);
		stm_msp_write(0, msp->registers + MSP_RCE1);
		stm_msp_write(0, msp->registers + MSP_RCE2);
		stm_msp_write(0, msp->registers + MSP_RCE3);
	}
	return status;
}

/**
 * stm_msp_close - Close the current i2s connection btw controller and client.
 * @i2s_cont: i2s controller structure
 * @flag: It indicates the functionality that needs to be disabled.
 * Context: process
 *
 * It will call msp_disable and reset the msp configuration. Disables Rx and Tx
 * channels, free gpio irqs and interrupt pins. Called by i2s client driver to
 * indicate the completion of use of i2s bus. It is registered on i2s_close
 * function.
 *
 * Returns error(-1) in case of failure or success(0).
+ */ +static int stm_msp_close(struct i2s_controller *i2s_cont, i2s_flag flag) +{ + int status = 0; + struct msp *msp = (struct msp *)i2s_cont->data; + down(&msp->lock); + if (msp->users == 0) { + pr_err("MSP already closed!\n"); + status = -EINVAL; + goto end; + } + dev_dbg(&i2s_cont->dev, "%s: users = %d, flag = %d.\n", + __func__, msp->users, flag); + /* We need to call it twice for DISABLE_ALL*/ + msp->users = flag == DISABLE_ALL ? 0 : msp->users - 1; + if (msp->users) + status = stm_msp_disable(msp, MSP_BOTH_T_R_MODE, flag); + else { + status = stm_msp_disable(msp, MSP_BOTH_T_R_MODE, DISABLE_ALL); + clk_disable(msp->clk); + status = regulator_disable(msp_vape_supply); + if (status != 0) { + dev_err(&msp->i2s_cont->dev, + "Failed to disable regulator\n"); + clk_enable(msp->clk); + goto end; + } + } + if (status) + goto end; + if (msp->users) + msp->direction = flag == DISABLE_TRANSMIT ? + MSP_RECEIVE_MODE : MSP_TRANSMIT_MODE; + + if (msp->vape_opp_constraint == 1) { + prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "msp_i2s", 50); + msp->vape_opp_constraint = 0; + } +end: + up(&msp->lock); + return status; + +} + +static int stm_msp_hw_status(struct i2s_controller *i2s_cont) +{ + struct msp *msp = (struct msp *)i2s_cont->data; + + int status = stm_msp_read(msp->registers + MSP_RIS) & 0xee; + if (status) + stm_msp_write(status, msp->registers + MSP_ICR); + + return status; +} + +static dma_addr_t stm_msp_get_pointer(struct i2s_controller *i2s_cont, + enum i2s_direction_t i2s_direction) +{ + struct msp *msp = (struct msp *)i2s_cont->data; + return (i2s_direction == I2S_DIRECTION_TX) ? + stedma40_get_src_addr(msp->tx_pipeid) : + stedma40_get_dst_addr(msp->rx_pipeid); +} + + /*Platform driver's functions */ +/** + * msp_probe - Probe function + * @pdev: platform device structure. + * Context: process + * + * Probe function of msp platform driver.Handles allocation of memory and irq + * resource. It creates i2s_controller and one i2s_device per msp controller. 
+ * + * Returns error(-1) in case of failure or success(0). + */ +int msp_probe(struct platform_device *pdev) +{ + int status = 0; + struct device *dev; + s16 platform_num = 0; + struct resource *res = NULL; + int irq; + struct i2s_controller *i2s_cont; + struct msp_i2s_platform_data *platform_data; + struct msp *msp; + + if (!pdev) + return -EPERM; + msp = kzalloc(sizeof(*msp), GFP_KERNEL); + + platform_data = (struct msp_i2s_platform_data *)pdev->dev.platform_data; + + msp->id = platform_data->id; + msp->plat_init = platform_data->msp_i2s_init; + msp->plat_exit = platform_data->msp_i2s_exit; + + msp->dma_cfg_rx = platform_data->msp_i2s_dma_rx; + msp->dma_cfg_tx = platform_data->msp_i2s_dma_tx; + + dev = &pdev->dev; + platform_num = msp->id - 1; + + init_MUTEX(&msp->lock); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "probe - MEM resources not defined\n"); + status = -EINVAL; + goto free_msp; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + status = -EINVAL; + goto free_msp; + } + msp->irq = irq; + + msp->registers = ioremap(res->start, (res->end - res->start + 1)); + if (msp->registers == NULL) { + status = -EINVAL; + goto free_msp; + } + + msp_vape_supply = regulator_get(NULL, "v-ape"); + if (IS_ERR(msp_vape_supply)) { + status = PTR_ERR(msp_vape_supply); + printk(KERN_WARNING "msp i2s : failed to get v-ape supply\n"); + goto free_irq; + } + prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "msp_i2s", 50); + msp->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(msp->clk)) { + status = PTR_ERR(msp->clk); + goto free_irq; + } + + init_timer(&msp->notify_timer); + msp->notify_timer.expires = jiffies + msecs_to_jiffies(1000); + msp->notify_timer.function = func_notify_timer; + msp->notify_timer.data = (unsigned long)msp; + + msp->rx_pipeid = NULL; + msp->tx_pipeid = NULL; + msp->read = NULL; + msp->write = NULL; + msp->transfer = NULL; + msp->msp_state = MSP_STATE_IDLE; + msp->loopback_enable = 0; + + 
dev_set_drvdata(&pdev->dev, msp); + /* I2S Controller is allocated and added in I2S controller class. */ + i2s_cont = + (struct i2s_controller *)kzalloc(sizeof(*i2s_cont), GFP_KERNEL); + if (!i2s_cont) { + dev_err(&pdev->dev, "i2s controller alloc failed \n"); + status = -EINVAL; + goto del_timer; + } + i2s_cont->dev.parent = dev; + i2s_cont->algo = &i2s_algo; + i2s_cont->data = (void *)msp; + i2s_cont->id = platform_num; + snprintf(i2s_cont->name, sizeof(i2s_cont->name), + "MSP_I2S.%04x", platform_num); + + status = i2s_add_controller(i2s_cont); + if (status) { + dev_err(&pdev->dev, "i2s add controller failed (%d)\n", status); + goto free_cont; + } + msp->i2s_cont = i2s_cont; + return status; +free_cont: + kfree(msp->i2s_cont); +del_timer: + del_timer_sync(&msp->notify_timer); + clk_put(msp->clk); +free_irq: + iounmap(msp->registers); +free_msp: + kfree(msp); + return status; +} + +/** + * msp_remove - remove function + * @pdev: platform device structure. + * Context: process + * + * remove function of msp platform driver.Handles dellocation of memory and irq + * resource. It deletes i2s_controller and one i2s_device per msp controller + * created in msp_probe. + * + * Returns error(-1) in case of failure or success(0). + */ +static int msp_remove(struct platform_device *pdev) +{ + struct msp *msp = + (struct msp *)dev_get_drvdata(&pdev->dev); + int status = 0; + i2s_del_controller(msp->i2s_cont); + del_timer_sync(&msp->notify_timer); + clk_put(msp->clk); + iounmap(msp->registers); + prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "msp_i2s"); + regulator_put(msp_vape_supply); + kfree(msp); + return status; +} +#ifdef CONFIG_PM +/** + * msp_suspend - MSP suspend function registered with PM framework. + * @pdev: Reference to platform device structure of the device + * @state: power mgmt state. + * + * This function is invoked when the system is going into sleep, called + * by the power management framework of the linux kernel. 
+ * Nothing is required as controller is configured with every transfer. + * It is assumed that no active tranfer is in progress at this time. + * Client driver should make sure of this. + * + */ + +int msp_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct msp *msp = + (struct msp *)dev_get_drvdata(&pdev->dev); + + down(&msp->lock); + if (msp->users > 0) { + up(&msp->lock); + return -EBUSY; + } + up(&msp->lock); + + return 0; +} +/** + * msp_resume - MSP Resume function registered with PM framework. + * @pdev: Reference to platform device structure of the device + * + * This function is invoked when the system is coming out of sleep, called + * by the power management framework of the linux kernel. + * Nothing is required. + * + */ + +int msp_resume(struct platform_device *pdev) +{ + return 0; +} +#else +#define msp_suspend NULL +#define msp_resume NULL +#endif + +static struct platform_driver msp_i2s_driver = { + .probe = msp_probe, + .remove = msp_remove, + .suspend = msp_suspend, + .resume = msp_resume, + .driver = { + .owner = THIS_MODULE, + .name = "MSP_I2S", + }, +}; + +static int __init stm_msp_mod_init(void) +{ + return platform_driver_register(&msp_i2s_driver); +} + +static void __exit stm_msp_exit(void) +{ + platform_driver_unregister(&msp_i2s_driver); + return; +} + +module_init(stm_msp_mod_init); +module_exit(stm_msp_exit); + +MODULE_AUTHOR("Sandeep Kaushik"); +MODULE_DESCRIPTION("STM MSP driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/i2s/msp_i2s.h b/drivers/misc/i2s/msp_i2s.h new file mode 100644 index 00000000000..3fcb92867e2 --- /dev/null +++ b/drivers/misc/i2s/msp_i2s.h @@ -0,0 +1,362 @@ +/*----------------------------------------------------------------------------------*/ +/* copyright STMicroelectronics, 2007. 
*/ +/* */ +/* This program is free software; you can redistribute it and/or modify it under */ +/* the terms of the GNU General Public License as published by the Free */ +/* Software Foundation; either version 2.1 of the License, or (at your option) */ +/* any later version. */ +/* */ +/* This program is distributed in the hope that it will be useful, but WITHOUT */ +/* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS */ +/* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. */ +/* */ +/* You should have received a copy of the GNU General Public License */ +/* along with this program. If not, see <http://www.gnu.org/licenses/>. */ +/*----------------------------------------------------------------------------------*/ + + +#ifndef STM_MSP_HEADER +#define STM_MSP_HEADER + +#define MSP_DR 0x00 +#define MSP_GCR 0x04 +#define MSP_TCF 0x08 +#define MSP_RCF 0x0c +#define MSP_SRG 0x10 +#define MSP_FLR 0x14 +#define MSP_DMACR 0x18 + +#define MSP_IMSC 0x20 +#define MSP_RIS 0x24 +#define MSP_MIS 0x28 +#define MSP_ICR 0x2c +#define MSP_MCR 0x30 +#define MSP_RCV 0x34 +#define MSP_RCM 0x38 + +#define MSP_TCE0 0x40 +#define MSP_TCE1 0x44 +#define MSP_TCE2 0x48 +#define MSP_TCE3 0x4c + +#define MSP_RCE0 0x60 +#define MSP_RCE1 0x64 +#define MSP_RCE2 0x68 +#define MSP_RCE3 0x6c + +#define MSP_ITCR 0x80 +#define MSP_ITIP 0x84 +#define MSP_ITOP 0x88 +#define MSP_TSTDR 0x8c + +#define MSP_PID0 0xfe0 +#define MSP_PID1 0xfe4 +#define MSP_PID2 0xfe8 +#define MSP_PID3 0xfec + +#define MSP_CID0 0xff0 +#define MSP_CID1 0xff4 +#define MSP_CID2 0xff8 +#define MSP_CID3 0xffc + + +/* Single or dual phase mode */ +enum +{ + MSP_SINGLE_PHASE, + MSP_DUAL_PHASE +}; + + +/* Transmit/Receive shifter status +-----------------------------------*/ +enum +{ + MSP_SxHIFTER_IDLE = 0, + MSP_SHIFTER_WORKING = 1 +}; + + +/* Transmit/Receive FIFO status +---------------------------------*/ +enum +{ + MSP_FIFO_FULL, + MSP_FIFO_PART_FILLED, + 
MSP_FIFO_EMPTY +}; + + +/* Frame length +------------------*/ +enum +{ + MSP_FRAME_LENGTH_1 = 0, + MSP_FRAME_LENGTH_2 = 1, + MSP_FRAME_LENGTH_4 = 3, + MSP_FRAME_LENGTH_8 = 7, + MSP_FRAME_LENGTH_12 = 11, + MSP_FRAME_LENGTH_16 = 15, + MSP_FRAME_LENGTH_20 = 19, + MSP_FRAME_LENGTH_32 = 31, + MSP_FRAME_LENGTH_48 = 47, + MSP_FRAME_LENGTH_64 = 63 +}; + +/* Element length */ +enum +{ + MSP_ELEM_LENGTH_8 = 0, + MSP_ELEM_LENGTH_10 = 1, + MSP_ELEM_LENGTH_12 = 2, + MSP_ELEM_LENGTH_14 = 3, + MSP_ELEM_LENGTH_16 = 4, + MSP_ELEM_LENGTH_20 = 5, + MSP_ELEM_LENGTH_24 = 6, + MSP_ELEM_LENGTH_32 = 7 +}; + + +/* Data delay (in bit clock cycles) +---------------------------------------*/ +enum +{ + MSP_DELAY_0 = 0, + MSP_DELAY_1 = 1, + MSP_DELAY_2 = 2, + MSP_DELAY_3 = 3 +}; + + +/* Configurations of clocks (transmit, receive or sample rate generator) +-------------------------------------------------------------------------*/ +enum +{ + MSP_RISING_EDGE = 0, + MSP_FALLING_EDGE = 1 +}; + +/* Protocol dependant parameters list */ +struct msp_protocol_desc +{ + u32 phase_mode; + u32 frame_len_1; + u32 frame_len_2; + u32 element_len_1; + u32 element_len_2; + u32 data_delay; + u32 tx_clock_edge; + u32 rx_clock_edge; +}; +#define RX_ENABLE_MASK 0x00000001 +#define RX_FIFO_ENABLE_MASK 0x00000002 +#define RX_FRAME_SYNC_MASK 0x00000004 +#define DIRECT_COMPANDING_MASK 0x00000008 +#define RX_SYNC_SEL_MASK 0x00000010 +#define RX_CLK_POL_MASK 0x00000020 +#define RX_CLK_SEL_MASK 0x00000040 +#define LOOPBACK_MASK 0x00000080 +#define TX_ENABLE_MASK 0x00000100 +#define TX_FIFO_ENABLE_MASK 0x00000200 +#define TX_FRAME_SYNC_MASK 0x00000400 +#define TX_MSP_TDR_TSR 0x00000800 +#define TX_SYNC_SEL_MASK 0x00001800 +#define TX_CLK_POL_MASK 0x00002000 +#define TX_CLK_SEL_MASK 0x00004000 +#define TX_EXTRA_DELAY_MASK 0x00008000 +#define SRG_ENABLE_MASK 0x00010000 +#define SRG_CLK_POL_MASK 0x00020000 +#define SRG_CLK_SEL_MASK 0x000C0000 +#define FRAME_GEN_EN_MASK 0x00100000 +#define SPI_CLK_MODE_MASK 0x00600000 
+#define SPI_BURST_MODE_MASK 0x00800000 + +#define RXEN_BIT 0 +#define RFFEN_BIT 1 +#define RFSPOL_BIT 2 +#define DCM_BIT 3 +#define RFSSEL_BIT 4 +#define RCKPOL_BIT 5 +#define RCKSEL_BIT 6 +#define LBM_BIT 7 +#define TXEN_BIT 8 +#define TFFEN_BIT 9 +#define TFSPOL_BIT 10 +#define TFSSEL_BIT 11 +#define TCKPOL_BIT 13 +#define TCKSEL_BIT 14 +#define TXDDL_BIT 15 +#define SGEN_BIT 16 +#define SCKPOL_BIT 17 +#define SCKSEL_BIT 18 +#define FGEN_BIT 20 +#define SPICKM_BIT 21 + +#define msp_rx_clkpol_bit(n) ((n & 1) << RCKPOL_BIT) +#define msp_tx_clkpol_bit(n) ((n & 1) << TCKPOL_BIT) +#define msp_spi_clk_mode_bits(n) ((n & 3) << SPICKM_BIT) + + +/* Use this to clear the clock mode bits to non-spi */ +#define MSP_NON_SPI_CLK_MASK 0x00600000 + +#define P1ELEN_BIT 0 +#define P1FLEN_BIT 3 +#define DTYP_BIT 10 +#define ENDN_BIT 12 +#define DDLY_BIT 13 +#define FSIG_BIT 15 +#define P2ELEN_BIT 16 +#define P2FLEN_BIT 19 +#define P2SM_BIT 26 +#define P2EN_BIT 27 + +#define msp_p1_elem_len_bits(n) (n & 0x00000007) +#define msp_p2_elem_len_bits(n) (((n) << P2ELEN_BIT) & 0x00070000) +#define msp_p1_frame_len_bits(n) (((n) << P1FLEN_BIT) & 0x00000378) +#define msp_p2_frame_len_bits(n) (((n) << P2FLEN_BIT) & 0x03780000) +#define msp_data_delay_bits(n) (((n) << DDLY_BIT) & 0x00003000) +#define msp_data_type_bits(n) (((n) << DTYP_BIT) & 0x00000600) +#define msp_p2_start_mode_bit(n) (n << P2SM_BIT) +#define msp_p2_enable_bit(n) (n << P2EN_BIT) + +/* Flag register +--------------------*/ +#define RX_BUSY 0x00000001 +#define RX_FIFO_EMPTY 0x00000002 +#define RX_FIFO_FULL 0x00000004 +#define TX_BUSY 0x00000008 +#define TX_FIFO_EMPTY 0x00000010 +#define TX_FIFO_FULL 0x00000020 + +#define RBUSY_BIT 0 +#define RFE_BIT 1 +#define RFU_BIT 2 +#define TBUSY_BIT 3 +#define TFE_BIT 4 +#define TFU_BIT 5 + +/* Multichannel control register +---------------------------------*/ +#define RMCEN_BIT 0 +#define RMCSF_BIT 1 +#define RCMPM_BIT 3 +#define TMCEN_BIT 5 +#define TNCSF_BIT 6 + +/* Sample rate 
generator register +------------------------------------*/ +#define SCKDIV_BIT 0 +#define FRWID_BIT 10 +#define FRPER_BIT 16 + +#define SCK_DIV_MASK 0x0000003FF +#define frame_width_bits(n) (((n) << FRWID_BIT) &0x0000FC00) +#define frame_period_bits(n) (((n) << FRPER_BIT) &0x1FFF0000) + + +/* DMA controller register +---------------------------*/ +#define RX_DMA_ENABLE 0x00000001 +#define TX_DMA_ENABLE 0x00000002 + +#define RDMAE_BIT 0 +#define TDMAE_BIT 1 + +/*Interrupt Register +-----------------------------------------*/ +#define RECEIVE_SERVICE_INT 0x00000001 +#define RECEIVE_OVERRUN_ERROR_INT 0x00000002 +#define RECEIVE_FRAME_SYNC_ERR_INT 0x00000004 +#define RECEIVE_FRAME_SYNC_INT 0x00000008 +#define TRANSMIT_SERVICE_INT 0x00000010 +#define TRANSMIT_UNDERRUN_ERR_INT 0x00000020 +#define TRANSMIT_FRAME_SYNC_ERR_INT 0x00000040 +#define TRANSMIT_FRAME_SYNC_INT 0x00000080 +#define ALL_INT 0x000000ff + +/* Protocol configuration values +* I2S: Single phase, 16 bits, 2 words per frame +-----------------------------------------------*/ +#define I2S_PROTOCOL_DESC \ +{ \ + MSP_SINGLE_PHASE, \ + MSP_FRAME_LENGTH_1, \ + MSP_FRAME_LENGTH_1, \ + MSP_ELEM_LENGTH_32, \ + MSP_ELEM_LENGTH_32, \ + MSP_DELAY_1, \ + MSP_FALLING_EDGE, \ + MSP_FALLING_EDGE \ +} + +#define PCM_PROTOCOL_DESC \ +{ \ + MSP_SINGLE_PHASE, \ + MSP_FRAME_LENGTH_1, \ + MSP_FRAME_LENGTH_1, \ + MSP_ELEM_LENGTH_16, \ + MSP_ELEM_LENGTH_16, \ + MSP_DATA_DELAY, \ + MSP_TX_CLOCK_EDGE, \ + MSP_RX_CLOCK_EDGE \ +} + +/* Companded PCM: Single phase, 8 bits, 1 word per frame +--------------------------------------------------------*/ +#define PCM_COMPAND_PROTOCOL_DESC \ +{ \ + MSP_SINGLE_PHASE, \ + MSP_FRAME_LENGTH_1, \ + MSP_FRAME_LENGTH_1, \ + MSP_ELEM_LENGTH_8, \ + MSP_ELEM_LENGTH_8, \ + MSP_DELAY_0, \ + MSP_RISING_EDGE, \ + MSP_FALLING_EDGE \ +} + +/* AC97: Double phase, 1 element of 16 bits during first phase, +* 12 elements of 20 bits in second phase. 
+--------------------------------------------------------------*/ +#define AC97_PROTOCOL_DESC \ +{ \ + MSP_DUAL_PHASE, \ + MSP_FRAME_LENGTH_1, \ + MSP_FRAME_LENGTH_12, \ + MSP_ELEM_LENGTH_16, \ + MSP_ELEM_LENGTH_20, \ + MSP_DELAY_1, \ + MSP_RISING_EDGE, \ + MSP_FALLING_EDGE \ +} + +#define SPI_MASTER_PROTOCOL_DESC \ +{ \ + MSP_SINGLE_PHASE, \ + MSP_FRAME_LENGTH_1, \ + MSP_FRAME_LENGTH_1, \ + MSP_ELEM_LENGTH_8, \ + MSP_ELEM_LENGTH_8, \ + MSP_DELAY_1, \ + MSP_FALLING_EDGE, \ + MSP_RISING_EDGE \ +} +#define SPI_SLAVE_PROTOCOL_DESC \ +{ \ + MSP_SINGLE_PHASE, \ + MSP_FRAME_LENGTH_1, \ + MSP_FRAME_LENGTH_1, \ + MSP_ELEM_LENGTH_8, \ + MSP_ELEM_LENGTH_8, \ + MSP_DELAY_1, \ + MSP_FALLING_EDGE, \ + MSP_RISING_EDGE \ +} + +#define MSP_FRAME_PERIOD_IN_MONO_MODE 256 +#define MSP_FRAME_PERIOD_IN_STEREO_MODE 32 +#define MSP_FRAME_WIDTH_IN_STEREO_MODE 16 + +#endif + diff --git a/drivers/misc/iface_stat.c b/drivers/misc/iface_stat.c new file mode 100644 index 00000000000..bd16449f7d8 --- /dev/null +++ b/drivers/misc/iface_stat.c @@ -0,0 +1,234 @@ +/* drivers/misc/iface_stat.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/in.h> +#include <linux/list.h> +#include <linux/proc_fs.h> +#include <linux/slab.h> +#include <linux/stat.h> +#include <linux/netdevice.h> +#include <linux/inetdevice.h> +#include <linux/rtnetlink.h> +#include <linux/iface_stat.h> + +static LIST_HEAD(iface_list); +static struct proc_dir_entry *iface_stat_procdir; + +struct iface_stat { + struct list_head if_link; + char *iface_name; + unsigned long tx_bytes; + unsigned long rx_bytes; + unsigned long tx_packets; + unsigned long rx_packets; + bool active; +}; + +static int read_proc_entry(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len; + unsigned long value; + char *p = page; + unsigned long *iface_entry = (unsigned long *) data; + if (!data) + return 0; + + value = (unsigned long) (*iface_entry); + p += sprintf(p, "%lu\n", value); + len = (p - page) - off; + *eof = (len <= count) ? 1 : 0; + *start = page + off; + return len; +} + +static int read_proc_bool_entry(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len; + bool value; + char *p = page; + unsigned long *iface_entry = (unsigned long *) data; + if (!data) + return 0; + + value = (bool) (*iface_entry); + p += sprintf(p, "%u\n", value ? 1 : 0); + len = (p - page) - off; + *eof = (len <= count) ? 1 : 0; + *start = page + off; + return len; +} + +/* Find the entry for tracking the specified interface. */ +static struct iface_stat *get_iface_stat(const char *ifname) +{ + struct iface_stat *iface_entry; + if (!ifname) + return NULL; + + list_for_each_entry(iface_entry, &iface_list, if_link) { + if (!strcmp(iface_entry->iface_name, ifname)) + return iface_entry; + } + return NULL; +} + +/* + * Create a new entry for tracking the specified interface. + * Do nothing if the entry already exists. + * Called when an interface is configured with a valid IP address. 
+ */ +void create_iface_stat(const struct in_device *in_dev) +{ + struct iface_stat *new_iface; + struct proc_dir_entry *proc_entry; + const struct net_device *dev; + const char *ifname; + struct iface_stat *entry; + __be32 ipaddr = 0; + struct in_ifaddr *ifa = NULL; + + ASSERT_RTNL(); /* No need for separate locking */ + + dev = in_dev->dev; + if (!dev) { + pr_err("iface_stat: This should never happen.\n"); + return; + } + + ifname = dev->name; + for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) + if (!strcmp(dev->name, ifa->ifa_label)) + break; + + if (ifa) + ipaddr = ifa->ifa_local; + else { + pr_err("iface_stat: Interface not found.\n"); + return; + } + + entry = get_iface_stat(dev->name); + if (entry != NULL) { + pr_debug("iface_stat: Already monitoring device %s\n", ifname); + if (ipv4_is_loopback(ipaddr)) { + entry->active = false; + pr_debug("iface_stat: Disabling monitor for " + "loopback device %s\n", ifname); + } else { + entry->active = true; + pr_debug("iface_stat: Re-enabling monitor for " + "device %s with ip %pI4\n", + ifname, &ipaddr); + } + return; + } else if (ipv4_is_loopback(ipaddr)) { + pr_debug("iface_stat: Ignoring monitor for " + "loopback device %s with ip %pI4\n", + ifname, &ipaddr); + return; + } + + /* Create a new entry for tracking the specified interface. */ + new_iface = kmalloc(sizeof(struct iface_stat), GFP_KERNEL); + if (new_iface == NULL) + return; + + new_iface->iface_name = kmalloc((strlen(ifname)+1)*sizeof(char), + GFP_KERNEL); + if (new_iface->iface_name == NULL) { + kfree(new_iface); + return; + } + + strcpy(new_iface->iface_name, ifname); + /* Counters start at 0, so we can track 4GB of network traffic. */ + new_iface->tx_bytes = 0; + new_iface->rx_bytes = 0; + new_iface->rx_packets = 0; + new_iface->tx_packets = 0; + new_iface->active = true; + + /* Append the newly created iface stat struct to the list. 
*/ + list_add_tail(&new_iface->if_link, &iface_list); + proc_entry = proc_mkdir(ifname, iface_stat_procdir); + + /* Keep reference to iface_stat so we know where to read stats from. */ + create_proc_read_entry("tx_bytes", S_IRUGO, proc_entry, + read_proc_entry, &new_iface->tx_bytes); + + create_proc_read_entry("rx_bytes", S_IRUGO, proc_entry, + read_proc_entry, &new_iface->rx_bytes); + + create_proc_read_entry("tx_packets", S_IRUGO, proc_entry, + read_proc_entry, &new_iface->tx_packets); + + create_proc_read_entry("rx_packets", S_IRUGO, proc_entry, + read_proc_entry, &new_iface->rx_packets); + + create_proc_read_entry("active", S_IRUGO, proc_entry, + read_proc_bool_entry, &new_iface->active); + + pr_debug("iface_stat: Now monitoring device %s with ip %pI4\n", + ifname, &ipaddr); +} + +/* + * Update stats for the specified interface. Do nothing if the entry + * does not exist (when a device was never configured with an IP address). + * Called when an device is being unregistered. + */ +void iface_stat_update(struct net_device *dev) +{ + const struct net_device_stats *stats = dev_get_stats(dev); + struct iface_stat *entry; + + ASSERT_RTNL(); + + entry = get_iface_stat(dev->name); + if (entry == NULL) { + pr_debug("iface_stat: dev %s monitor not found\n", dev->name); + return; + } + + if (entry->active) { /* FIXME: Support for more than 4GB */ + entry->tx_bytes += stats->tx_bytes; + entry->tx_packets += stats->tx_packets; + entry->rx_bytes += stats->rx_bytes; + entry->rx_packets += stats->rx_packets; + entry->active = false; + pr_debug("iface_stat: Updating stats for " + "dev %s which went down\n", dev->name); + } else + pr_debug("iface_stat: Didn't update stats for " + "dev %s which went down\n", dev->name); +} + +static int __init iface_stat_init(void) +{ + iface_stat_procdir = proc_mkdir("iface_stat", NULL); + if (!iface_stat_procdir) { + pr_err("iface_stat: failed to create proc entry\n"); + return -1; + } + + return 0; +} + +device_initcall(iface_stat_init); diff 
--git a/drivers/misc/kernel_debugger.c b/drivers/misc/kernel_debugger.c new file mode 100644 index 00000000000..44c19fd2f26 --- /dev/null +++ b/drivers/misc/kernel_debugger.c @@ -0,0 +1,79 @@ +/* drivers/android/kernel_debugger.c + * + * Guts of the kernel debugger. + * Needs something to actually push commands to it. + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/ctype.h> +#include <linux/device.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/sysrq.h> +#include <linux/kernel_debugger.h> + +#define dprintf(fmt...) (ctxt->printf(ctxt->cookie, fmt)) + +static void do_ps(struct kdbg_ctxt *ctxt) +{ + struct task_struct *g, *p; + unsigned state; + static const char stat_nam[] = "RSDTtZX"; + + dprintf("pid ppid prio task pc\n"); + read_lock(&tasklist_lock); + do_each_thread(g, p) { + state = p->state ? __ffs(p->state) + 1 : 0; + dprintf("%5d %5d %4d ", p->pid, p->parent->pid, p->prio); + dprintf("%-13.13s %c", p->comm, + state >= sizeof(stat_nam) ? '?' 
: stat_nam[state]); + if (state == TASK_RUNNING) + dprintf(" running\n"); + else + dprintf(" %08lx\n", thread_saved_pc(p)); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); +} + +int log_buf_copy(char *dest, int idx, int len); +extern int do_syslog(int type, char __user *bug, int count); +static void do_sysrq(struct kdbg_ctxt *ctxt, char rq) +{ + char buf[128]; + int ret; + int idx = 0; + do_syslog(5 /* clear */, NULL, 0); + handle_sysrq(rq, NULL); + while (1) { + ret = log_buf_copy(buf, idx, sizeof(buf) - 1); + if (ret <= 0) + break; + buf[ret] = 0; + dprintf("%s", buf); + idx += ret; + } +} + +int kernel_debugger(struct kdbg_ctxt *ctxt, char *cmd) +{ + if (!strcmp(cmd, "ps")) + do_ps(ctxt); + if (!strcmp(cmd, "sysrq")) + do_sysrq(ctxt, 'h'); + if (!strncmp(cmd, "sysrq ", 6)) + do_sysrq(ctxt, cmd[6]); + + return 0; +} + diff --git a/drivers/misc/mbox.c b/drivers/misc/mbox.c new file mode 100644 index 00000000000..63435389c54 --- /dev/null +++ b/drivers/misc/mbox.c @@ -0,0 +1,567 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Stefan Nilsson <stefan.xk.nilsson@stericsson.com> for ST-Ericsson. + * Author: Martin Persson <martin.persson@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL), version 2. + */ + +/* + * Mailbox nomenclature: + * + * APE MODEM + * mbox pairX + * .......................... + * . . + * . peer . + * . send ---- . + * . --> | | . + * . | | . + * . ---- . + * . . + * . local . + * . rec ---- . + * . | | <-- . + * . | | . + * . ---- . + * ......................... 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/platform_device.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/completion.h> +#include <mach/mbox.h> + +#define MBOX_NAME "mbox" + +#define MBOX_FIFO_DATA 0x000 +#define MBOX_FIFO_ADD 0x004 +#define MBOX_FIFO_REMOVE 0x008 +#define MBOX_FIFO_THRES_FREE 0x00C +#define MBOX_FIFO_THRES_OCCUP 0x010 +#define MBOX_FIFO_STATUS 0x014 + +#define MBOX_DISABLE_IRQ 0x4 +#define MBOX_ENABLE_IRQ 0x0 +#define MBOX_LATCH 1 + +/* Global list of all mailboxes */ +static struct list_head mboxs = LIST_HEAD_INIT(mboxs); + +static struct mbox *get_mbox_with_id(u8 id) +{ + u8 i; + struct list_head *pos = &mboxs; + for (i = 0; i <= id; i++) + pos = pos->next; + + return (struct mbox *) list_entry(pos, struct mbox, list); +} + +int mbox_send(struct mbox *mbox, u32 mbox_msg, bool block) +{ + int res = 0; + + spin_lock(&mbox->lock); + + dev_dbg(&(mbox->pdev->dev), + "About to buffer 0x%X to mailbox 0x%X." + " ri = %d, wi = %d\n", + mbox_msg, (u32)mbox, mbox->read_index, + mbox->write_index); + + /* Check if write buffer is full */ + while (((mbox->write_index + 1) % MBOX_BUF_SIZE) == mbox->read_index) { + if (!block) { + dev_dbg(&(mbox->pdev->dev), + "Buffer full in non-blocking call! " + "Returning -ENOMEM!\n"); + res = -ENOMEM; + goto exit; + } + spin_unlock(&mbox->lock); + dev_dbg(&(mbox->pdev->dev), + "Buffer full in blocking call! Sleeping...\n"); + mbox->client_blocked = 1; + wait_for_completion(&mbox->buffer_available); + dev_dbg(&(mbox->pdev->dev), + "Blocking send was woken up! 
Trying again...\n"); + spin_lock(&mbox->lock); + } + + mbox->buffer[mbox->write_index] = mbox_msg; + mbox->write_index = (mbox->write_index + 1) % MBOX_BUF_SIZE; + + /* + * Indicate that we want an IRQ as soon as there is a slot + * in the FIFO + */ + writel(MBOX_ENABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE); + +exit: + spin_unlock(&mbox->lock); + return res; +} +EXPORT_SYMBOL(mbox_send); + +#if defined(CONFIG_DEBUG_FS) +/* + * Expected input: <value> <nbr sends> + * Example: "echo 0xdeadbeef 4 > mbox-node" sends 0xdeadbeef 4 times + */ +static ssize_t mbox_write_fifo(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + unsigned long mbox_mess; + unsigned long nbr_sends; + unsigned long i; + char int_buf[16]; + char *token; + char *val; + + struct mbox *mbox = (struct mbox *) dev->platform_data; + + strncpy((char *) &int_buf, buf, sizeof(int_buf)); + token = (char *) &int_buf; + + /* Parse message */ + val = strsep(&token, " "); + if ((val == NULL) || (strict_strtoul(val, 16, &mbox_mess) != 0)) + mbox_mess = 0xDEADBEEF; + + val = strsep(&token, " "); + if ((val == NULL) || (strict_strtoul(val, 10, &nbr_sends) != 0)) + nbr_sends = 1; + + dev_dbg(dev, "Will write 0x%lX %ld times using data struct at 0x%X\n", + mbox_mess, nbr_sends, (u32) mbox); + + for (i = 0; i < nbr_sends; i++) + mbox_send(mbox, mbox_mess, true); + + return count; +} + +static ssize_t mbox_read_fifo(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int mbox_value; + struct mbox *mbox = (struct mbox *) dev->platform_data; + + if ((readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7) <= 0) + return sprintf(buf, "Mailbox is empty\n"); + + mbox_value = readl(mbox->virtbase_local + MBOX_FIFO_DATA); + writel(MBOX_LATCH, (mbox->virtbase_local + MBOX_FIFO_REMOVE)); + + return sprintf(buf, "0x%X\n", mbox_value); +} + +static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo); + +static int mbox_show(struct seq_file 
*s, void *data) +{ + struct list_head *pos; + u8 mbox_index = 0; + + list_for_each(pos, &mboxs) { + struct mbox *m = + (struct mbox *) list_entry(pos, struct mbox, list); + if (m == NULL) { + seq_printf(s, + "Unable to retrieve mailbox %d\n", + mbox_index); + continue; + } + + spin_lock(&m->lock); + if ((m->virtbase_peer == NULL) || (m->virtbase_local == NULL)) { + seq_printf(s, "MAILBOX %d not setup or corrupt\n", + mbox_index); + spin_unlock(&m->lock); + continue; + } + + seq_printf(s, + "===========================\n" + " MAILBOX %d\n" + " PEER MAILBOX DUMP\n" + "---------------------------\n" + "FIFO: 0x%X (%d)\n" + "Free Threshold: 0x%.2X (%d)\n" + "Occupied Threshold: 0x%.2X (%d)\n" + "Status: 0x%.2X (%d)\n" + " Free spaces (ot): %d (%d)\n" + " Occup spaces (ot): %d (%d)\n" + "===========================\n" + " LOCAL MAILBOX DUMP\n" + "---------------------------\n" + "FIFO: 0x%.X (%d)\n" + "Free Threshold: 0x%.2X (%d)\n" + "Occupied Threshold: 0x%.2X (%d)\n" + "Status: 0x%.2X (%d)\n" + " Free spaces (ot): %d (%d)\n" + " Occup spaces (ot): %d (%d)\n" + "===========================\n" + "write_index: %d\n" + "read_index : %d\n" + "===========================\n" + "\n", + mbox_index, + readl(m->virtbase_peer + MBOX_FIFO_DATA), + readl(m->virtbase_peer + MBOX_FIFO_DATA), + readl(m->virtbase_peer + MBOX_FIFO_THRES_FREE), + readl(m->virtbase_peer + MBOX_FIFO_THRES_FREE), + readl(m->virtbase_peer + MBOX_FIFO_THRES_OCCUP), + readl(m->virtbase_peer + MBOX_FIFO_THRES_OCCUP), + readl(m->virtbase_peer + MBOX_FIFO_STATUS), + readl(m->virtbase_peer + MBOX_FIFO_STATUS), + (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 4) & 0x7, + (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 7) & 0x1, + (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 0) & 0x7, + (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 3) & 0x1, + readl(m->virtbase_local + MBOX_FIFO_DATA), + readl(m->virtbase_local + MBOX_FIFO_DATA), + readl(m->virtbase_local + MBOX_FIFO_THRES_FREE), + readl(m->virtbase_local + 
MBOX_FIFO_THRES_FREE), + readl(m->virtbase_local + MBOX_FIFO_THRES_OCCUP), + readl(m->virtbase_local + MBOX_FIFO_THRES_OCCUP), + readl(m->virtbase_local + MBOX_FIFO_STATUS), + readl(m->virtbase_local + MBOX_FIFO_STATUS), + (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 4) & 0x7, + (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 7) & 0x1, + (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 0) & 0x7, + (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 3) & 0x1, + m->write_index, m->read_index); + mbox_index++; + spin_unlock(&m->lock); + } + + return 0; +} + +static int mbox_open(struct inode *inode, struct file *file) +{ + return single_open(file, mbox_show, NULL); +} + +static const struct file_operations mbox_operations = { + .owner = THIS_MODULE, + .open = mbox_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +static irqreturn_t mbox_irq(int irq, void *arg) +{ + u32 mbox_value; + int nbr_occup; + int nbr_free; + struct mbox *mbox = (struct mbox *) arg; + + spin_lock(&mbox->lock); + + dev_dbg(&(mbox->pdev->dev), + "mbox IRQ [%d] received. ri = %d, wi = %d\n", + irq, mbox->read_index, mbox->write_index); + + /* + * Check if we have any outgoing messages, and if there is space for + * them in the FIFO. 
+ */ + if (mbox->read_index != mbox->write_index) { + /* + * Check by reading FREE for LOCAL since that indicates + * OCCUP for PEER + */ + nbr_free = (readl(mbox->virtbase_local + MBOX_FIFO_STATUS) + >> 4) & 0x7; + dev_dbg(&(mbox->pdev->dev), + "Status indicates %d empty spaces in the FIFO!\n", + nbr_free); + + while ((nbr_free > 0) && + (mbox->read_index != mbox->write_index)) { + /* Write the message and latch it into the FIFO */ + writel(mbox->buffer[mbox->read_index], + (mbox->virtbase_peer + MBOX_FIFO_DATA)); + writel(MBOX_LATCH, + (mbox->virtbase_peer + MBOX_FIFO_ADD)); + dev_dbg(&(mbox->pdev->dev), + "Wrote message 0x%X to addr 0x%X\n", + mbox->buffer[mbox->read_index], + (u32) (mbox->virtbase_peer + MBOX_FIFO_DATA)); + + nbr_free--; + mbox->read_index = + (mbox->read_index + 1) % MBOX_BUF_SIZE; + } + + /* + * Check if we still want IRQ:s when there is free + * space to send + */ + if (mbox->read_index != mbox->write_index) { + dev_dbg(&(mbox->pdev->dev), + "Still have messages to send, but FIFO full. " + "Request IRQ again!\n"); + writel(MBOX_ENABLE_IRQ, + mbox->virtbase_peer + MBOX_FIFO_THRES_FREE); + } else { + dev_dbg(&(mbox->pdev->dev), + "No more messages to send. 
" + "Do not request IRQ again!\n"); + writel(MBOX_DISABLE_IRQ, + mbox->virtbase_peer + MBOX_FIFO_THRES_FREE); + } + + /* + * Check if we can signal any blocked clients that it is OK to + * start buffering again + */ + if (mbox->client_blocked && + (((mbox->write_index + 1) % MBOX_BUF_SIZE) + != mbox->read_index)) { + dev_dbg(&(mbox->pdev->dev), + "Waking up blocked client\n"); + complete(&mbox->buffer_available); + mbox->client_blocked = 0; + } + } + + /* Check if we have any incoming messages */ + nbr_occup = readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7; + if (nbr_occup == 0) + goto exit; + + if (mbox->cb == NULL) { + dev_dbg(&(mbox->pdev->dev), "No receive callback registered, " + "leaving %d incoming messages in fifo!\n", nbr_occup); + goto exit; + } + + /* Read and acknowledge the message */ + mbox_value = readl(mbox->virtbase_local + MBOX_FIFO_DATA); + writel(MBOX_LATCH, (mbox->virtbase_local + MBOX_FIFO_REMOVE)); + + /* Notify consumer of new mailbox message */ + dev_dbg(&(mbox->pdev->dev), "Calling callback for message 0x%X!\n", + mbox_value); + mbox->cb(mbox_value, mbox->client_data); + +exit: + dev_dbg(&(mbox->pdev->dev), "Exit mbox IRQ. 
ri = %d, wi = %d\n", + mbox->read_index, mbox->write_index); + spin_unlock(&mbox->lock); + + return IRQ_HANDLED; +} + +/* Setup is executed once for each mbox pair */ +struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv) +{ + struct resource *resource; + int irq; + int res; + struct mbox *mbox; + + mbox = get_mbox_with_id(mbox_id); + if (mbox == NULL) { + dev_err(&(mbox->pdev->dev), "Incorrect mailbox id: %d!\n", + mbox_id); + goto exit; + } + + /* + * Check if mailbox has been allocated to someone else, + * otherwise allocate it + */ + if (mbox->allocated) { + dev_err(&(mbox->pdev->dev), "Mailbox number %d is busy!\n", + mbox_id); + mbox = NULL; + goto exit; + } + mbox->allocated = true; + + dev_dbg(&(mbox->pdev->dev), "Initiating mailbox number %d: 0x%X...\n", + mbox_id, (u32)mbox); + + mbox->client_data = priv; + mbox->cb = mbox_cb; + + /* Get addr for peer mailbox and ioremap it */ + resource = platform_get_resource_byname(mbox->pdev, + IORESOURCE_MEM, + "mbox_peer"); + if (resource == NULL) { + dev_err(&(mbox->pdev->dev), + "Unable to retrieve mbox peer resource\n"); + mbox = NULL; + goto exit; + } + dev_dbg(&(mbox->pdev->dev), + "Resource name: %s start: 0x%X, end: 0x%X\n", + resource->name, resource->start, resource->end); + mbox->virtbase_peer = + ioremap(resource->start, resource->end - resource->start); + if (!mbox->virtbase_peer) { + dev_err(&(mbox->pdev->dev), "Unable to ioremap peer mbox\n"); + mbox = NULL; + goto exit; + } + dev_dbg(&(mbox->pdev->dev), + "ioremapped peer physical: (0x%X-0x%X) to virtual: 0x%X\n", + resource->start, resource->end, (u32) mbox->virtbase_peer); + + /* Get addr for local mailbox and ioremap it */ + resource = platform_get_resource_byname(mbox->pdev, + IORESOURCE_MEM, + "mbox_local"); + if (resource == NULL) { + dev_err(&(mbox->pdev->dev), + "Unable to retrieve mbox local resource\n"); + mbox = NULL; + goto exit; + } + dev_dbg(&(mbox->pdev->dev), + "Resource name: %s start: 0x%X, end: 0x%X\n", + 
resource->name, resource->start, resource->end); + mbox->virtbase_local = + ioremap(resource->start, resource->end - resource->start); + if (!mbox->virtbase_local) { + dev_err(&(mbox->pdev->dev), "Unable to ioremap local mbox\n"); + mbox = NULL; + goto exit; + } + dev_dbg(&(mbox->pdev->dev), + "ioremapped local physical: (0x%X-0x%X) to virtual: 0x%X\n", + resource->start, resource->end, (u32) mbox->virtbase_peer); + + init_completion(&mbox->buffer_available); + mbox->client_blocked = 0; + + /* Get IRQ for mailbox and allocate it */ + irq = platform_get_irq_byname(mbox->pdev, "mbox_irq"); + if (irq < 0) { + dev_err(&(mbox->pdev->dev), + "Unable to retrieve mbox irq resource\n"); + mbox = NULL; + goto exit; + } + + dev_dbg(&(mbox->pdev->dev), "Allocating irq %d...\n", irq); + res = request_irq(irq, mbox_irq, 0, mbox->name, (void *) mbox); + if (res < 0) { + dev_err(&(mbox->pdev->dev), + "Unable to allocate mbox irq %d\n", irq); + mbox = NULL; + goto exit; + } + + /* Set up mailbox to not launch IRQ on free space in mailbox */ + writel(MBOX_DISABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE); + + /* + * Set up mailbox to launch IRQ on new message if we have + * a callback set. 
If not, do not raise IRQ, but keep message + * in FIFO for manual retrieval + */ + if (mbox_cb != NULL) + writel(MBOX_ENABLE_IRQ, + mbox->virtbase_local + MBOX_FIFO_THRES_OCCUP); + else + writel(MBOX_DISABLE_IRQ, + mbox->virtbase_local + MBOX_FIFO_THRES_OCCUP); + +#if defined(CONFIG_DEBUG_FS) + res = device_create_file(&(mbox->pdev->dev), &dev_attr_fifo); + if (res != 0) + dev_warn(&(mbox->pdev->dev), + "Unable to create mbox sysfs entry"); + + (void) debugfs_create_file("mbox", S_IFREG | S_IRUGO, NULL, + NULL, &mbox_operations); +#endif + + dev_info(&(mbox->pdev->dev), + "Mailbox driver with index %d initated!\n", mbox_id); + +exit: + return mbox; +} +EXPORT_SYMBOL(mbox_setup); + + +int __init mbox_probe(struct platform_device *pdev) +{ + struct mbox local_mbox; + struct mbox *mbox; + int res = 0; + dev_dbg(&(pdev->dev), "Probing mailbox (pdev = 0x%X)...\n", (u32) pdev); + + memset(&local_mbox, 0x0, sizeof(struct mbox)); + + /* Associate our mbox data with the platform device */ + res = platform_device_add_data(pdev, + (void *) &local_mbox, + sizeof(struct mbox)); + if (res != 0) { + dev_err(&(pdev->dev), + "Unable to allocate driver platform data!\n"); + goto exit; + } + + mbox = (struct mbox *) pdev->dev.platform_data; + mbox->pdev = pdev; + mbox->write_index = 0; + mbox->read_index = 0; + + INIT_LIST_HEAD(&(mbox->list)); + list_add_tail(&(mbox->list), &mboxs); + + sprintf(mbox->name, "%s", MBOX_NAME); + spin_lock_init(&mbox->lock); + + dev_info(&(pdev->dev), "Mailbox driver loaded\n"); + +exit: + return res; +} + +static struct platform_driver mbox_driver = { + .driver = { + .name = MBOX_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init mbox_init(void) +{ + return platform_driver_probe(&mbox_driver, mbox_probe); +} + +module_init(mbox_init); + +void __exit mbox_exit(void) +{ + platform_driver_unregister(&mbox_driver); +} + +module_exit(mbox_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MBOX driver"); diff --git 
a/drivers/misc/mbox_channels-db5500.c b/drivers/misc/mbox_channels-db5500.c new file mode 100644 index 00000000000..78801487e4b --- /dev/null +++ b/drivers/misc/mbox_channels-db5500.c @@ -0,0 +1,1137 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Mailbox Logical Driver + * + * Author: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com> for ST-Ericsson. + * Bibek Basu ,bibek.basu@stericsson.com> + * License terms: GNU General Public License (GPL), version 2. + */ + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/random.h> +#include <mach/mbox.h> +#include <mach/mbox_channels-db5500.h> +#include <linux/io.h> + +/* Defines start sequence number for given mailbox channel */ +#define CHANNEL_START_SEQUENCE_NUMBER 0x80 + +/* Defines number of channels per mailbox unit */ +#define CHANNELS_PER_MBOX_UNIT 256 + +/* + * This macro builds mbox channel PDU header with following format: + * --------------------------------------------------------------------------- + * | | | | | + * | Sequence nmbr | Type | Length | Destination logical channel number | + * | | | | | + * --------------------------------------------------------------------------- + * 31 24 20 16 0 + * + */ +#define BUILD_HEADER(chan, len, type, seq_no) \ + ((chan) | (((len) & 0xf) << 16) | \ + (((type) & 0xf) << 20) | ((seq_no) << 24)) + +/* Returns type from mbox message header */ +#define GET_TYPE(mbox_msg) (((mbox_msg) >> 20) & 0xf) + +/* Returns channel number from mbox message header */ +#define GET_CHANNEL(mbox_msg) ((mbox_msg) & 0xffff) + +/* Returns length of payload from mbox message header */ +#define GET_LENGTH(mbox_msg) (((mbox_msg) >> 16) & 0xf) + +/* Returns sequence number from mbox message header */ +#define GET_SEQ_NUMBER(mbox_msg) (((mbox_msg) >> 24) + +/* Number of buffers */ +#define NUM_DSP_BUFFER 3 + +/* circular buffer indicator */ +static int buffer_index; + +enum mbox_msg{ + 
MBOX_CLOSE, + MBOX_OPEN, + MBOX_SEND, + MBOX_CAST, + MBOX_ACK, + MBOX_NAK, +}; + +enum mbox_dir { + MBOX_TX, + MBOX_RX, +}; + +struct mbox_channel_mapping { + u16 chan_base; + u8 mbox_id; + enum mbox_dir direction; +}; + +/* This table maps mbox logical channel to mbox id and direction */ +static struct mbox_channel_mapping channel_mappings[] = { + {0x500, 2, MBOX_RX}, /* channel 5 maps to mbox 0.1, dsp->app (unsec) */ + {0x900, 2, MBOX_TX}, /* channel 9 maps to mbox 0.0, app->dsp (unsec) */ +}; + +/* This table specifies mailbox ids which mbox channels module will use */ +static u8 mbox_ids[] = { + 2, /* app <-> dsp (unsec) */ +}; + +/** + * struct mbox_unit_status - current status of mbox unit + * @mbox_id : holds mbox unit identification number + * @mbox : holds mbox pointer after mbox_register() call + * @tx_chans : holds list of open tx mbox channels + * @tx_lock: lock for tx channel + * @rx_chans : holds list of open rx mbox channels + * @rx_lock: lock for rx channel + */ +struct mbox_unit_status { + u8 mbox_id; + struct mbox *mbox; + struct list_head tx_chans; + spinlock_t tx_lock; + struct list_head rx_chans; + spinlock_t rx_lock; +}; + +static struct { + struct platform_device *pdev; + struct mbox_unit_status mbox_unit[ARRAY_SIZE(mbox_ids)]; +} channels; + +/* This structure describes pending element for mbox tx channel */ +struct pending_elem { + struct list_head list; + u32 *data; + u8 length; +}; + +struct rx_pending_elem { + u32 buffer[MAILBOX_NR_OF_DATAWORDS]; + u8 length; + void *priv; +}; + +struct rx_pending_elem rx_pending[NUM_DSP_BUFFER + 1]; + +/* This structure holds list of pending elements for mbox tx channel */ +struct tx_channel { + struct list_head pending; +}; + +/* Specific status for mbox rx channel */ +struct rx_channel { + struct list_head pending; + spinlock_t lock; + u32 buffer[MAILBOX_NR_OF_DATAWORDS]; + u8 index; + u8 length; +}; + +/* This structure holds status of mbox channel - common for tx and rx */ +struct channel_status { + 
struct list_head list; + u16 channel; + int state; + mbox_channel_cb_t *cb; + void *priv; + u8 seq_number; + bool with_ack; + struct rx_channel rx; + struct tx_channel tx; + struct work_struct receive_msg; + struct work_struct open_msg; + struct work_struct cast_msg; + struct mutex lock; +}; + +/* Checks if provided channel number is valid */ +static bool check_channel(u16 channel, enum mbox_dir direction) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) { + if ((channel >= channel_mappings[i].chan_base) && + (channel < channel_mappings[i].chan_base + + CHANNELS_PER_MBOX_UNIT)) { + /* Check if direction of given channel is correct*/ + if (channel_mappings[i].direction == direction) + return true; + else + break; + } + } + return false; +} + +/* get the tx channel corresponding to the given rx channel */ +static u16 get_tx_channel(u16 channel) +{ + int i; + int relative_chan = 0; + int mbox_id = 0xFF; + u16 tx_channel = 0xFF; + + for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) { + if ((channel >= channel_mappings[i].chan_base) && + (channel < channel_mappings[i].chan_base + + CHANNELS_PER_MBOX_UNIT)) { + /* Check if direction of given channel is correct*/ + relative_chan = channel - channel_mappings[i].chan_base; + mbox_id = channel_mappings[i].mbox_id; + + } + } + + for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) { + if ((mbox_id == channel_mappings[i].mbox_id) && + (channel_mappings[i].direction == MBOX_TX)) + tx_channel = channel_mappings[i].chan_base + + relative_chan; + } + return tx_channel; +} + +/* Returns mbox unit id for given mbox channel */ +static int get_mbox_id(u16 channel) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) { + if ((channel >= channel_mappings[i].chan_base) && + (channel < channel_mappings[i].chan_base + + CHANNELS_PER_MBOX_UNIT)) { + return channel_mappings[i].mbox_id; + } + } + /* There is no mbox unit registered for given channel */ + return -EINVAL; +} + +/* Returns mbox structure saved 
after mbox_register() call */ +static struct mbox *get_mbox(u16 channel) +{ + int i; + int mbox_id = get_mbox_id(channel); + + if (mbox_id < 0) { + dev_err(&channels.pdev->dev, "couldn't get mbox id\n"); + return NULL; + } + + for (i = 0; i < ARRAY_SIZE(channels.mbox_unit); i++) { + if (channels.mbox_unit[i].mbox_id == mbox_id) + return channels.mbox_unit[i].mbox; + } + return NULL; +} + +/* Returns pointer to rx mbox channels list for given mbox unit */ +static struct list_head *get_rx_list(u8 mbox_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mbox_ids); i++) { + if (channels.mbox_unit[i].mbox_id == mbox_id) + return &channels.mbox_unit[i].rx_chans; + } + return NULL; +} + +/* Returns pointer to tx mbox channels list for given mbox unit */ +static struct list_head *get_tx_list(u8 mbox_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mbox_ids); i++) { + if (channels.mbox_unit[i].mbox_id == mbox_id) + return &channels.mbox_unit[i].tx_chans; + } + return NULL; +} + +static int send_pdu(struct channel_status *chan_status, int command, + u16 channel) +{ + struct mbox *mbox; + u32 header = 0; + int ret = 0; + + /* SEND PDU is not supported */ + if (command == MBOX_SEND) { + dev_err(&channels.pdev->dev, "SEND command not implemented\n"); + ret = -EINVAL; + goto exit; + } + mbox = get_mbox(chan_status->channel); + if (mbox == NULL) { + dev_err(&channels.pdev->dev, "couldn't get mailbox\n"); + ret = -ENOSYS; + goto exit; + } + /* For CAST type send all pending messages */ + if (command == MBOX_CAST) { + struct list_head *pos, *n; + + /* Send all pending messages from TX channel */ + list_for_each_safe(pos, n, &chan_status->tx.pending) { + struct pending_elem *pending = + list_entry(pos, struct pending_elem, list); + int i; + + header = BUILD_HEADER(channel, + pending->length, + command, + chan_status->seq_number); + + ret = mbox_send(mbox, header, true); + if (ret < 0) { + dev_err(&channels.pdev->dev, + "failed to send header, err=%d\n", ret); + goto exit; + } + + for (i = 
0; i < pending->length; i++) { + ret = mbox_send(mbox, pending->data[i], true); + if (ret < 0) { + dev_err(&channels.pdev->dev, + "failed to send header, err=%d\n", ret); + goto exit; + } + } + + /* Call client's callback that data is already sent */ + if (chan_status->cb) + chan_status->cb(pending->data, pending->length, + chan_status->priv); + else + dev_err(&channels.pdev->dev, + "%s no callback provided\n", __func__); + + /* Increment sequence number */ + chan_status->seq_number++; + + /* Remove and free element from the list */ + list_del(&pending->list); + kfree(pending); + } + } else { + header = BUILD_HEADER(channel, 0, + command, chan_status->seq_number); + + ret = mbox_send(mbox, header, true); + if (ret < 0) + dev_err(&channels.pdev->dev, "failed to send header\n"); + /* Increment sequence number */ + chan_status->seq_number++; + } + +exit: + return ret; +} + +void mbox_handle_receive_msg(struct work_struct *work) +{ + struct channel_status *rx_chan = container_of(work, + struct channel_status, + receive_msg); + + /* Call client's callback and reset state */ + if (rx_chan->cb) { + static int rx_pending_count; + + if (rx_pending_count == NUM_DSP_BUFFER) + rx_pending_count = 0; + else + rx_pending_count++; + rx_chan->cb(rx_pending[rx_pending_count].buffer, + rx_pending[rx_pending_count].length, + rx_pending[rx_pending_count].priv); + buffer_index--; + } else { + dev_err(&channels.pdev->dev, + "%s no callback provided\n", __func__); + } + +} + +void mbox_handle_open_msg(struct work_struct *work) +{ + struct channel_status *tx_chan = container_of(work, + struct channel_status, + open_msg); + /* Change channel state to OPEN */ + tx_chan->state = MBOX_OPEN; + /* If pending list not empty, start sending data */ + if (!list_empty(&tx_chan->tx.pending)) + send_pdu(tx_chan, MBOX_CAST, tx_chan->channel); +} + +void mbox_handle_cast_msg(struct work_struct *work) +{ + struct channel_status *rx_chan = container_of(work, + struct channel_status, + cast_msg); + /* Check 
if channel is opened */ + if (rx_chan->state == MBOX_CLOSE) { + /* Peer sent message to closed channel */ + dev_err(&channels.pdev->dev, + "channel in wrong state\n"); + } +} + +static bool handle_receive_msg(u32 mbox_msg, struct list_head *rx_list) +{ + struct list_head *pos; + struct channel_status *tmp; + int i; + static int rx_pending_count; + struct channel_status *rx_chan = NULL; + struct mbox_unit_status *mbox_unit = container_of(rx_list, + struct mbox_unit_status, + rx_chans); + spin_lock(&mbox_unit->rx_lock); + list_for_each(pos, rx_list) { + tmp = list_entry(pos, struct channel_status, list); + if (tmp->state == MBOX_SEND || + tmp->state == MBOX_CAST) + /* Received message is payload */ + rx_chan = tmp; + } + /* Received message is header */ + spin_unlock(&mbox_unit->rx_lock); + if (rx_chan) { + /* Store received data in RX channel buffer */ + rx_chan->rx.buffer[rx_chan->rx.index++] = mbox_msg; + /* Check if it's last data of PDU */ + if (rx_chan->rx.index == rx_chan->rx.length) { + if (rx_pending_count == NUM_DSP_BUFFER) + rx_pending_count = 0; + else + rx_pending_count++; + for (i = 0; i < MAILBOX_NR_OF_DATAWORDS; i++) { + rx_pending[rx_pending_count].buffer[i] = + rx_chan->rx.buffer[i]; + } + rx_pending[rx_pending_count].length = + rx_chan->rx.length; + rx_pending[rx_pending_count].priv = rx_chan->priv; + rx_chan->rx.index = 0; + rx_chan->rx.length = 0; + rx_chan->state = MBOX_OPEN; + rx_chan->seq_number++; + buffer_index++; + if (buffer_index >= NUM_DSP_BUFFER) + dev_err(&channels.pdev->dev, + "rxbuf overflow%d\n", buffer_index); + schedule_work(&rx_chan->receive_msg); + } + dev_dbg(&channels.pdev->dev, "%s OK\n", __func__); + return true; + } + return false; +} + +static void handle_open_msg(u16 channel, u8 mbox_id) +{ + struct list_head *tx_list, *pos; + struct channel_status *tmp; + struct channel_status *tx_chan = NULL; + struct mbox_unit_status *mbox_unit; + channel = get_tx_channel(channel); + dev_dbg(&channels.pdev->dev, "%s mbox_id %d\tchannel 
%x\n", + __func__, mbox_id, channel); + /* Get TX channesenx for given mbox unit */ + tx_list = get_tx_list(mbox_id); + if (tx_list == NULL) { + dev_err(&channels.pdev->dev, "given mbox id is not valid %d\n", + mbox_id); + return; + } + mbox_unit = container_of(tx_list, struct mbox_unit_status, tx_chans); + /* Search for channel in tx list */ + spin_lock(&mbox_unit->tx_lock); + list_for_each(pos, tx_list) { + tmp = list_entry(pos, struct channel_status, list); + dev_dbg(&channels.pdev->dev, "tmp->channel=%d\n", + tmp->channel); + if (tmp->channel == channel) + tx_chan = tmp; + } + spin_unlock(&mbox_unit->tx_lock); + if (tx_chan) { + schedule_work(&tx_chan->open_msg); + } else { + /* No tx channel found on the list, allocate new element */ + tx_chan = kzalloc(sizeof(*tx_chan), GFP_KERNEL); + if (tx_chan == NULL) { + dev_err(&channels.pdev->dev, + "failed to allocate memory\n"); + return; + } + + /* Fill initial data and add this element to tx list */ + tx_chan->channel = get_tx_channel(channel); + tx_chan->state = MBOX_OPEN; + tx_chan->seq_number = CHANNEL_START_SEQUENCE_NUMBER; + INIT_LIST_HEAD(&tx_chan->tx.pending); + INIT_WORK(&tx_chan->open_msg, mbox_handle_open_msg); + INIT_WORK(&tx_chan->cast_msg, mbox_handle_cast_msg); + INIT_WORK(&tx_chan->receive_msg, mbox_handle_receive_msg); + mutex_init(&tx_chan->lock); + spin_lock(&mbox_unit->tx_lock); + list_add_tail(&tx_chan->list, tx_list); + spin_unlock(&mbox_unit->tx_lock); + } +} + +static void handle_cast_msg(u16 channel, struct list_head *rx_list, + u32 mbox_msg, bool send) +{ + struct list_head *pos; + struct channel_status *tmp; + struct channel_status *rx_chan = NULL; + struct mbox_unit_status *mbox_unit = container_of(rx_list, + struct mbox_unit_status, + rx_chans); + dev_dbg(&channels.pdev->dev, " %s\n", __func__); + /* Search for channel in rx list */ + spin_lock(&mbox_unit->rx_lock); + list_for_each(pos, rx_list) { + tmp = list_entry(pos, struct channel_status, list); + if (tmp->channel == channel) + 
rx_chan = tmp; + } + spin_unlock(&mbox_unit->rx_lock); + + if (rx_chan) { + rx_chan->rx.buffer[0] = mbox_msg; + rx_chan->with_ack = send; + rx_chan->rx.length = GET_LENGTH(rx_chan->rx.buffer[0]); + if (rx_chan->rx.length <= MAILBOX_NR_OF_DATAWORDS && + rx_chan->rx.length > 0) { + rx_chan->rx.index = 0; + rx_chan->state = MBOX_CAST; + } + schedule_work(&rx_chan->cast_msg); + } else { + /* Channel not found, peer sent wrong message */ + dev_err(&channels.pdev->dev, "channel %d doesn't exist\n", + channel); + } +} + +/* + * This callback is called whenever mbox unit receives data. + * priv parameter holds mbox unit id. + */ +static void mbox_cb(u32 mbox_msg, void *priv) +{ + u8 mbox_id = *(u8 *)priv; + struct list_head *rx_list; + u8 type = GET_TYPE(mbox_msg); + u16 channel = GET_CHANNEL(mbox_msg); + + dev_dbg(&channels.pdev->dev, "%s type %d\t, mbox_msg %x\n", + __func__, type, mbox_msg); + /* Get RX channels list for given mbox unit */ + rx_list = get_rx_list(mbox_id); + if (rx_list == NULL) { + dev_err(&channels.pdev->dev, "given mbox id is not valid %d\n", + mbox_id); + return; + } + + /* If received message is payload this function will take care of it */ + if (handle_receive_msg(mbox_msg, rx_list)) + return; + + /* Received message is header as no RX channel is in SEND/CAST state */ + switch (type) { + case MBOX_CLOSE: + /* Not implemented */ + break; + case MBOX_OPEN: + handle_open_msg(channel, mbox_id); + break; + case MBOX_SEND: + handle_cast_msg(channel, rx_list, mbox_msg, true); + break; + case MBOX_CAST: + handle_cast_msg(channel, rx_list, mbox_msg, false); + break; + case MBOX_ACK: + case MBOX_NAK: + /* Not implemented */ + break; + } +} + +int mbox_channel_register(u16 channel, mbox_channel_cb_t *cb, void *priv) +{ + struct channel_status *rx_chan; + struct list_head *pos, *rx_list; + int res = 0; + struct mbox_unit_status *mbox_unit; + + dev_dbg(&channels.pdev->dev, " %s channel = %d\n", __func__, channel); + /* Closing of channels is not implemented */ 
+ if (cb == NULL) { + dev_err(&channels.pdev->dev, + "channel close is not implemented\n"); + res = -EINVAL; + goto exit; + } + + /* Check if provided channel number is valid */ + if (!check_channel(channel, MBOX_RX)) { + dev_err(&channels.pdev->dev, "wrong mbox channel number %d\n", + channel); + res = -EINVAL; + goto exit; + } + + rx_list = get_rx_list(get_mbox_id(channel)); + if (rx_list == NULL) { + dev_err(&channels.pdev->dev, "given mbox id is not valid\n"); + res = -EINVAL; + goto exit; + } + + mbox_unit = container_of(rx_list, struct mbox_unit_status, rx_chans); + + /* Check if channel is already registered */ + spin_lock(&mbox_unit->rx_lock); + list_for_each(pos, rx_list) { + rx_chan = list_entry(pos, struct channel_status, list); + + if (rx_chan->channel == channel) { + dev_dbg(&channels.pdev->dev, + "channel already registered\n"); + rx_chan->cb = cb; + rx_chan->priv = priv; + spin_unlock(&mbox_unit->rx_lock); + goto exit; + } + } + spin_unlock(&mbox_unit->rx_lock); + + rx_chan = kzalloc(sizeof(*rx_chan), GFP_KERNEL); + if (rx_chan == NULL) { + dev_err(&channels.pdev->dev, + "couldn't allocate channel status\n"); + res = -ENOMEM; + goto exit; + } + + /* Fill out newly allocated element and add it to rx list */ + rx_chan->channel = channel; + rx_chan->cb = cb; + rx_chan->priv = priv; + rx_chan->seq_number = CHANNEL_START_SEQUENCE_NUMBER; + mutex_init(&rx_chan->lock); + INIT_LIST_HEAD(&rx_chan->rx.pending); + INIT_WORK(&rx_chan->open_msg, mbox_handle_open_msg); + INIT_WORK(&rx_chan->cast_msg, mbox_handle_cast_msg); + INIT_WORK(&rx_chan->receive_msg, mbox_handle_receive_msg); + spin_lock(&mbox_unit->rx_lock); + list_add_tail(&rx_chan->list, rx_list); + spin_unlock(&mbox_unit->rx_lock); + + mutex_lock(&rx_chan->lock); + res = send_pdu(rx_chan, MBOX_OPEN, get_tx_channel(rx_chan->channel)); + if (res) { + dev_err(&channels.pdev->dev, "failed to send OPEN command\n"); + spin_lock(&mbox_unit->rx_lock); + list_del(&rx_chan->list); + 
spin_unlock(&mbox_unit->rx_lock); + kfree(rx_chan); + } else { + rx_chan->seq_number++; + rx_chan->state = MBOX_OPEN; + } + mutex_unlock(&rx_chan->lock); +exit: + return res; +} +EXPORT_SYMBOL(mbox_channel_register); + +int mbox_channel_send(struct mbox_channel_msg *msg) +{ + struct list_head *pos, *tx_list; + struct channel_status *tmp = NULL; + struct channel_status *tx_chan = NULL; + struct pending_elem *pending; + struct mbox_unit_status *mbox_unit; + + if (msg->length > MAILBOX_NR_OF_DATAWORDS || msg->length == 0) { + dev_err(&channels.pdev->dev, "data length incorrect\n"); + return -EINVAL; + } + + if (!check_channel(msg->channel, MBOX_TX)) { + dev_err(&channels.pdev->dev, "wrong channel number %d\n", + msg->channel); + return -EINVAL; + } + + tx_list = get_tx_list(get_mbox_id(msg->channel)); + if (tx_list == NULL) { + dev_err(&channels.pdev->dev, "given mbox id is not valid\n"); + return -EINVAL; + } + + mbox_unit = container_of(tx_list, struct mbox_unit_status, tx_chans); + + spin_lock(&mbox_unit->tx_lock); + dev_dbg(&channels.pdev->dev, "send:tx_list=%x\tmbox_unit=%x\n", + (u32)tx_list, (u32)mbox_unit); + list_for_each(pos, tx_list) { + tmp = list_entry(pos, struct channel_status, list); + if (tmp->channel == msg->channel) + tx_chan = tmp; + } + spin_unlock(&mbox_unit->tx_lock); + /* Allocate pending element and add it to the list */ + pending = kzalloc(sizeof(*pending), GFP_KERNEL); + if (pending == NULL) { + dev_err(&channels.pdev->dev, + "couldn't allocate memory for pending\n"); + return -ENOMEM; + } + pending->data = msg->data; + pending->length = msg->length; + + if (tx_chan) { + mutex_lock(&tx_chan->lock); + list_add_tail(&pending->list, &tx_chan->tx.pending); + tx_chan->cb = msg->cb; + tx_chan->priv = msg->priv; + /* If channel is already opened start sending data */ + if (tx_chan->state == MBOX_OPEN) + send_pdu(tx_chan, MBOX_CAST, tx_chan->channel); + /* Stop processing here */ + mutex_unlock(&tx_chan->lock); + } else { + /* No channel found on 
the list, allocate new element */ + tx_chan = kzalloc(sizeof(*tx_chan), GFP_KERNEL); + if (tx_chan == NULL) { + dev_err(&channels.pdev->dev, + "couldn't allocate memory for \ + tx_chan\n"); + return -ENOMEM; + } + tx_chan->channel = msg->channel; + tx_chan->cb = msg->cb; + tx_chan->priv = msg->priv; + tx_chan->state = MBOX_CLOSE; + tx_chan->seq_number = CHANNEL_START_SEQUENCE_NUMBER; + INIT_LIST_HEAD(&tx_chan->tx.pending); + INIT_WORK(&tx_chan->open_msg, mbox_handle_open_msg); + INIT_WORK(&tx_chan->cast_msg, mbox_handle_cast_msg); + INIT_WORK(&tx_chan->receive_msg, mbox_handle_receive_msg); + mutex_init(&tx_chan->lock); + spin_lock(&mbox_unit->tx_lock); + list_add_tail(&tx_chan->list, tx_list); + spin_unlock(&mbox_unit->tx_lock); + mutex_lock(&tx_chan->lock); + list_add_tail(&pending->list, &tx_chan->tx.pending); + mutex_unlock(&tx_chan->lock); + } + return 0; +} +EXPORT_SYMBOL(mbox_channel_send); + +static void revoke_pending_msgs(struct channel_status *tx_chan) +{ + struct list_head *pos, *n; + struct pending_elem *pending; + + list_for_each_safe(pos, n, &tx_chan->tx.pending) { + pending = list_entry(pos, struct pending_elem, list); + + if (tx_chan->cb) + tx_chan->cb(pending->data, pending->length, + tx_chan->priv); + else + dev_err(&channels.pdev->dev, + "%s no callback provided\n", __func__); + list_del(&pending->list); + kfree(pending); + } +} + +/* Clear all pending messages from TX channel */ +int mbox_channel_revoke_messages(u16 channel) +{ + struct list_head *pos, *tx_list; + struct channel_status *tmp; + struct channel_status *tx_chan = NULL; + struct mbox_unit_status *mbox_unit; + int res = 0; + + if (!check_channel(channel, MBOX_TX)) { + dev_err(&channels.pdev->dev, + "wrong channel number %d\n", channel); + return -EINVAL; + } + + tx_list = get_tx_list(get_mbox_id(channel)); + if (tx_list == NULL) { + dev_err(&channels.pdev->dev, "given mbox id is not valid\n"); + return -EINVAL; + } + + mbox_unit = container_of(tx_list, struct mbox_unit_status, 
tx_chans); + + spin_lock(&mbox_unit->tx_lock); + list_for_each(pos, tx_list) { + tmp = list_entry(pos, struct channel_status, list); + if (tmp->channel == channel) + tx_chan = tmp; + } + spin_unlock(&mbox_unit->tx_lock); + + if (tx_chan) { + mutex_lock(&tx_chan->lock); + revoke_pending_msgs(tx_chan); + mutex_unlock(&tx_chan->lock); + dev_dbg(&channels.pdev->dev, "channel %d cleared\n", + channel); + } else { + dev_err(&channels.pdev->dev, "no channel found\n"); + res = -EINVAL; + } + + dev_dbg(&channels.pdev->dev, "%s exiting %d\n", __func__, res); + return res; +} +EXPORT_SYMBOL(mbox_channel_revoke_messages); + +#if defined(CONFIG_DEBUG_FS) +#define MBOXTEST_DEBUG 1 +#ifdef MBOXTEST_DEBUG +#define DBG_TEST(x) x +#else +#define DBG_TEST(x) +#endif + +#define MBOX_TEST_MAX_WORDS 3 +#define MBOX_RX_CHAN 0x500 +#define MBOX_TX_RX_CHANNEL_DIFF 0x400 +#define MBOX_MAX_NUM_TRANSFER 30000 +static int registration_done; +/** + * struct mboxtest_data - mbox test via debugfs information + * @rx_buff: Buffer for incomming data + * @rx_pointer: Ptr to actual RX data buff + * @tx_buff Buffer for outgoing data + * @tx_pointer: Ptr to actual TX data buff + * @tx_done: TX Transfer done indicator + * @rx_done: RX Transfer done indicator + * @received Received words + * @xfer_words: Num of bytes in actual trf + * @xfers: Number of transfers + * @words Number of total words + * @channel: Channel test number + */ +struct mboxtest_data { + unsigned int *rx_buff; + unsigned int *rx_pointer; + unsigned int *tx_buff; + unsigned int *tx_pointer; + struct completion tx_done; + struct completion rx_done; + int received; + int xfer_words; + int xfers; + int words; + int channel; +}; + +static void mboxtest_receive_cb(u32 *data, u32 len, void *arg) +{ + struct mboxtest_data *mboxtest = (struct mboxtest_data *) arg; + int i; + + printk(KERN_INFO "receive_cb.. 
data.= 0x%X, len = %d\n", + *data, len); + + for (i = 0; i < len; i++) + *(mboxtest->rx_pointer++) = *(data++); + + mboxtest->received += len; + + printk(KERN_INFO "received = %d, words = %d\n", + mboxtest->received, mboxtest->words); + if (mboxtest->received >= mboxtest->words) + complete(&mboxtest->rx_done); + dev_dbg(&channels.pdev->dev, "%s exiting\n", __func__); +} + +static void mboxtest_send_cb(u32 *data, u32 len, void *arg) +{ + struct mboxtest_data *mboxtest = (struct mboxtest_data *) arg; + + printk(KERN_INFO "send_cb.. data.= 0x%X, len = %d\n", + *data, len); + + complete(&mboxtest->tx_done); + dev_dbg(&channels.pdev->dev, "kernel:mboxtest_send_cb exiting\n"); +} + +static int mboxtest_transmit(struct mboxtest_data *mboxtest) +{ + int status = 0; + struct mbox_channel_msg msg; + + dev_dbg(&channels.pdev->dev, "%s entering\n", __func__); + init_completion(&mboxtest->tx_done); + + msg.channel = mboxtest->channel; + msg.data = mboxtest->tx_pointer; + msg.length = mboxtest->words; + msg.cb = mboxtest_send_cb; + msg.priv = mboxtest; + + status = mbox_channel_send(&msg); + if (!status) { + mboxtest->tx_pointer += mboxtest->xfer_words; + wait_for_completion(&mboxtest->tx_done); + } + + dev_dbg(&channels.pdev->dev, "%s exiting %d\n", + __func__, status); + return status; +} + +static int transfer_test(struct mboxtest_data *mboxtest) +{ + int status = 0; + int len = 0; + int i; + + len = mboxtest->words; + + dev_dbg(&channels.pdev->dev, "%s enterring\n", __func__); + /* Allocate buffers */ + mboxtest->rx_buff = kzalloc(sizeof(unsigned int) * len, GFP_KERNEL); + if (!mboxtest->rx_buff) { + DBG_TEST(printk(KERN_INFO + "Cannot allocate mbox rx memory\n")); + status = -ENOMEM; + goto err1; + } + memset(mboxtest->rx_buff, '\0', sizeof(unsigned int) * len); + + mboxtest->tx_buff = kzalloc(sizeof(unsigned int) * len, GFP_KERNEL); + if (!mboxtest->tx_buff) { + DBG_TEST(printk(KERN_INFO + "Cannot allocate mbox tx memory\n")); + status = -ENOMEM; + goto err2; + } + 
memset(mboxtest->tx_buff, '\0', sizeof(unsigned int) * len); + + /* Generate data */ + get_random_bytes((unsigned char *)mboxtest->tx_buff, + sizeof(unsigned int) * len); + /* Set pointers */ + mboxtest->tx_pointer = mboxtest->tx_buff; + mboxtest->rx_pointer = mboxtest->rx_buff; + mboxtest->received = 0; + init_completion(&mboxtest->rx_done); + + /* Start tx transfer test transfer */ + status = mboxtest_transmit(mboxtest); + DBG_TEST(printk(KERN_INFO "xfer_words=%d\n", + mboxtest->xfer_words)); + if (!status) + wait_for_completion(&mboxtest->rx_done); + for (i = 0; i < len; i++) + DBG_TEST(printk(KERN_INFO "%d -> TX:0x%X, RX:0x%X\n", i, + mboxtest->tx_buff[i], mboxtest->rx_buff[i])); + + dev_dbg(&channels.pdev->dev, "%s exiting %d\n", __func__, status); + return status; +err2: + kfree(mboxtest->rx_buff); +err1: + return status; +} + +static int mboxtest_prepare(struct mboxtest_data *mboxtest) +{ + int err = 0; + + mboxtest->xfers = MBOX_MAX_NUM_TRANSFER; + /* Calculate number of bytes in each transfer */ + mboxtest->xfer_words = mboxtest->words / mboxtest->xfers; + + /* Trim to maxiumum data words per transfer */ + if (mboxtest->xfer_words > MBOX_TEST_MAX_WORDS) { + DBG_TEST(printk(KERN_INFO "Recalculating xfers ...\n")); + mboxtest->xfer_words = MBOX_TEST_MAX_WORDS; + if (mboxtest->words % mboxtest->xfer_words) + mboxtest->xfers = (mboxtest->words / + mboxtest->xfer_words) + 1; + else + mboxtest->xfers = (mboxtest->words / + mboxtest->xfer_words); + } + + DBG_TEST(printk(KERN_INFO "Params: chan=0x%X words=%d, xfers=%d\n", + mboxtest->channel, mboxtest->words, + mboxtest->xfers)); + + if (mbox_channel_register(mboxtest->channel, + mboxtest_receive_cb, mboxtest)) { + DBG_TEST(printk(KERN_INFO "Cannot register mbox channel\n")); + err = -ENOMEM; + goto err; + } + + registration_done = true; + return 0; +err: + kfree(mboxtest); + return err; +} + +struct mboxtest_data mboxtest; +/* + * Expected input: <nbr_channel> <nbr_word> + * Example: "echo 500 2" + */ +static 
ssize_t mbox_write_channel(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + unsigned long nbr_channel; + unsigned long nbr_word; + char int_buf[16]; + char *token; + char *val; + + strncpy((char *) &int_buf, buf, sizeof(int_buf)); + token = (char *) &int_buf; + + /* Parse message */ + val = strsep(&token, " "); + if ((val == NULL) || (strict_strtoul(val, 16, &nbr_channel) != 0)) + nbr_channel = MBOX_RX_CHAN; + + val = strsep(&token, " "); + if ((val == NULL) || (strict_strtoul(val, 16, &nbr_word) != 0)) + nbr_word = 2; + + dev_dbg(dev, "Will setup logical channel %ld\n", nbr_channel); + mboxtest.channel = nbr_channel; + mboxtest.words = nbr_word; + + if (!registration_done) + mboxtest_prepare(&mboxtest); + else + dev_dbg(&channels.pdev->dev, "already registration done\n"); + + return count; +} + +static ssize_t mbox_read_channel(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + + unsigned long i; + static bool config_done; + + if (!config_done) { + config_done = true; + mboxtest.channel += MBOX_TX_RX_CHANNEL_DIFF; + } + dev_dbg(dev, "Will transfer %d words %d times at channel 0x%x\n", + mboxtest.words, mboxtest.xfers, mboxtest.channel); + for (i = 0; i < mboxtest.xfers; i++) + transfer_test(&mboxtest); + + return 1; +} +static DEVICE_ATTR(channel, S_IWUGO | S_IRUGO, mbox_read_channel, + mbox_write_channel); + +#endif + +static int __init mbox_channel_probe(struct platform_device *pdev) +{ + int i, ret = 0; + struct mbox *mbox; + + dev_dbg(&(pdev->dev), "Probing mailbox (pdev = 0x%X)...\n", (u32)pdev); + + /* Register to given mailbox units (ids) */ + for (i = 0; i < ARRAY_SIZE(mbox_ids); i++) { + mbox = mbox_setup(mbox_ids[i], mbox_cb, &mbox_ids[i]); + if (mbox == NULL) { + dev_err(&(pdev->dev), "Unable to setup mailbox %d\n", + mbox_ids[i]); + ret = -EBUSY; + goto exit; + } + channels.mbox_unit[i].mbox_id = mbox_ids[i]; + channels.mbox_unit[i].mbox = mbox; + 
INIT_LIST_HEAD(&channels.mbox_unit[i].rx_chans); + INIT_LIST_HEAD(&channels.mbox_unit[i].tx_chans); + spin_lock_init(&channels.mbox_unit[i].rx_lock); + spin_lock_init(&channels.mbox_unit[i].tx_lock); + } + + channels.pdev = pdev; + + dev_dbg(&(pdev->dev), "Mailbox channel driver loaded\n"); +#if defined(CONFIG_DEBUG_FS) + ret = device_create_file(&(pdev->dev), &dev_attr_channel); + if (ret != 0) + dev_warn(&(pdev->dev), + "Unable to create mbox_channel sysfs entry"); + + +#endif +exit: + return ret; +} + +static struct platform_driver mbox_channel_driver = { + .driver = { + .name = "mbox_channel", + .owner = THIS_MODULE, + }, +}; + +static int __init mbox_channel_init(void) +{ + platform_device_register_simple("mbox_channel", 0, NULL, 0); + + return platform_driver_probe(&mbox_channel_driver, mbox_channel_probe); +} +module_init(mbox_channel_init); + +static void __exit mbox_channel_exit(void) +{ + platform_driver_unregister(&mbox_channel_driver); +} +module_exit(mbox_channel_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MBOX channels driver"); diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c new file mode 100644 index 00000000000..0bda86bad25 --- /dev/null +++ b/drivers/misc/pmem.c @@ -0,0 +1,1408 @@ +/* drivers/android/pmem.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/miscdevice.h> +#include <linux/platform_device.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/mm.h> +#include <linux/list.h> +#include <linux/debugfs.h> +#include <linux/android_pmem.h> +#include <linux/mempolicy.h> +#include <linux/sched.h> +#include <linux/dma-mapping.h> +#include <asm/io.h> +#include <asm/uaccess.h> +#include <asm/cacheflush.h> + +#define PMEM_MAX_DEVICES 10 +#define PMEM_MAX_ORDER 128 +#define PMEM_MIN_ALLOC PAGE_SIZE + +#define PMEM_DEBUG 0 + +/* indicates that a refernce to this file has been taken via get_pmem_file, + * the file should not be released until put_pmem_file is called */ +#define PMEM_FLAGS_BUSY 0x1 +/* indicates that this is a suballocation of a larger master range */ +#define PMEM_FLAGS_CONNECTED 0x1 << 1 +/* indicates this is a master and not a sub allocation and that it is mmaped */ +#define PMEM_FLAGS_MASTERMAP 0x1 << 2 +/* submap and unsubmap flags indicate: + * 00: subregion has never been mmaped + * 10: subregion has been mmaped, reference to the mm was taken + * 11: subretion has ben released, refernece to the mm still held + * 01: subretion has been released, reference to the mm has been released + */ +#define PMEM_FLAGS_SUBMAP 0x1 << 3 +#define PMEM_FLAGS_UNSUBMAP 0x1 << 4 + + +struct pmem_data { + /* in alloc mode: an index into the bitmap + * in no_alloc mode: the size of the allocation */ + int index; + /* see flags above for descriptions */ + unsigned int flags; + /* protects this data field, if the mm_mmap sem will be held at the + * same time as this sem, the mm sem must be taken first (as this is + * the order for vma_open and vma_close ops */ + struct rw_semaphore sem; + /* info about the mmaping process */ + struct vm_area_struct *vma; + /* task struct of the mapping process */ + struct task_struct *task; + /* process id of teh mapping process */ + pid_t pid; + /* file descriptor of the master */ + int master_fd; + /* file struct of the master */ + struct file 
*master_file; + /* a list of currently available regions if this is a suballocation */ + struct list_head region_list; + /* a linked list of data so we can access them for debugging */ + struct list_head list; +#if PMEM_DEBUG + int ref; +#endif +}; + +struct pmem_bits { + unsigned allocated:1; /* 1 if allocated, 0 if free */ + unsigned order:7; /* size of the region in pmem space */ +}; + +struct pmem_region_node { + struct pmem_region region; + struct list_head list; +}; + +#define PMEM_DEBUG_MSGS 0 +#if PMEM_DEBUG_MSGS +#define DLOG(fmt,args...) \ + do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \ + ##args); } \ + while (0) +#else +#define DLOG(x...) do {} while (0) +#endif + +struct pmem_info { + struct miscdevice dev; + /* physical start address of the remaped pmem space */ + unsigned long base; + /* vitual start address of the remaped pmem space */ + unsigned char __iomem *vbase; + /* total size of the pmem space */ + unsigned long size; + /* number of entries in the pmem space */ + unsigned long num_entries; + /* pfn of the garbage page in memory */ + unsigned long garbage_pfn; + /* index of the garbage page in the pmem space */ + int garbage_index; + /* the bitmap for the region indicating which entries are allocated + * and which are free */ + struct pmem_bits *bitmap; + /* indicates the region should not be managed with an allocator */ + unsigned no_allocator; + /* indicates maps of this region should be cached, if a mix of + * cached and uncached is desired, set this and open the device with + * O_SYNC to get an uncached region */ + unsigned cached; + unsigned buffered; + /* in no_allocator mode the first mapper gets the whole space and sets + * this flag */ + unsigned allocated; + /* for debugging, creates a list of pmem file structs, the + * data_list_sem should be taken before pmem_data->sem if both are + * needed */ + struct semaphore data_list_sem; + struct list_head data_list; + /* pmem_sem protects the bitmap array + * a 
write lock should be held when modifying entries in bitmap + * a read lock should be held when reading data from bits or + * dereferencing a pointer into bitmap + * + * pmem_data->sem protects the pmem data of a particular file + * Many of the function that require the pmem_data->sem have a non- + * locking version for when the caller is already holding that sem. + * + * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER: + * down(pmem_data->sem) => down(bitmap_sem) + */ + struct rw_semaphore bitmap_sem; + + long (*ioctl)(struct file *, unsigned int, unsigned long); + int (*release)(struct inode *, struct file *); +}; + +static struct pmem_info pmem[PMEM_MAX_DEVICES]; +static int id_count; + +#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated) +#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order +#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index))) +#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index))) +#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC) +#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base) +#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC) +#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \ + PMEM_LEN(id, index)) +#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(id, index) + pmem[id].vbase) +#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \ + PMEM_LEN(id, index)) +#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED) +#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK))) +#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \ + (!(data->flags & PMEM_FLAGS_UNSUBMAP))) + +static int pmem_release(struct inode *, struct file *); +static int pmem_mmap(struct file *, struct vm_area_struct *); +static int pmem_open(struct inode *, struct file *); +static long pmem_ioctl(struct file *, unsigned int, unsigned long); + +struct file_operations pmem_fops = { + .release = pmem_release, + .mmap = 
pmem_mmap, + .open = pmem_open, + .unlocked_ioctl = pmem_ioctl, +}; + +static int get_id(struct file *file) +{ + return MINOR(file->f_dentry->d_inode->i_rdev); +} + +int is_pmem_file(struct file *file) +{ + int id; + + if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode)) + return 0; + id = get_id(file); + if (unlikely(id >= PMEM_MAX_DEVICES)) + return 0; + if (unlikely(file->f_dentry->d_inode->i_rdev != + MKDEV(MISC_MAJOR, pmem[id].dev.minor))) + return 0; + return 1; +} + +static int has_allocation(struct file *file) +{ + struct pmem_data *data; + /* check is_pmem_file first if not accessed via pmem_file_ops */ + + if (unlikely(!file->private_data)) + return 0; + data = (struct pmem_data *)file->private_data; + if (unlikely(data->index < 0)) + return 0; + return 1; +} + +static int is_master_owner(struct file *file) +{ + struct file *master_file; + struct pmem_data *data; + int put_needed, ret = 0; + + if (!is_pmem_file(file) || !has_allocation(file)) + return 0; + data = (struct pmem_data *)file->private_data; + if (PMEM_FLAGS_MASTERMAP & data->flags) + return 1; + master_file = fget_light(data->master_fd, &put_needed); + if (master_file && data->master_file == master_file) + ret = 1; + fput_light(master_file, put_needed); + return ret; +} + +static int pmem_free(int id, int index) +{ + /* caller should hold the write lock on pmem_sem! 
*/ + int buddy, curr = index; + DLOG("index %d\n", index); + + if (pmem[id].no_allocator) { + pmem[id].allocated--; + return 0; + } + /* clean up the bitmap, merging any buddies */ + pmem[id].bitmap[curr].allocated = 0; + /* find a slots buddy Buddy# = Slot# ^ (1 << order) + * if the buddy is also free merge them + * repeat until the buddy is not free or end of the bitmap is reached + */ + do { + buddy = PMEM_BUDDY_INDEX(id, curr); + if (PMEM_IS_FREE(id, buddy) && + PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) { + PMEM_ORDER(id, buddy)++; + PMEM_ORDER(id, curr)++; + curr = min(buddy, curr); + } else { + break; + } + } while (curr < pmem[id].num_entries); + + return 0; +} + +static void pmem_revoke(struct file *file, struct pmem_data *data); + +static int pmem_release(struct inode *inode, struct file *file) +{ + struct pmem_data *data = (struct pmem_data *)file->private_data; + struct pmem_region_node *region_node; + struct list_head *elt, *elt2; + int id = get_id(file), ret = 0; + + + down(&pmem[id].data_list_sem); + /* if this file is a master, revoke all the memory in the connected + * files */ + if (PMEM_FLAGS_MASTERMAP & data->flags) { + struct pmem_data *sub_data; + list_for_each(elt, &pmem[id].data_list) { + sub_data = list_entry(elt, struct pmem_data, list); + down_read(&sub_data->sem); + if (PMEM_IS_SUBMAP(sub_data) && + file == sub_data->master_file) { + up_read(&sub_data->sem); + pmem_revoke(file, sub_data); + } else + up_read(&sub_data->sem); + } + } + list_del(&data->list); + up(&pmem[id].data_list_sem); + + + down_write(&data->sem); + + /* if its not a conencted file and it has an allocation, free it */ + if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) { + down_write(&pmem[id].bitmap_sem); + ret = pmem_free(id, data->index); + up_write(&pmem[id].bitmap_sem); + } + + /* if this file is a submap (mapped, connected file), downref the + * task struct */ + if (PMEM_FLAGS_SUBMAP & data->flags) + if (data->task) { + 
put_task_struct(data->task); + data->task = NULL; + } + + file->private_data = NULL; + + list_for_each_safe(elt, elt2, &data->region_list) { + region_node = list_entry(elt, struct pmem_region_node, list); + list_del(elt); + kfree(region_node); + } + BUG_ON(!list_empty(&data->region_list)); + + up_write(&data->sem); + kfree(data); + if (pmem[id].release) + ret = pmem[id].release(inode, file); + + return ret; +} + +static int pmem_open(struct inode *inode, struct file *file) +{ + struct pmem_data *data; + int id = get_id(file); + int ret = 0; + + DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file)); + + data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL); + if (!data) { + printk("pmem: unable to allocate memory for pmem metadata."); + return -1; + } + data->flags = 0; + data->index = -1; + data->task = NULL; + data->vma = NULL; + data->pid = 0; + data->master_file = NULL; +#if PMEM_DEBUG + data->ref = 0; +#endif + INIT_LIST_HEAD(&data->region_list); + init_rwsem(&data->sem); + + file->private_data = data; + INIT_LIST_HEAD(&data->list); + + down(&pmem[id].data_list_sem); + list_add(&data->list, &pmem[id].data_list); + up(&pmem[id].data_list_sem); + return ret; +} + +static unsigned long pmem_order(unsigned long len) +{ + int i; + + len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC; + len--; + for (i = 0; i < sizeof(len)*8; i++) + if (len >> i == 0) + break; + return i; +} + +static int pmem_allocate(int id, unsigned long len) +{ + /* caller should hold the write lock on pmem_sem! 
*/ + /* return the corresponding pdata[] entry */ + int curr = 0; + int end = pmem[id].num_entries; + int best_fit = -1; + unsigned long order = pmem_order(len); + + if (pmem[id].no_allocator) { + DLOG("no allocator"); + if (len > pmem[id].size) + return -1; + if (pmem[id].allocated) + printk(KERN_WARNING "pmem: warning pmem[%d].allocated=%d\n", + id, pmem[id].allocated); + pmem[id].allocated++; + return len; + } + + if (order > PMEM_MAX_ORDER) + return -1; + DLOG("order %lx\n", order); + + /* look through the bitmap: + * if you find a free slot of the correct order use it + * otherwise, use the best fit (smallest with size > order) slot + */ + while (curr < end) { + if (PMEM_IS_FREE(id, curr)) { + if (PMEM_ORDER(id, curr) == (unsigned char)order) { + /* set the not free bit and clear others */ + best_fit = curr; + break; + } + if (PMEM_ORDER(id, curr) > (unsigned char)order && + (best_fit < 0 || + PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit))) + best_fit = curr; + } + curr = PMEM_NEXT_INDEX(id, curr); + } + + /* if best_fit < 0, there are no suitable slots, + * return an error + */ + if (best_fit < 0) { + printk("pmem: no space left to allocate!\n"); + return -1; + } + + /* now partition the best fit: + * split the slot into 2 buddies of order - 1 + * repeat until the slot is of the correct order + */ + while (PMEM_ORDER(id, best_fit) > (unsigned char)order) { + int buddy; + PMEM_ORDER(id, best_fit) -= 1; + buddy = PMEM_BUDDY_INDEX(id, best_fit); + PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit); + } + pmem[id].bitmap[best_fit].allocated = 1; + return best_fit; +} + +static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot) +{ + int id = get_id(file); +#ifdef pgprot_noncached + if (pmem[id].cached == 0 || file->f_flags & O_SYNC) + return pgprot_noncached(vma_prot); +#endif +#ifdef pgprot_ext_buffered + else if (pmem[id].buffered) + return pgprot_ext_buffered(vma_prot); +#endif + return vma_prot; +} + +static unsigned long pmem_start_addr(int 
id, struct pmem_data *data) +{ + if (pmem[id].no_allocator) + return PMEM_START_ADDR(id, 0); + else + return PMEM_START_ADDR(id, data->index); + +} + +static void *pmem_start_vaddr(int id, struct pmem_data *data) +{ + return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase; +} + +static unsigned long pmem_len(int id, struct pmem_data *data) +{ + if (pmem[id].no_allocator) + return data->index; + else + return PMEM_LEN(id, data->index); +} + +static int pmem_map_garbage(int id, struct vm_area_struct *vma, + struct pmem_data *data, unsigned long offset, + unsigned long len) +{ + int i, garbage_pages = len >> PAGE_SHIFT; + + vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE; + for (i = 0; i < garbage_pages; i++) { + if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE), + pmem[id].garbage_pfn)) + return -EAGAIN; + } + return 0; +} + +static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma, + struct pmem_data *data, unsigned long offset, + unsigned long len) +{ + int garbage_pages; + DLOG("unmap offset %lx len %lx\n", offset, len); + + BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); + + garbage_pages = len >> PAGE_SHIFT; + zap_page_range(vma, vma->vm_start + offset, len, NULL); + pmem_map_garbage(id, vma, data, offset, len); + return 0; +} + +static int pmem_map_pfn_range(int id, struct vm_area_struct *vma, + struct pmem_data *data, unsigned long offset, + unsigned long len) +{ + DLOG("map offset %lx len %lx\n", offset, len); + BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start)); + BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end)); + BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); + BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset)); + + if (io_remap_pfn_range(vma, vma->vm_start + offset, + (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT, + len, vma->vm_page_prot)) { + return -EAGAIN; + } + return 0; +} + +static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma, + struct pmem_data *data, unsigned long offset, + unsigned long len) +{ + /* hold the mm 
semp for the vma you are modifying when you call this */ + BUG_ON(!vma); + zap_page_range(vma, vma->vm_start + offset, len, NULL); + return pmem_map_pfn_range(id, vma, data, offset, len); +} + +static void pmem_vma_open(struct vm_area_struct *vma) +{ + struct file *file = vma->vm_file; + struct pmem_data *data = file->private_data; + int id = get_id(file); + /* this should never be called as we don't support copying pmem + * ranges via fork */ + BUG_ON(!has_allocation(file)); + down_write(&data->sem); + /* remap the garbage pages, forkers don't get access to the data */ + pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end); + up_write(&data->sem); +} + +static void pmem_vma_close(struct vm_area_struct *vma) +{ + struct file *file = vma->vm_file; + struct pmem_data *data = file->private_data; + + DLOG("current %u ppid %u file %p count %d\n", current->pid, + current->parent->pid, file, file_count(file)); + if (unlikely(!is_pmem_file(file) || !has_allocation(file))) { + printk(KERN_WARNING "pmem: something is very wrong, you are " + "closing a vm backing an allocation that doesn't " + "exist!\n"); + return; + } + down_write(&data->sem); + if (data->vma == vma) { + data->vma = NULL; + if ((data->flags & PMEM_FLAGS_CONNECTED) && + (data->flags & PMEM_FLAGS_SUBMAP)) + data->flags |= PMEM_FLAGS_UNSUBMAP; + } + /* the kernel is going to free this vma now anyway */ + up_write(&data->sem); +} + +static struct vm_operations_struct vm_ops = { + .open = pmem_vma_open, + .close = pmem_vma_close, +}; + +static int pmem_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct pmem_data *data; + int index; + unsigned long vma_size = vma->vm_end - vma->vm_start; + int ret = 0, id = get_id(file); + + if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) { +#if PMEM_DEBUG + printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned" + " and a multiple of pages_size.\n"); +#endif + return -EINVAL; + } + + data = (struct pmem_data *)file->private_data; + 
down_write(&data->sem); + + /* if file->private_data == unalloced, alloc*/ + if (data && data->index == -1) { + down_write(&pmem[id].bitmap_sem); + index = pmem_allocate(id, vma->vm_end - vma->vm_start); + up_write(&pmem[id].bitmap_sem); + data->index = index; + } + /* either no space was available or an error occured */ + if (!has_allocation(file)) { + ret = -EINVAL; + printk("pmem: could not find allocation for map.\n"); + goto error; + } + + if (pmem_len(id, data) < vma_size) { +#if PMEM_DEBUG + printk(KERN_WARNING "pmem: mmap size [%lu] does not match" + "size of backing region [%lu].\n", vma_size, + pmem_len(id, data)); +#endif + ret = -EINVAL; + goto error; + } + + vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT; + vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot); + + if (data->flags & PMEM_FLAGS_CONNECTED) { + struct pmem_region_node *region_node; + struct list_head *elt; + if (pmem_map_garbage(id, vma, data, 0, vma_size)) { + printk("pmem: mmap failed in kernel!\n"); + ret = -EAGAIN; + goto error; + } + list_for_each(elt, &data->region_list) { + region_node = list_entry(elt, struct pmem_region_node, + list); + DLOG("remapping file: %p %lx %lx\n", file, + region_node->region.offset, + region_node->region.len); + if (pmem_remap_pfn_range(id, vma, data, + region_node->region.offset, + region_node->region.len)) { + ret = -EAGAIN; + goto error; + } + } + data->flags |= PMEM_FLAGS_SUBMAP; + get_task_struct(current->group_leader); + data->task = current->group_leader; + data->vma = vma; +#if PMEM_DEBUG + data->pid = current->pid; +#endif + DLOG("submmapped file %p vma %p pid %u\n", file, vma, + current->pid); + } else { + if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) { + printk(KERN_INFO "pmem: mmap failed in kernel!\n"); + ret = -EAGAIN; + goto error; + } + data->flags |= PMEM_FLAGS_MASTERMAP; + data->pid = current->pid; + } + vma->vm_ops = &vm_ops; +error: + up_write(&data->sem); + return ret; +} + +/* the following are the api for 
accessing pmem regions by other drivers + * from inside the kernel */ +int get_pmem_user_addr(struct file *file, unsigned long *start, + unsigned long *len) +{ + struct pmem_data *data; + if (!is_pmem_file(file) || !has_allocation(file)) { +#if PMEM_DEBUG + printk(KERN_INFO "pmem: requested pmem data from invalid" + "file.\n"); +#endif + return -1; + } + data = (struct pmem_data *)file->private_data; + down_read(&data->sem); + if (data->vma) { + *start = data->vma->vm_start; + *len = data->vma->vm_end - data->vma->vm_start; + } else { + *start = 0; + *len = 0; + } + up_read(&data->sem); + return 0; +} + +int get_pmem_addr(struct file *file, unsigned long *start, + unsigned long *vstart, unsigned long *len) +{ + struct pmem_data *data; + int id; + + if (!is_pmem_file(file) || !has_allocation(file)) { + return -1; + } + + data = (struct pmem_data *)file->private_data; + if (data->index == -1) { +#if PMEM_DEBUG + printk(KERN_INFO "pmem: requested pmem data from file with no " + "allocation.\n"); + return -1; +#endif + } + id = get_id(file); + + down_read(&data->sem); + *start = pmem_start_addr(id, data); + *len = pmem_len(id, data); + *vstart = (unsigned long)pmem_start_vaddr(id, data); + up_read(&data->sem); +#if PMEM_DEBUG + down_write(&data->sem); + data->ref++; + up_write(&data->sem); +#endif + return 0; +} + +int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, + unsigned long *len, struct file **filp) +{ + struct file *file; + + file = fget(fd); + if (unlikely(file == NULL)) { + printk(KERN_INFO "pmem: requested data from file descriptor " + "that doesn't exist."); + return -1; + } + + if (get_pmem_addr(file, start, vstart, len)) + goto end; + + if (filp) + *filp = file; + return 0; +end: + fput(file); + return -1; +} + +void put_pmem_file(struct file *file) +{ + struct pmem_data *data; + int id; + + if (!is_pmem_file(file)) + return; + id = get_id(file); + data = (struct pmem_data *)file->private_data; +#if PMEM_DEBUG + down_write(&data->sem); 
+ if (data->ref == 0) { + printk("pmem: pmem_put > pmem_get %s (pid %d)\n", + pmem[id].dev.name, data->pid); + BUG(); + } + data->ref--; + up_write(&data->sem); +#endif + fput(file); +} + +void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len) +{ + struct pmem_data *data; + int id; + void *vaddr; + struct pmem_region_node *region_node; + struct list_head *elt; + void *flush_start, *flush_end; + + if (!is_pmem_file(file) || !has_allocation(file)) { + return; + } + + id = get_id(file); + data = (struct pmem_data *)file->private_data; + if (!pmem[id].cached || file->f_flags & O_SYNC) + return; + + down_read(&data->sem); + vaddr = pmem_start_vaddr(id, data); + /* if this isn't a submmapped file, flush the whole thing */ + if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) { + dmac_flush_range(vaddr, vaddr + pmem_len(id, data)); + goto end; + } + /* otherwise, flush the region of the file we are drawing */ + list_for_each(elt, &data->region_list) { + region_node = list_entry(elt, struct pmem_region_node, list); + if ((offset >= region_node->region.offset) && + ((offset + len) <= (region_node->region.offset + + region_node->region.len))) { + flush_start = vaddr + region_node->region.offset; + flush_end = flush_start + region_node->region.len; + dmac_flush_range(flush_start, flush_end); + break; + } + } +end: + up_read(&data->sem); +} + +static int pmem_connect(unsigned long connect, struct file *file) +{ + struct pmem_data *data = (struct pmem_data *)file->private_data; + struct pmem_data *src_data; + struct file *src_file; + int ret = 0, put_needed; + + down_write(&data->sem); + /* retrieve the src file and check it is a pmem file with an alloc */ + src_file = fget_light(connect, &put_needed); + DLOG("connect %p to %p\n", file, src_file); + if (!src_file) { + printk("pmem: src file not found!\n"); + ret = -EINVAL; + goto err_no_file; + } + if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) { + printk(KERN_INFO "pmem: src file 
is not a pmem file or has no " + "alloc!\n"); + ret = -EINVAL; + goto err_bad_file; + } + src_data = (struct pmem_data *)src_file->private_data; + + if (has_allocation(file) && (data->index != src_data->index)) { + printk("pmem: file is already mapped but doesn't match this" + " src_file!\n"); + ret = -EINVAL; + goto err_bad_file; + } + data->index = src_data->index; + data->flags |= PMEM_FLAGS_CONNECTED; + data->master_fd = connect; + data->master_file = src_file; + +err_bad_file: + fput_light(src_file, put_needed); +err_no_file: + up_write(&data->sem); + return ret; +} + +static void pmem_unlock_data_and_mm(struct pmem_data *data, + struct mm_struct *mm) +{ + up_write(&data->sem); + if (mm != NULL) { + up_write(&mm->mmap_sem); + mmput(mm); + } +} + +static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data, + struct mm_struct **locked_mm) +{ + int ret = 0; + struct mm_struct *mm = NULL; + *locked_mm = NULL; +lock_mm: + down_read(&data->sem); + if (PMEM_IS_SUBMAP(data)) { + mm = get_task_mm(data->task); + if (!mm) { +#if PMEM_DEBUG + printk("pmem: can't remap task is gone!\n"); +#endif + up_read(&data->sem); + return -1; + } + } + up_read(&data->sem); + + if (mm) + down_write(&mm->mmap_sem); + + down_write(&data->sem); + /* check that the file didn't get mmaped before we could take the + * data sem, this should be safe b/c you can only submap each file + * once */ + if (PMEM_IS_SUBMAP(data) && !mm) { + pmem_unlock_data_and_mm(data, mm); + goto lock_mm; + } + /* now check that vma.mm is still there, it could have been + * deleted by vma_close before we could get the data->sem */ + if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) { + /* might as well release this */ + if (data->flags & PMEM_FLAGS_SUBMAP) { + put_task_struct(data->task); + data->task = NULL; + /* lower the submap flag to show the mm is gone */ + data->flags &= ~(PMEM_FLAGS_SUBMAP); + } + pmem_unlock_data_and_mm(data, mm); + return -1; + } + *locked_mm = mm; + return ret; +} + 
+int pmem_remap(struct pmem_region *region, struct file *file, + unsigned operation) +{ + int ret; + struct pmem_region_node *region_node; + struct mm_struct *mm = NULL; + struct list_head *elt, *elt2; + int id = get_id(file); + struct pmem_data *data = (struct pmem_data *)file->private_data; + + /* pmem region must be aligned on a page boundry */ + if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) || + !PMEM_IS_PAGE_ALIGNED(region->len))) { +#if PMEM_DEBUG + printk("pmem: request for unaligned pmem suballocation " + "%lx %lx\n", region->offset, region->len); +#endif + return -EINVAL; + } + + /* if userspace requests a region of len 0, there's nothing to do */ + if (region->len == 0) + return 0; + + /* lock the mm and data */ + ret = pmem_lock_data_and_mm(file, data, &mm); + if (ret) + return 0; + + /* only the owner of the master file can remap the client fds + * that back in it */ + if (!is_master_owner(file)) { +#if PMEM_DEBUG + printk("pmem: remap requested from non-master process\n"); +#endif + ret = -EINVAL; + goto err; + } + + /* check that the requested range is within the src allocation */ + if (unlikely((region->offset > pmem_len(id, data)) || + (region->len > pmem_len(id, data)) || + (region->offset + region->len > pmem_len(id, data)))) { +#if PMEM_DEBUG + printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n"); +#endif + ret = -EINVAL; + goto err; + } + + if (operation == PMEM_MAP) { + region_node = kmalloc(sizeof(struct pmem_region_node), + GFP_KERNEL); + if (!region_node) { + ret = -ENOMEM; +#if PMEM_DEBUG + printk(KERN_INFO "No space to allocate metadata!"); +#endif + goto err; + } + region_node->region = *region; + list_add(®ion_node->list, &data->region_list); + } else if (operation == PMEM_UNMAP) { + int found = 0; + list_for_each_safe(elt, elt2, &data->region_list) { + region_node = list_entry(elt, struct pmem_region_node, + list); + if (region->len == 0 || + (region_node->region.offset == region->offset && + region_node->region.len == 
region->len)) { + list_del(elt); + kfree(region_node); + found = 1; + } + } + if (!found) { +#if PMEM_DEBUG + printk("pmem: Unmap region does not map any mapped " + "region!"); +#endif + ret = -EINVAL; + goto err; + } + } + + if (data->vma && PMEM_IS_SUBMAP(data)) { + if (operation == PMEM_MAP) + ret = pmem_remap_pfn_range(id, data->vma, data, + region->offset, region->len); + else if (operation == PMEM_UNMAP) + ret = pmem_unmap_pfn_range(id, data->vma, data, + region->offset, region->len); + } + +err: + pmem_unlock_data_and_mm(data, mm); + return ret; +} + +static void pmem_revoke(struct file *file, struct pmem_data *data) +{ + struct pmem_region_node *region_node; + struct list_head *elt, *elt2; + struct mm_struct *mm = NULL; + int id = get_id(file); + int ret = 0; + + data->master_file = NULL; + ret = pmem_lock_data_and_mm(file, data, &mm); + /* if lock_data_and_mm fails either the task that mapped the fd, or + * the vma that mapped it have already gone away, nothing more + * needs to be done */ + if (ret) + return; + /* unmap everything */ + /* delete the regions and region list nothing is mapped any more */ + if (data->vma) + list_for_each_safe(elt, elt2, &data->region_list) { + region_node = list_entry(elt, struct pmem_region_node, + list); + pmem_unmap_pfn_range(id, data->vma, data, + region_node->region.offset, + region_node->region.len); + list_del(elt); + kfree(region_node); + } + /* delete the master file */ + pmem_unlock_data_and_mm(data, mm); +} + +static void pmem_get_size(struct pmem_region *region, struct file *file) +{ + struct pmem_data *data = (struct pmem_data *)file->private_data; + int id = get_id(file); + + if (!has_allocation(file)) { + region->offset = 0; + region->len = 0; + return; + } else { + region->offset = pmem_start_addr(id, data); + region->len = pmem_len(id, data); + } + DLOG("offset %lx len %lx\n", region->offset, region->len); +} + + +static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct 
pmem_data *data; + int id = get_id(file); + + switch (cmd) { + case PMEM_GET_PHYS: + { + struct pmem_region region; + DLOG("get_phys\n"); + if (!has_allocation(file)) { + region.offset = 0; + region.len = 0; + } else { + data = (struct pmem_data *)file->private_data; + region.offset = pmem_start_addr(id, data); + region.len = pmem_len(id, data); + } + /* printk(KERN_INFO "pmem: request for physical address of pmem region " + "from process %d.\n", current->pid);*/ + if (copy_to_user((void __user *)arg, ®ion, + sizeof(struct pmem_region))) + return -EFAULT; + break; + } + case PMEM_MAP: + { + struct pmem_region region; + if (copy_from_user(®ion, (void __user *)arg, + sizeof(struct pmem_region))) + return -EFAULT; + data = (struct pmem_data *)file->private_data; + return pmem_remap(®ion, file, PMEM_MAP); + } + break; + case PMEM_UNMAP: + { + struct pmem_region region; + if (copy_from_user(®ion, (void __user *)arg, + sizeof(struct pmem_region))) + return -EFAULT; + data = (struct pmem_data *)file->private_data; + return pmem_remap(®ion, file, PMEM_UNMAP); + break; + } + case PMEM_GET_SIZE: + { + struct pmem_region region; + DLOG("get_size\n"); + pmem_get_size(®ion, file); + if (copy_to_user((void __user *)arg, ®ion, + sizeof(struct pmem_region))) + return -EFAULT; + break; + } + case PMEM_GET_TOTAL_SIZE: + { + struct pmem_region region; + DLOG("get total size\n"); + region.offset = 0; + get_id(file); + region.len = pmem[id].size; + if (copy_to_user((void __user *)arg, ®ion, + sizeof(struct pmem_region))) + return -EFAULT; + break; + } + case PMEM_ALLOCATE: + { + if (has_allocation(file)) + return -EINVAL; + data = (struct pmem_data *)file->private_data; + data->index = pmem_allocate(id, arg); + break; + } + case PMEM_CONNECT: + DLOG("connect\n"); + return pmem_connect(arg, file); + break; +#if 0 /* Removing android-2.6.32 PMEM_CACHE_FLUSH in favour of STE one */ + case PMEM_CACHE_FLUSH: + { + struct pmem_region region; + DLOG("flush\n"); + if (copy_from_user(®ion, 
(void __user *)arg, + sizeof(struct pmem_region))) + return -EFAULT; + flush_pmem_file(file, region.offset, region.len); + break; + } +#endif + case PMEM_CACHE_FLUSH: + { + struct pmem_cache_flush_region region; + struct vm_area_struct *vma = NULL; + + if (copy_from_user(®ion, (void __user *)arg, + sizeof(struct pmem_cache_flush_region))) + return -EFAULT; + + vma = find_vma(current->mm, region.LogicalAddress); + if (vma ) + { + data = (struct pmem_data *)file->private_data; + down_read(&data->sem); + dmac_flush_range((void *)region.LogicalAddress, + (void *)(region.LogicalAddress+region.size)); +#ifdef CONFIG_OUTER_CACHE + outer_flush_range(region.PhysicalAddress, + region.PhysicalAddress+region.size); +#endif + up_read(&data->sem); + } + break; + } + case PMEM_CACHE_CLEAN: + { + struct pmem_cache_flush_region region; + struct vm_area_struct *vma = NULL; + + if (copy_from_user(®ion, (void __user *)arg, + sizeof(struct pmem_cache_flush_region))) + return -EFAULT; + + vma = find_vma(current->mm, region.LogicalAddress); + if (vma ) + { + data = (struct pmem_data *)file->private_data; + down_read(&data->sem); + dmac_flush_range((void*)region.LogicalAddress, + (void*)(region.LogicalAddress + region.size)); +#ifdef CONFIG_OUTER_CACHE + outer_clean_range(region.PhysicalAddress, + region.PhysicalAddress+region.size); +#endif + up_read(&data->sem); + } + break; + } + case PMEM_CACHE_INVALIDATE: + { + struct pmem_cache_flush_region region; + struct vm_area_struct *vma = NULL; + + if (copy_from_user(®ion, (void __user *)arg, + sizeof(struct pmem_cache_flush_region))) + return -EFAULT; + + vma = find_vma(current->mm, region.LogicalAddress); + if (vma ) + { + data = (struct pmem_data *)file->private_data; + down_read(&data->sem); + dmac_unmap_area((void *)region.LogicalAddress, + region.size, DMA_FROM_DEVICE); +#ifdef CONFIG_OUTER_CACHE + outer_inv_range(region.PhysicalAddress, + region.PhysicalAddress+region.size); +#endif + up_read(&data->sem); + } + break; + } + default: + 
if (pmem[id].ioctl) + return pmem[id].ioctl(file, cmd, arg); + return -EINVAL; + } + return 0; +} + +#if PMEM_DEBUG +static ssize_t debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t debug_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct list_head *elt, *elt2; + struct pmem_data *data; + struct pmem_region_node *region_node; + int id = (int)file->private_data; + const int debug_bufmax = 4096; + static char buffer[4096]; + int n = 0; + + DLOG("debug open\n"); + n = scnprintf(buffer, debug_bufmax, + "pid #: mapped regions (offset, len) (offset,len)...\n"); + + down(&pmem[id].data_list_sem); + list_for_each(elt, &pmem[id].data_list) { + data = list_entry(elt, struct pmem_data, list); + down_read(&data->sem); + n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:", + data->pid); + list_for_each(elt2, &data->region_list) { + region_node = list_entry(elt2, struct pmem_region_node, + list); + n += scnprintf(buffer + n, debug_bufmax - n, + "(%lx,%lx) ", + region_node->region.offset, + region_node->region.len); + } + n += scnprintf(buffer + n, debug_bufmax - n, "\n"); + up_read(&data->sem); + } + up(&pmem[id].data_list_sem); + + n++; + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static struct file_operations debug_fops = { + .read = debug_read, + .open = debug_open, +}; +#endif + +#if 0 +static struct miscdevice pmem_dev = { + .name = "pmem", + .fops = &pmem_fops, +}; +#endif + +int pmem_setup(struct android_pmem_platform_data *pdata, + long (*ioctl)(struct file *, unsigned int, unsigned long), + int (*release)(struct inode *, struct file *)) +{ + int err = 0; + int i, index = 0; + int id = id_count; + id_count++; + + pmem[id].no_allocator = pdata->no_allocator; + pmem[id].cached = pdata->cached; + pmem[id].buffered = pdata->buffered; + pmem[id].base = pdata->start; + pmem[id].size = pdata->size; + pmem[id].ioctl = ioctl; + 
pmem[id].release = release; + init_rwsem(&pmem[id].bitmap_sem); + init_MUTEX(&pmem[id].data_list_sem); + INIT_LIST_HEAD(&pmem[id].data_list); + pmem[id].dev.name = pdata->name; + pmem[id].dev.minor = id; + pmem[id].dev.fops = &pmem_fops; + printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached); + + err = misc_register(&pmem[id].dev); + if (err) { + printk(KERN_ALERT "Unable to register pmem driver!\n"); + goto err_cant_register_device; + } + pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC; + + pmem[id].bitmap = kmalloc(pmem[id].num_entries * + sizeof(struct pmem_bits), GFP_KERNEL); + if (!pmem[id].bitmap) + goto err_no_mem_for_metadata; + + memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) * + pmem[id].num_entries); + + for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) { + if ((pmem[id].num_entries) & 1<<i) { + PMEM_ORDER(id, index) = i; + index = PMEM_NEXT_INDEX(id, index); + } + } + + if (pmem[id].cached) + pmem[id].vbase = ioremap_cached(pmem[id].base, + pmem[id].size); +#ifdef ioremap_ext_buffered + else if (pmem[id].buffered) + pmem[id].vbase = ioremap_ext_buffered(pmem[id].base, + pmem[id].size); +#endif + else + pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size); + + if (pmem[id].vbase == 0) + goto error_cant_remap; + + pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL)); + if (pmem[id].no_allocator) + pmem[id].allocated = 0; + +#if PMEM_DEBUG + debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id, + &debug_fops); +#endif + return 0; +error_cant_remap: + kfree(pmem[id].bitmap); +err_no_mem_for_metadata: + misc_deregister(&pmem[id].dev); +err_cant_register_device: + return -1; +} + +static int pmem_probe(struct platform_device *pdev) +{ + struct android_pmem_platform_data *pdata; + + if (!pdev || !pdev->dev.platform_data) { + printk(KERN_ALERT "Unable to probe pmem!\n"); + return -1; + } + pdata = pdev->dev.platform_data; + return pmem_setup(pdata, NULL, NULL); +} + + +static int pmem_remove(struct 
platform_device *pdev) +{ + int id = pdev->id; + __free_page(pfn_to_page(pmem[id].garbage_pfn)); + misc_deregister(&pmem[id].dev); + return 0; +} + +static struct platform_driver pmem_driver = { + .probe = pmem_probe, + .remove = pmem_remove, + .driver = { .name = "android_pmem" } +}; + + +static int __init pmem_init(void) +{ + return platform_driver_register(&pmem_driver); +} + +static void __exit pmem_exit(void) +{ + platform_driver_unregister(&pmem_driver); +} + +module_init(pmem_init); +module_exit(pmem_exit); + diff --git a/drivers/misc/shrm/Kconfig b/drivers/misc/shrm/Kconfig new file mode 100644 index 00000000000..fffee1c703e --- /dev/null +++ b/drivers/misc/shrm/Kconfig @@ -0,0 +1,49 @@ +# +# SHM HW kernel configuration +# +config U8500_SHRM + tristate "U8500 SHRM hardware driver" + depends on ARCH_U8500 && PHONET + default Y + ---help--- + If you say Y here, you will enable the STN8500 SHM hardware driver. + + If unsure, say N. +choice + prompt "Modem Image Version" + depends on U8500_SHRM + default SHRM_V1_UPDATES_VERSION + + config SHRM_ED_V1_VERSION + depends on U8500_SHRM + bool "SHRM ED / V1 " + help + Modem Images with ED/V1 updates + + config SHRM_V1_UPDATES_VERSION + depends on U8500_SHRM + bool "SHRM V1 UPDATES" + help + Modem Images with V1 Updates + +endchoice + +config U8500_SHRM_LOOP_BACK + tristate "U8500 SHRM loopback" + depends on U8500_SHRM + default n + ---help--- + If you say Y here, you will enable the shm loopback + + If unsure, say N. + +config U8500_SHRM_MODEM_SILENT_RESET + bool "U8500 SHRM Modem Silent Reset" + depends on U8500_SHRM + default n + ---help--- + If you say Y here, you will enable the modem silent reset feature + + If unsure, say N. 
+ + diff --git a/drivers/misc/shrm/Makefile b/drivers/misc/shrm/Makefile new file mode 100644 index 00000000000..8115c24920b --- /dev/null +++ b/drivers/misc/shrm/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for SHRM drivers +# + +ifdef CONFIG_PHONET +u8500_shrm-objs := modem_shrm_driver.o shrm_fifo.o shrm_protocol.o +else +u8500_shrm-objs := shrm_driver.o shrm_fifo.o shrm_protocol.o +endif + +obj-$(CONFIG_U8500_SHRM) += u8500_shrm.o diff --git a/drivers/misc/shrm/modem_shrm_driver.c b/drivers/misc/shrm/modem_shrm_driver.c new file mode 100644 index 00000000000..29368950256 --- /dev/null +++ b/drivers/misc/shrm/modem_shrm_driver.c @@ -0,0 +1,666 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson + * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> for ST-Ericsson + * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/err.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/smp_lock.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/uaccess.h> +#include <asm/atomic.h> +#include <linux/io.h> + +#include <mach/isa_ioctl.h> +#include <mach/shrm_driver.h> +#include <mach/shrm_private.h> +#include <mach/shrm_config.h> +#include <mach/shrm_net.h> +#include <mach/shrm.h> + +#include <linux/skbuff.h> +#ifdef CONFIG_HIGH_RES_TIMERS +#include <linux/hrtimer.h> +static struct hrtimer timer; +#endif +#include <linux/if_ether.h> +#include <linux/netdevice.h> +#include <linux/phonet.h> + +/* debug functionality */ +#define ISA_DEBUG 0 + +#define PHONET_TASKLET +#define MAX_RCV_LEN 2048 + +void do_phonet_rcv_tasklet(unsigned long unused); +struct tasklet_struct phonet_rcv_tasklet; + +/** + * audio_receive() - Receive audio channel completion callback + * @shrm: pointer to shrm 
device information structure + * @data: message pointer + * @n_bytes: message size + * @l2_header: L2 header/device ID 2->audio, 5->audio_loopback + * + * This fucntion is called from the audio receive handler. Copies the audio + * message from the FIFO to the AUDIO queue. The message is later copied from + * this queue to the user buffer through the char or net interface read + * operation. + */ +static int audio_receive(struct shrm_dev *shrm, void *data, + u32 n_bytes, u8 l2_header) +{ + u32 size = 0; + int ret = 0; + int idx; + u8 *psrc; + struct message_queue *q; + struct isadev_context *audiodev; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + idx = shrm_get_cdev_index(l2_header); + if (idx < 0) { + dev_err(shrm->dev, "failed to get index\n"); + return idx; + } + audiodev = &shrm->isa_context->isadev[idx]; + q = &audiodev->dl_queue; + spin_lock(&q->update_lock); + /* Memcopy RX data first */ + if ((q->writeptr+n_bytes) >= q->size) { + psrc = (u8 *)data; + size = (q->size-q->writeptr); + /* Copy First Part of msg */ + memcpy((q->fifo_base+q->writeptr), psrc, size); + psrc += size; + /* Copy Second Part of msg at the top of fifo */ + memcpy(q->fifo_base, psrc, (n_bytes-size)); + } else { + memcpy((q->fifo_base+q->writeptr), data, n_bytes); + } + ret = add_msg_to_queue(q, n_bytes); + spin_unlock(&q->update_lock); + if (ret < 0) + dev_err(shrm->dev, "Adding a msg to message queue failed"); + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return ret; +} + +/** + * common_receive() - Receive common channel completion callback + * @shrm: pointer to the shrm device information structure + * @data: message pointer + * @n_bytes: message size + * @l2_header: L2 header / device ID + * + * This function is called from the receive handler to copy the respective + * ISI, RPC, SECURITY message to its respective queue. The message is then + * copied from queue to the user buffer on char net interface read operation. 
+ */ +static int common_receive(struct shrm_dev *shrm, void *data, + u32 n_bytes, u8 l2_header) +{ + u32 size = 0; + int ret = 0; + int idx; + u8 *psrc; + struct message_queue *q; + struct isadev_context *isa_dev; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + idx = shrm_get_cdev_index(l2_header); + if (idx < 0) { + dev_err(shrm->dev, "failed to get index\n"); + return idx; + } + isa_dev = &shrm->isa_context->isadev[idx]; + q = &isa_dev->dl_queue; + spin_lock(&q->update_lock); + /* Memcopy RX data first */ + if ((q->writeptr+n_bytes) >= q->size) { + dev_dbg(shrm->dev, "Inside Loop Back\n"); + psrc = (u8 *)data; + size = (q->size-q->writeptr); + /* Copy First Part of msg */ + memcpy((q->fifo_base+q->writeptr), psrc, size); + psrc += size; + /* Copy Second Part of msg at the top of fifo */ + memcpy(q->fifo_base, psrc, (n_bytes-size)); + } else { + memcpy((q->fifo_base+q->writeptr), data, n_bytes); + } + ret = add_msg_to_queue(q, n_bytes); + spin_unlock(&q->update_lock); + if (ret < 0) { + dev_err(shrm->dev, "Adding a msg to message queue failed"); + return ret; + } + + + if (l2_header == ISI_MESSAGING) { + if (shrm->netdev_flag_up) { + dev_dbg(shrm->dev, + "scheduling the phonet tasklet from %s!\n", + __func__); + tasklet_schedule(&phonet_rcv_tasklet); + } + dev_dbg(shrm->dev, + "Out of phonet tasklet %s!!!\n", __func__); + } + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return ret; +} + +/** + * rx_common_l2msg_handler() - common channel receive handler + * @l2_header: L2 header + * @msg: pointer to the receive buffer + * @length: length of the msg to read + * @shrm: pointer to shrm device information structure + * + * This function is called to receive the message from CaMsgPendingNotification + * interrupt handler. 
+ */ +static void rx_common_l2msg_handler(u8 l2_header, + void *msg, u32 length, + struct shrm_dev *shrm) +{ + int ret = 0; + dev_dbg(shrm->dev, "%s IN\n", __func__); + + ret = common_receive(shrm, msg, length, l2_header); + if (ret < 0) + dev_err(shrm->dev, + "common receive with l2 header %d failed\n", l2_header); + + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +/** + * rx_audio_l2msg_handler() - audio channel receive handler + * @l2_header: L2 header + * @msg: pointer to the receive buffer + * @length: length of the msg to read + * @shrm: pointer to shrm device information structure + * + * This function is called to receive the message from CaMsgPendingNotification + * interrupt handler. + */ +static void rx_audio_l2msg_handler(u8 l2_header, + void *msg, u32 length, + struct shrm_dev *shrm) +{ + int ret = 0; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + ret = audio_receive(shrm, msg, length, l2_header); + if (ret < 0) + dev_err(shrm->dev, "audio receive failed\n"); + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +static int __init shm_initialise_irq(struct shrm_dev *shrm) +{ + int err = 0; + + err = shrm_protocol_init(shrm, + rx_common_l2msg_handler, rx_audio_l2msg_handler); + if (err < 0) { + dev_err(shrm->dev, "SHM Protocol Init Failure\n"); + return err; + } + + err = request_irq(shrm->ca_wake_irq, + ca_wake_irq_handler, IRQF_TRIGGER_RISING, + "ca_wake-up", shrm); + if (err < 0) { + dev_err(shrm->dev, + "Unable to allocate shm tx interrupt line\n"); + free_irq(shrm->ca_wake_irq, shrm); + return err; + } + + err = request_irq(shrm->ac_read_notif_0_irq, + ac_read_notif_0_irq_handler, 0, + "ac_read_notif_0", shrm); + + if (err < 0) { + dev_err(shrm->dev, + "error ac_read_notif_0_irq interrupt line\n"); + goto irq_err1; + } + + err = request_irq(shrm->ac_read_notif_1_irq, + ac_read_notif_1_irq_handler, 0, + "ac_read_notif_1", shrm); + + if (err < 0) { + dev_err(shrm->dev, + "error ac_read_notif_1_irq interrupt line\n"); + goto irq_err2; + } + + err = 
request_irq(shrm->ca_msg_pending_notif_0_irq, + ca_msg_pending_notif_0_irq_handler, 0, + "ca_msg_pending_notif_0", shrm); + + if (err < 0) { + dev_err(shrm->dev, + "error ca_msg_pending_notif_0_irq line\n"); + goto irq_err3; + } + + err = request_irq(shrm->ca_msg_pending_notif_1_irq, + ca_msg_pending_notif_1_irq_handler, 0, + "ca_msg_pending_notif_1", shrm); + + if (err < 0) { + dev_err(shrm->dev, + "error ca_msg_pending_notif_1_irq interrupt line\n"); + goto irq_err4; + } + return err; +irq_err4: + free_irq(shrm->ca_msg_pending_notif_0_irq, shrm); +irq_err3: + free_irq(shrm->ac_read_notif_1_irq, shrm); +irq_err2: + free_irq(shrm->ac_read_notif_0_irq, shrm); +irq_err1: + free_irq(shrm->ca_wake_irq, shrm); + return err; +} + +static void free_shm_irq(struct shrm_dev *shrm) +{ + free_irq(shrm->ca_wake_irq, shrm); + free_irq(shrm->ac_read_notif_0_irq, shrm); + free_irq(shrm->ac_read_notif_1_irq, shrm); + free_irq(shrm->ca_msg_pending_notif_0_irq, shrm); + free_irq(shrm->ca_msg_pending_notif_1_irq, shrm); +} + + + +#ifdef CONFIG_HIGH_RES_TIMERS +static enum hrtimer_restart callback(struct hrtimer *timer) +{ + return HRTIMER_NORESTART; +} +#endif + +void do_phonet_rcv_tasklet(unsigned long unused) +{ + ssize_t ret; + struct shrm_dev *shrm = (struct shrm_dev *)unused; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + for (;;) { + ret = shrm_net_receive(shrm->ndev); + if (ret == 0) { + dev_dbg(shrm->dev, "len is zero, queue empty\n"); + break; + } + if (ret < 0) { + dev_err(shrm->dev, "len < 0 !!! 
error!!!\n"); + break; + } + } + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +static int shrm_probe(struct platform_device *pdev) +{ + int err = 0; + struct resource *res; + struct shrm_dev *shrm = NULL; + + shrm = kzalloc(sizeof(struct shrm_dev), GFP_KERNEL); + if (shrm == NULL) { + dev_err(&pdev->dev, + "Could not allocate memory for struct shm_dev\n"); + return -ENOMEM; + } + + shrm->dev = &pdev->dev; + /* initialise the SHM */ + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + dev_err(shrm->dev, + "Unable to map Ca Wake up interrupt\n"); + err = -EBUSY; + goto rollback_intr; + } + shrm->ca_wake_irq = res->start; + res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); + + if (!res) { + dev_err(shrm->dev, + "Unable to map APE_Read_notif_common IRQ base\n"); + err = -EBUSY; + goto rollback_intr; + } + shrm->ac_read_notif_0_irq = res->start; + res = platform_get_resource(pdev, IORESOURCE_IRQ, 2); + + if (!res) { + dev_err(shrm->dev, + "Unable to map APE_Read_notif_audio IRQ base\n"); + err = -EBUSY; + goto rollback_intr; + } + shrm->ac_read_notif_1_irq = res->start; + res = platform_get_resource(pdev, IORESOURCE_IRQ, 3); + + if (!res) { + dev_err(shrm->dev, + "Unable to map Cmt_msg_pending_notif_common IRQbase\n"); + err = -EBUSY; + goto rollback_intr; + } + shrm->ca_msg_pending_notif_0_irq = res->start; + res = platform_get_resource(pdev, IORESOURCE_IRQ, 4); + + if (!res) { + dev_err(shrm->dev, + "Unable to map Cmt_msg_pending_notif_audio IRQ base\n"); + err = -EBUSY; + goto rollback_intr; + } + shrm->ca_msg_pending_notif_1_irq = res->start; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!res) { + dev_err(shrm->dev, + "Could not get SHM IO memory information\n"); + err = -ENODEV; + goto rollback_intr; + } + shrm->intr_base = (void __iomem *)ioremap_nocache(res->start, + res->end - res->start + 1); + if (!(shrm->intr_base)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_intr; + } + 
shrm->ape_common_fifo_base_phy = + (u32 *)U8500_SHM_FIFO_APE_COMMON_BASE; + shrm->ape_common_fifo_base = + (void __iomem *)ioremap_nocache( + U8500_SHM_FIFO_APE_COMMON_BASE, + SHM_FIFO_0_SIZE); + shrm->ape_common_fifo_size = (SHM_FIFO_0_SIZE)/4; + + if (!(shrm->ape_common_fifo_base)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_ape_common_fifo_base; + } + shrm->cmt_common_fifo_base_phy = + (u32 *)U8500_SHM_FIFO_CMT_COMMON_BASE; + shrm->cmt_common_fifo_base = + (void __iomem *)ioremap_nocache( + U8500_SHM_FIFO_CMT_COMMON_BASE, SHM_FIFO_0_SIZE); + shrm->cmt_common_fifo_size = (SHM_FIFO_0_SIZE)/4; + + if (!(shrm->cmt_common_fifo_base)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_cmt_common_fifo_base; + } + shrm->ape_audio_fifo_base_phy = + (u32 *)U8500_SHM_FIFO_APE_AUDIO_BASE; + shrm->ape_audio_fifo_base = + (void __iomem *)ioremap_nocache(U8500_SHM_FIFO_APE_AUDIO_BASE, + SHM_FIFO_1_SIZE); + shrm->ape_audio_fifo_size = (SHM_FIFO_1_SIZE)/4; + + if (!(shrm->ape_audio_fifo_base)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_ape_audio_fifo_base; + } + shrm->cmt_audio_fifo_base_phy = + (u32 *)U8500_SHM_FIFO_CMT_AUDIO_BASE; + shrm->cmt_audio_fifo_base = + (void __iomem *)ioremap_nocache(U8500_SHM_FIFO_CMT_AUDIO_BASE, + SHM_FIFO_1_SIZE); + shrm->cmt_audio_fifo_size = (SHM_FIFO_1_SIZE)/4; + + if (!(shrm->cmt_audio_fifo_base)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_cmt_audio_fifo_base; + } + shrm->ac_common_shared_wptr = + (void __iomem *)ioremap(SHM_ACFIFO_0_WRITE_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ac_common_shared_wptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_ac_common_shared_wptr; + } + shrm->ac_common_shared_rptr = + (void __iomem *)ioremap(SHM_ACFIFO_0_READ_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ac_common_shared_rptr)) { + dev_err(shrm->dev, 
"Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + shrm->ca_common_shared_wptr = + (void __iomem *)ioremap(SHM_CAFIFO_0_WRITE_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ca_common_shared_wptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + shrm->ca_common_shared_rptr = + (void __iomem *)ioremap(SHM_CAFIFO_0_READ_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ca_common_shared_rptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + shrm->ac_audio_shared_wptr = + (void __iomem *)ioremap(SHM_ACFIFO_1_WRITE_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ac_audio_shared_wptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + shrm->ac_audio_shared_rptr = + (void __iomem *)ioremap(SHM_ACFIFO_1_READ_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ac_audio_shared_rptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + shrm->ca_audio_shared_wptr = + (void __iomem *)ioremap(SHM_CAFIFO_1_WRITE_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ca_audio_shared_wptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + shrm->ca_audio_shared_rptr = + (void __iomem *)ioremap(SHM_CAFIFO_1_READ_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ca_audio_shared_rptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + + if (isa_init(shrm) != 0) { + dev_err(shrm->dev, "Driver Initialization Error\n"); + err = -EBUSY; + } + /* install handlers and tasklets */ + if (shm_initialise_irq(shrm)) { + dev_err(shrm->dev, + "shm error in interrupt registration\n"); + goto rollback_irq; + } +#ifdef CONFIG_HIGH_RES_TIMERS + hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + timer.function = callback; + hrtimer_start(&timer, ktime_set(0, 2*NSEC_PER_MSEC), HRTIMER_MODE_REL); +#endif + err = shrm_register_netdev(shrm); + if (err < 0) 
+ goto rollback_irq; + + tasklet_init(&phonet_rcv_tasklet, do_phonet_rcv_tasklet, 0); + phonet_rcv_tasklet.data = (unsigned long)shrm; + + platform_set_drvdata(pdev, shrm); + + return err; +rollback_irq: + free_shm_irq(shrm); +rollback_map: + iounmap(shrm->ac_common_shared_wptr); + iounmap(shrm->ac_common_shared_rptr); + iounmap(shrm->ca_common_shared_wptr); + iounmap(shrm->ca_common_shared_rptr); + iounmap(shrm->ac_audio_shared_wptr); + iounmap(shrm->ac_audio_shared_rptr); + iounmap(shrm->ca_audio_shared_wptr); + iounmap(shrm->ca_audio_shared_rptr); +rollback_ac_common_shared_wptr: + iounmap(shrm->cmt_audio_fifo_base); +rollback_cmt_audio_fifo_base: + iounmap(shrm->ape_audio_fifo_base); +rollback_ape_audio_fifo_base: + iounmap(shrm->cmt_common_fifo_base); +rollback_cmt_common_fifo_base: + iounmap(shrm->ape_common_fifo_base); +rollback_ape_common_fifo_base: + iounmap(shrm->intr_base); +rollback_intr: + kfree(shrm); + return err; +} + +static int __exit shrm_remove(struct platform_device *pdev) +{ + struct shrm_dev *shrm = platform_get_drvdata(pdev); + + free_shm_irq(shrm); + iounmap(shrm->intr_base); + iounmap(shrm->ape_common_fifo_base); + iounmap(shrm->cmt_common_fifo_base); + iounmap(shrm->ape_audio_fifo_base); + iounmap(shrm->cmt_audio_fifo_base); + iounmap(shrm->ac_common_shared_wptr); + iounmap(shrm->ac_common_shared_rptr); + iounmap(shrm->ca_common_shared_wptr); + iounmap(shrm->ca_common_shared_rptr); + iounmap(shrm->ac_audio_shared_wptr); + iounmap(shrm->ac_audio_shared_rptr); + iounmap(shrm->ca_audio_shared_wptr); + iounmap(shrm->ca_audio_shared_rptr); + shrm_unregister_netdev(shrm); + isa_exit(shrm); + kfree(shrm); + + return 0; +} + +#ifdef CONFIG_PM +/** + * u8500_shrm_suspend() - This routine puts the SHRM in to sustend state. + * @dev: pointer to device structure. + * + * This routine checks the current ongoing communication with Modem by + * examining the ca_wake state and prevents suspend if modem communication + * is on-going. 
+ * If ca_wake = 1 (high), modem comm. is on-going; don't suspend + * If ca_wake = 0 (low), no comm. with modem on-going.Allow suspend + */ +int u8500_shrm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct shrm_dev *shrm = platform_get_drvdata(pdev); + int err; + + dev_dbg(&pdev->dev, "%s called...\n", __func__); + dev_dbg(&pdev->dev, "ca_wake_req_state = %x\n", + get_ca_wake_req_state()); + + /* if ca_wake_req is high, prevent system suspend */ + if (!get_ca_wake_req_state()) { + err = shrm_suspend_netdev(shrm->ndev); + return err; + } else + return -EBUSY; +} + +/** + * u8500_shrm_resume() - This routine resumes the SHRM from suspend state. + * @dev: pointer to device structure + * + * This routine restore back the current state of the SHRM + */ +int u8500_shrm_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct shrm_dev *shrm = platform_get_drvdata(pdev); + int err; + + dev_dbg(&pdev->dev, "%s called...\n", __func__); + err = shrm_resume_netdev(shrm->ndev); + + return err; +} + +static const struct dev_pm_ops shrm_dev_pm_ops = { + .suspend_noirq = u8500_shrm_suspend, + .resume_noirq = u8500_shrm_resume, +}; +#endif + +static struct platform_driver shrm_driver = { + .remove = __exit_p(shrm_remove), + .driver = { + .name = "u8500_shrm", + .owner = THIS_MODULE, +#ifdef CONFIG_PM + .pm = &shrm_dev_pm_ops, +#endif + }, +}; + +static int __init shrm_driver_init(void) +{ + return platform_driver_probe(&shrm_driver, shrm_probe); +} + +static void __exit shrm_driver_exit(void) +{ + platform_driver_unregister(&shrm_driver); +} + +module_init(shrm_driver_init); +module_exit(shrm_driver_exit); + +MODULE_AUTHOR("Biju Das, Kumar Sanghvi, Arun Murthy"); +MODULE_DESCRIPTION("Shared Memory Modem Driver Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/shrm/shrm_driver.c b/drivers/misc/shrm/shrm_driver.c new file mode 100644 index 00000000000..6277794608a --- /dev/null +++ 
b/drivers/misc/shrm/shrm_driver.c @@ -0,0 +1,1439 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson + * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> for ST-Ericsson + * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + */ + +#define DEBUG + +#include <linux/err.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/smp_lock.h> +#include <linux/poll.h> +#include <linux/mutex.h> +#include <linux/uaccess.h> +#include <asm/atomic.h> +#include <linux/io.h> +#include <linux/slab.h> + +#include <mach/isa_ioctl.h> +#include <mach/shrm_driver.h> +#include <mach/shrm_private.h> +#include <mach/shrm_config.h> +#include <mach/shrm.h> + + +#ifdef CONFIG_HIGH_RES_TIMERS +#include <linux/hrtimer.h> +static struct hrtimer timer; +#endif + + +#define NAME "IPC_ISA" +#define ISA_DEVICES 4 +/**debug functionality*/ +#define ISA_DEBUG 0 + +#define ISI_MESSAGING (0) +#define RPC_MESSAGING (1) +#define AUDIO_MESSAGING (2) +#define SECURITY_MESSAGING (3) + +#define SIZE_OF_FIFO (512*1024) + +static u8 message_fifo[4][SIZE_OF_FIFO]; + +static u8 wr_isi_msg[10*1024]; +static u8 wr_rpc_msg[10*1024]; +static u8 wr_sec_msg[10*1024]; +static u8 wr_audio_msg[10*1024]; + +/* global data */ +/* + * int major:This variable is exported to user as module_param to specify + * major number at load time + */ +static int major; +module_param(major, int, 0); +MODULE_PARM_DESC(major, "Major device number"); +/* global fops mutex */ +static DEFINE_MUTEX(isa_lock); +rx_cb common_rx; +rx_cb audio_rx; + + +static int isi_receive(struct shrm_dev *shrm, void *data, u32 n_bytes); +static int rpc_receive(struct shrm_dev *shrm, void *data, u32 n_bytes); +static int audio_receive(struct shrm_dev *shrm, void *data, u32 n_bytes); +static int security_receive(struct shrm_dev 
*shrm, + void *data, u32 n_bytes); + +static void rx_common_l2msg_handler(u8 l2_header, + void *msg, u32 length, + struct shrm_dev *shrm) +{ + int ret = 0; +#ifdef CONFIG_U8500_SHRM_LOOP_BACK + u8 *pdata; +#endif + dev_dbg(shrm->dev, "%s IN\n", __func__); + + switch (l2_header) { + case ISI_MESSAGING: + ret = isi_receive(shrm, msg, length); + if (ret < 0) + dev_err(shrm->dev, "isi receive failed\n"); + break; + case RPC_MESSAGING: + ret = rpc_receive(shrm, msg, length); + if (ret < 0) + dev_err(shrm->dev, "rpc receive failed\n"); + break; + case SECURITY_MESSAGING: + ret = security_receive(shrm, msg, length); + if (ret < 0) + dev_err(shrm->dev, + "security receive failed\n"); + break; +#ifdef CONFIG_U8500_SHRM_LOOP_BACK + case COMMMON_LOOPBACK_MESSAGING: + pdata = (u8 *)msg; + if ((*pdata == 0x50) || (*pdata == 0xAF)) { + ret = isi_receive(shrm, msg, length); + if (ret < 0) + dev_err(shrm->dev, "isi receive failed\n"); + } else if ((*pdata == 0x0A) || (*pdata == 0xF5)) { + ret = rpc_receive(shrm, msg, length); + if (ret < 0) + dev_err(shrm->dev, "rpc receive failed\n"); + } else if ((*pdata == 0xFF) || (*pdata == 0x00)) { + ret = security_receive(shrm, msg, length); + if (ret < 0) + dev_err(shrm->dev, + "security receive failed\n"); + } + break; +#endif + default: + break; + } + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +static void rx_audio_l2msg_handler(u8 l2_header, + void *msg, u32 length, + struct shrm_dev *shrm) +{ + int ret = 0; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + audio_receive(shrm, msg, length); + if (ret < 0) + dev_err(shrm->dev, "audio receive failed\n"); + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +static int __init shm_initialise_irq(struct shrm_dev *shrm) +{ + int err = 0; + + shrm_protocol_init(shrm, + rx_common_l2msg_handler, rx_audio_l2msg_handler); + + err = request_irq(shrm->ca_wake_irq, + ca_wake_irq_handler, IRQF_TRIGGER_RISING, + "ca_wake-up", shrm); + if (err < 0) { + dev_err(shrm->dev, + "Unable to allocate shm tx 
interrupt line\n"); + return err; + } + + err = request_irq(shrm->ac_read_notif_0_irq, + ac_read_notif_0_irq_handler, 0, + "ac_read_notif_0", shrm); + if (err < 0) { + dev_err(shrm->dev, + "error ac_read_notif_0_irq interrupt line\n"); + goto irq_err1; + } + + err = request_irq(shrm->ac_read_notif_1_irq, + ac_read_notif_1_irq_handler, 0, + "ac_read_notif_1", shrm); + if (err < 0) { + dev_err(shrm->dev, + "error ac_read_notif_1_irq interrupt line\n"); + goto irq_err2; + } + + err = request_irq(shrm->ca_msg_pending_notif_0_irq, + ca_msg_pending_notif_0_irq_handler, 0, + "ca_msg_pending_notif_0", shrm); + if (err < 0) { + dev_err(shrm->dev, + "error ca_msg_pending_notif_0_irq line\n"); + goto irq_err3; + } + + err = request_irq(shrm->ca_msg_pending_notif_1_irq, + ca_msg_pending_notif_1_irq_handler, 0, + "ca_msg_pending_notif_1", shrm); + if (err < 0) { + dev_err(shrm->dev, + "error ca_msg_pending_notif_1_irq interrupt line\n"); + goto irq_err4; + } + + return err; + +irq_err4: + free_irq(shrm->ca_msg_pending_notif_0_irq, shrm); +irq_err3: + free_irq(shrm->ac_read_notif_1_irq, shrm); +irq_err2: + free_irq(shrm->ac_read_notif_0_irq, shrm); +irq_err1: + free_irq(shrm->ca_wake_irq, shrm); + return err; +} + +static void free_shm_irq(struct shrm_dev *shrm) +{ + free_irq(shrm->ca_wake_irq, shrm); + free_irq(shrm->ac_read_notif_0_irq, shrm); + free_irq(shrm->ac_read_notif_1_irq, shrm); + free_irq(shrm->ca_msg_pending_notif_0_irq, shrm); + free_irq(shrm->ca_msg_pending_notif_1_irq, shrm); +} + +/** + * create_queue() - To create FIFO for Tx and Rx message buffering. + * @q: message queue. + * @devicetype: device type 0-isi,1-rpc,2-audio,3-security. + * + * This function creates a FIFO buffer of n_bytes size using + * dma_alloc_coherent(). It also initializes all queue handling + * locks, queue management pointers. It also initializes message list + * which occupies this queue. + * + * It return -ENOMEM in case of no memory. 
+ */ +static int create_queue(struct message_queue *q, u32 devicetype, + struct shrm_dev *shrm) +{ + q->fifo_base = (u8 *)&message_fifo[devicetype]; + q->size = SIZE_OF_FIFO; + q->readptr = 0; + q->writeptr = 0; + q->no = 0; + q->shrm = shrm; + spin_lock_init(&q->update_lock); + INIT_LIST_HEAD(&q->msg_list); + init_waitqueue_head(&q->wq_readable); + atomic_set(&q->q_rp, 0); + + return 0; +} +/** + * delete_queue() - To delete FIFO and assiciated memory. + * @q: message queue + * + * This function deletes FIFO created using create_queue() function. + * It resets queue management pointers. + */ +static void delete_queue(struct message_queue *q) +{ + q->size = 0; + q->readptr = 0; + q->writeptr = 0; +} + +/** + * add_msg_to_queue() - Add a message inside inside queue + * + * @q: message queue + * @size: size in bytes + * + * This function tries to allocate n_bytes of size in FIFO q. + * It returns negative number when no memory can be allocated + * currently. + */ +int add_msg_to_queue(struct message_queue *q, u32 size) +{ + struct queue_element *new_msg = NULL; + struct shrm_dev *shrm = q->shrm; + + dev_dbg(shrm->dev, "%s IN q->writeptr=%d\n", + __func__, q->writeptr); + new_msg = kmalloc(sizeof(struct queue_element), + GFP_KERNEL|GFP_ATOMIC); + + if (new_msg == NULL) { + dev_err(shrm->dev, "memory overflow inside while(1)\n"); + return -ENOMEM; + } + new_msg->offset = q->writeptr; + new_msg->size = size; + new_msg->no = q->no++; + + /* check for overflow condition */ + if (q->readptr <= q->writeptr) { + if (((q->writeptr-q->readptr) + size) >= q->size) { + dev_err(shrm->dev, "Buffer overflow !!\n"); + BUG_ON(((q->writeptr-q->readptr) + size) >= q->size); + } + } else { + if ((q->writeptr + size) >= q->readptr) { + dev_err(shrm->dev, "Buffer overflow !!\n"); + BUG_ON((q->writeptr + size) >= q->readptr); + } + } + q->writeptr = (q->writeptr + size) % q->size; + if (list_empty(&q->msg_list)) { + list_add_tail(&new_msg->entry, &q->msg_list); + /* There can be 2 blocking 
calls read and another select */ + + atomic_set(&q->q_rp, 1); + wake_up_interruptible(&q->wq_readable); + } else + list_add_tail(&new_msg->entry, &q->msg_list); + + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return 0; +} + +/** + * remove_msg_from_queue() - To remove a message from the msg queue. + * + * @q: message queue + * + * This function delets a message from the message list associated with message + * queue q and also updates read ptr. + * If the message list is empty, then, event is set to block the select and + * read calls of the paricular queue. + * + * The message list is FIFO style and message is always added to tail and + * removed from head. + */ + +int remove_msg_from_queue(struct message_queue *q) +{ + struct queue_element *old_msg = NULL; + struct shrm_dev *shrm = q->shrm; + struct list_head *msg; + + dev_dbg(shrm->dev, "%s IN q->readptr %d\n", + __func__, q->readptr); + + list_for_each(msg, &q->msg_list) { + old_msg = list_entry(msg, struct queue_element, entry); + if (old_msg == NULL) { + dev_err(shrm->dev, ":no message found\n"); + return -EFAULT; + } + break; + } + list_del(msg); + q->readptr = (q->readptr + old_msg->size) % q->size; + if (list_empty(&q->msg_list)) { + dev_dbg(shrm->dev, "List is empty setting RP= 0\n"); + atomic_set(&q->q_rp, 0); + } + kfree(old_msg); + + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return 0; +} + +/** + * get_size_of_new_msg() - retrieve new message from message list + * + * @q: message queue + * + * This function will retrieve most recent message from the corresponding + * queue list. New message is always retrieved from head side. + * It returns new message no, offset if FIFO and size. 
+ */ +int get_size_of_new_msg(struct message_queue *q) +{ + struct queue_element *new_msg = NULL; + struct list_head *msg_list; + struct shrm_dev *shrm = q->shrm; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + spin_lock_bh(&q->update_lock); + list_for_each(msg_list, &q->msg_list) { + new_msg = list_entry(msg_list, struct queue_element, entry); + if (new_msg == NULL) { + spin_unlock_bh(&q->update_lock); + dev_err(shrm->dev, "no message found\n"); + return -1; + } + break; + } + spin_unlock_bh(&q->update_lock); + + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return new_msg->size; +} + +/** + * isi_receive() - Rx Completion callback + * + * @data:message pointer + * @n_bytes:message size + * + * This function is a callback to indicate ISI message reception is complete. + * It updates Writeptr of the Fifo + */ +static int isi_receive(struct shrm_dev *shrm, + void *data, u32 n_bytes) +{ + u32 size = 0; + int ret = 0; + u8 *psrc; + struct message_queue *q; + struct isadev_context *isidev = &shrm->isa_context->isadev[0]; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + q = &isidev->dl_queue; + spin_lock(&q->update_lock); + /* Memcopy RX data first */ + if ((q->writeptr+n_bytes) >= q->size) { + dev_dbg(shrm->dev, "Inside Loop Back\n"); + psrc = (u8 *)data; + size = (q->size-q->writeptr); + /* Copy First Part of msg */ + memcpy((q->fifo_base+q->writeptr), psrc, size); + psrc += size; + /* Copy Second Part of msg at the top of fifo */ + memcpy(q->fifo_base, psrc, (n_bytes-size)); + } else { + memcpy((q->fifo_base+q->writeptr), data, n_bytes); + } + ret = add_msg_to_queue(q, n_bytes); + if (ret < 0) + dev_err(shrm->dev, "Adding msg to message queue failed\n"); + spin_unlock(&q->update_lock); + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return ret; +} + +/** + * rpc_receive() - Rx Completion callback + * + * @data:message pointer + * @n_bytes:message size + * + * This function is a callback to indicate RPC message reception is complete. 
+ * It updates Writeptr of the Fifo + */ +static int rpc_receive(struct shrm_dev *shrm, + void *data, u32 n_bytes) +{ + u32 size = 0; + int ret = 0; + u8 *psrc; + struct message_queue *q; + struct isadev_context *rpcdev = &shrm->isa_context->isadev[1]; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + q = &rpcdev->dl_queue; + spin_lock(&q->update_lock); + /* Memcopy RX data first */ + if ((q->writeptr+n_bytes) >= q->size) { + psrc = (u8 *)data; + size = (q->size-q->writeptr); + /* Copy First Part of msg */ + memcpy((q->fifo_base+q->writeptr), psrc, size); + psrc += size; + /* Copy Second Part of msg at the top of fifo */ + memcpy(q->fifo_base, psrc, (n_bytes-size)); + } else { + memcpy((q->fifo_base+q->writeptr), data, n_bytes); + } + + ret = add_msg_to_queue(q, n_bytes); + if (ret < 0) + dev_err(shrm->dev, "Adding msg to message queue failed\n"); + spin_unlock(&q->update_lock); + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return ret; +} + +/** + * audio_receive() - Rx Completion callback + * + * @data:message pointer + * @n_bytes:message size + * + * This function is a callback to indicate audio message reception is complete. 
+ * It updates Writeptr of the Fifo + */ +static int audio_receive(struct shrm_dev *shrm, + void *data, u32 n_bytes) +{ + u32 size = 0; + int ret = 0; + u8 *psrc; + struct message_queue *q; + struct isadev_context *audiodev = &shrm->isa_context->isadev[2]; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + q = &audiodev->dl_queue; + spin_lock(&q->update_lock); + /* Memcopy RX data first */ + if ((q->writeptr+n_bytes) >= q->size) { + psrc = (u8 *)data; + size = (q->size-q->writeptr); + /* Copy First Part of msg */ + memcpy((q->fifo_base+q->writeptr), psrc, size); + psrc += size; + /* Copy Second Part of msg at the top of fifo */ + memcpy(q->fifo_base, psrc, (n_bytes-size)); + } else { + memcpy((q->fifo_base+q->writeptr), data, n_bytes); + } + ret = add_msg_to_queue(q, n_bytes); + if (ret < 0) + dev_err(shrm->dev, "Adding msg to message queue failed\n"); + spin_unlock(&q->update_lock); + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return ret; +} + +/** + * security_receive() - Rx Completion callback + * + * @data:message pointer + * @n_bytes: message size + * + * This function is a callback to indicate security message reception + * is complete.It updates Writeptr of the Fifo + */ +static int security_receive(struct shrm_dev *shrm, + void *data, u32 n_bytes) +{ + u32 size = 0; + int ret = 0; + u8 *psrc; + struct message_queue *q; + struct isadev_context *secdev = &shrm->isa_context->isadev[3]; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + q = &secdev->dl_queue; + spin_lock(&q->update_lock); + /* Memcopy RX data first */ + if ((q->writeptr+n_bytes) >= q->size) { + psrc = (u8 *)data; + size = (q->size-q->writeptr); + /* Copy First Part of msg */ + memcpy((q->fifo_base+q->writeptr), psrc, size); + psrc += size; + /* Copy Second Part of msg at the top of fifo */ + memcpy(q->fifo_base, psrc, (n_bytes-size)); + } else { + memcpy((q->fifo_base+q->writeptr), data, n_bytes); + } + ret = add_msg_to_queue(q, n_bytes); + if (ret < 0) + dev_err(shrm->dev, "Adding msg to message 
queue failed\n"); + spin_unlock(&q->update_lock); + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return ret; +} + + +/** + * isa_select() - Select Interface + * + * @filp:file descriptor pointer + * @wait:poll_table_struct pointer + * + * This function is used to perform non-blocking read operations. It allows + * a process to determine whether it can read from one or more open files + * without blocking. These calls can also block a process until any of a + * given set of file descriptors becomes available for reading. + * If a file is ready to read, POLLIN | POLLRDNORM bitmask is returned. + * The driver method is called whenever the user-space program performs a select + * system call involving a file descriptor associated with the driver. + */ +static u32 isa_select(struct file *filp, + struct poll_table_struct *wait) +{ + struct isadev_context *isadev = filp->private_data; + struct shrm_dev *shrm = isadev->dl_queue.shrm; + struct message_queue *q; + u32 mask = 0; + u32 m = iminor(filp->f_path.dentry->d_inode); + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + if (isadev->device_id != m) + return -1; + q = &isadev->dl_queue; + poll_wait(filp, &q->wq_readable, wait); + if (atomic_read(&q->q_rp) == 1) + mask = POLLIN | POLLRDNORM; + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return mask; +} + +/** + * isa_read() - Read from device + * + * @filp:file descriptor + * @buf:user buffer pointer + * @len:size of requested data transfer + * @ppos:not used + * + * This function is called whenever user calls read() system call. + * It reads a oldest message from queue and copies it into user buffer and + * returns its size. + * If there is no message present in queue, then it blocks until new data is + * available. 
+ */ +ssize_t isa_read(struct file *filp, char __user *buf, + size_t len, loff_t *ppos) +{ + struct isadev_context *isadev = (struct isadev_context *) + filp->private_data; + struct shrm_dev *shrm = isadev->dl_queue.shrm; + struct message_queue *q; + char *psrc; + u32 msgsize; + u32 size = 0; + int ret = 0; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + if (len <= 0) + return -EFAULT; + q = &isadev->dl_queue; + + spin_lock_bh(&q->update_lock); + if (list_empty(&q->msg_list)) { + spin_unlock_bh(&q->update_lock); + if (wait_event_interruptible(q->wq_readable, + atomic_read(&q->q_rp) == 1)) { + return -ERESTARTSYS; + } + } else + spin_unlock_bh(&q->update_lock); + + msgsize = get_size_of_new_msg(q); + if ((q->readptr+msgsize) >= q->size) { + dev_dbg(shrm->dev, "Inside Loop Back\n"); + psrc = (char *)buf; + size = (q->size-q->readptr); + /* Copy First Part of msg */ + if (copy_to_user(psrc, + (u8 *)(q->fifo_base+q->readptr), + size)) { + dev_err(shrm->dev, "copy_to_user failed\n"); + return -EFAULT; + } + psrc += size; + /* Copy Second Part of msg at the top of fifo */ + if (copy_to_user(psrc, + (u8 *)(q->fifo_base), + (msgsize-size))) { + dev_err(shrm->dev, "copy_to_user failed\n"); + return -EFAULT; + } + } else { + if (copy_to_user(buf, + (u8 *)(q->fifo_base+q->readptr), + msgsize)) { + dev_err(shrm->dev, "copy_to_user failed\n"); + return -EFAULT; + } + } + + spin_lock_bh(&q->update_lock); + ret = remove_msg_from_queue(q); + if (ret < 0) { + dev_err(shrm->dev, + "Removing msg from message queue failed\n"); + msgsize = ret; + } + spin_unlock_bh(&q->update_lock); + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return msgsize; +} +/** + * isa_write() - Write to device + * + * @filp:file descriptor + * @buf:user buffer pointer + * @len:size of requested data transfer + * @ppos:not used + * + * This function is called whenever user calls write() system call. + * It checks if there is space available in queue, and copies the message + * inside queue. 
If there is no space, it blocks until space becomes available. + * It also schedules transfer thread to transmit the newly added message. + */ +static ssize_t isa_write(struct file *filp, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct isadev_context *isadev = filp->private_data; + struct shrm_dev *shrm = isadev->dl_queue.shrm; + struct message_queue *q; + int err, ret; + void *addr = 0; + u8 l2_header = 0; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + if (len <= 0) + return -EFAULT; + q = &isadev->dl_queue; + + switch (isadev->device_id) { + case ISI_MESSAGING: + dev_dbg(shrm->dev, "ISI\n"); + addr = (void *)wr_isi_msg; +#ifdef CONFIG_U8500_SHRM_LOOP_BACK + dev_dbg(shrm->dev, "Loopback\n"); + l2_header = COMMON_LOOPBACK_MESSAGING; +#else + l2_header = isadev->device_id; +#endif + break; + case RPC_MESSAGING: + dev_dbg(shrm->dev, "RPC\n"); + addr = (void *)wr_rpc_msg; +#ifdef CONFIG_U8500_SHRM_LOOP_BACK + l2_header = COMMON_LOOPBACK_MESSAGING; +#else + l2_header = isadev->device_id; +#endif + break; + case AUDIO_MESSAGING: + dev_dbg(shrm->dev, "Audio\n"); + addr = (void *)wr_audio_msg; +#ifdef CONFIG_U8500_SHRM_LOOP_BACK + l2_header = AUDIO_LOOPBACK_MESSAGING; +#else + l2_header = isadev->device_id; +#endif + + break; + case SECURITY_MESSAGING: + dev_dbg(shrm->dev, "Security\n"); + addr = (void *)wr_sec_msg; +#ifdef CONFIG_U8500_SHRM_LOOP_BACK + l2_header = COMMON_LOOPBACK_MESSAGING; +#else + l2_header = isadev->device_id; +#endif + break; + default: + dev_dbg(shrm->dev, "Wrong device\n"); + return -EFAULT; + } + + if (copy_from_user(addr, buf, len)) { + dev_err(shrm->dev, "copy_from_user failed\n"); + return -EFAULT; + } + + /* Write msg to Fifo */ + if (isadev->device_id == 2) { + mutex_lock(&shrm->isa_context->tx_audio_mutex); + err = shm_write_msg(shrm, l2_header, addr, len); + if (!err) + ret = len; + else + ret = err; + mutex_unlock(&shrm->isa_context->tx_audio_mutex); + } else { + spin_lock_bh(&shrm->isa_context->common_tx); + err = 
shm_write_msg(shrm, l2_header, addr, len); + if (!err) + ret = len; + else + ret = err; + spin_unlock_bh(&shrm->isa_context->common_tx); + } + + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return ret; +} + +/** + * isa_ioctl() - To handle different ioctl commands supported by driver. + * + * @inode: structure is used by the kernel internally to represent files + * @filp:file descriptor pointer + * @cmd:ioctl command + * @arg:input param + * + * Following ioctls are supported by this driver. + * DLP_IOCTL_ALLOCATE_BUFFER - To allocate buffer for new uplink message. + * This ioctl is called with required message size. It returns offset for + * the allocates space in the queue. DLP_IOCTL_PUT_MESSAGE - To indicate + * new uplink message available in queuq for transmission. Message is copied + * from offset location returned by previous ioctl before calling this ioctl. + * DLP_IOCTL_GET_MESSAGE - To check if any downlink message is available in + * queue. It returns offset for new message inside queue. + * DLP_IOCTL_DEALLOCATE_BUFFER - To deallocate any buffer allocate for + * downlink message once the message is copied. Message is copied from offset + * location returned by previous ioctl before calling this ioctl. 
+ */ +static int isa_ioctl(struct inode *inode, struct file *filp, + unsigned cmd, unsigned long arg) +{ + int err = 0; + struct isadev_context *isadev = filp->private_data; + struct shrm_dev *shrm = isadev->dl_queue.shrm; + u32 m = iminor(inode); + + if (isadev->device_id != m) + return -1; + + switch (cmd) { + case DLP_IOC_ALLOCATE_BUFFER: + dev_dbg(shrm->dev, "DLP_IOC_ALLOCATE_BUFFER\n"); + break; + case DLP_IOC_PUT_MESSAGE: + dev_dbg(shrm->dev, "DLP_IOC_PUT_MESSAGE\n"); + break; + case DLP_IOC_GET_MESSAGE: + dev_dbg(shrm->dev, "DLP_IOC_GET_MESSAGE\n"); + break; + case DLP_IOC_DEALLOCATE_BUFFER: + dev_dbg(shrm->dev, "DLP_IOC_DEALLOCATE_BUFFER\n"); + break; + default: + dev_dbg(shrm->dev, "Unknown IOCTL\n"); + err = -1; + break; + } + return err; +} +/** + * isa_mmap() - Maps kernel queue memory to user space. + * + * @filp:file descriptor pointer + * @vma:virtual area memory structure. + * + * This function maps kernel FIFO into user space. This function + * shall be called twice to map both uplink and downlink buffers. + */ +static int isa_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct isadev_context *isadev = filp->private_data; + struct shrm_dev *shrm = isadev->dl_queue.shrm; + + u32 m = iminor(filp->f_path.dentry->d_inode); + dev_dbg(shrm->dev, "%s %dIN\n", __func__, m); + + isadev = (struct isadev_context *)filp->private_data; + return 0; +} + +/** + * isa_close() - Close device file + * + * @inode:structure is used by the kernel internally to represent files + * @filp:device file descriptor + * + * This function deletes structues associated with this file, deletes + * queues, flushes and destroys workqueus and closes this file. + * It also unregisters itself from l2mux driver. 
+ */ +static int isa_close(struct inode *inode, struct file *filp) +{ + struct isadev_context *isadev = filp->private_data; + struct shrm_dev *shrm = isadev->dl_queue.shrm; + struct isa_driver_context *isa_context = shrm->isa_context; + u8 m; + + mutex_lock(&isa_lock); + m = iminor(filp->f_path.dentry->d_inode); + dev_dbg(shrm->dev, "%s IN %d", __func__, m); + + if (atomic_dec_and_test(&isa_context->is_open[m])) { + atomic_inc(&isa_context->is_open[m]); + dev_err(shrm->dev, "Device not opened yet\n"); + mutex_unlock(&isa_lock); + return -ENODEV; + } + atomic_set(&isa_context->is_open[m], 1); + + dev_dbg(shrm->dev, "isadev->device_id %d", isadev->device_id); + dev_dbg(shrm->dev, "Closed %d device\n", m); + + if (m == ISI_MESSAGING) + dev_dbg(shrm->dev, "Closed ISI_MESSAGING Device\n"); + else if (m == RPC_MESSAGING) + dev_dbg(shrm->dev, "Closed RPC_MESSAGING Device\n"); + else if (m == AUDIO_MESSAGING) + dev_dbg(shrm->dev, "Closed AUDIO_MESSAGING Device\n"); + else if (m == SECURITY_MESSAGING) + dev_dbg(shrm->dev, "Closed SECURITY_MESSAGING Device\n"); + else + dev_dbg(shrm->dev, NAME ":No such device present\n"); + + mutex_unlock(&isa_lock); + return 0; +} +/** + * isa_open() - Open device file + * + * @inode: structure is used by the kernel internally to represent files + * @filp: device file descriptor + * + * This function performs initialization tasks needed to open SHM channel. + * Following tasks are performed. 
+ * -return if device is already opened + * -create uplink FIFO + * -create downlink FIFO + * -init delayed workqueue thread + * -register to l2mux driver + */ +static int isa_open(struct inode *inode, struct file *filp) +{ + int err = 0; + u8 m; + struct isadev_context *isadev; + struct isa_driver_context *isa_context = container_of( + inode->i_cdev, + struct isa_driver_context, + cdev); + struct shrm_dev *shrm = isa_context->isadev->dl_queue.shrm; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + if (get_boot_state() != BOOT_DONE) { + dev_err(shrm->dev, "Boot is not done\n"); + return -EBUSY; + } + mutex_lock(&isa_lock); + m = iminor(inode); + + if ((m != ISI_MESSAGING) && (m != RPC_MESSAGING) && + (m != AUDIO_MESSAGING) && (m != SECURITY_MESSAGING)) { + dev_err(shrm->dev, "No such device present\n"); + mutex_unlock(&isa_lock); + return -ENODEV; + } + if (!atomic_dec_and_test(&isa_context->is_open[m])) { + atomic_inc(&isa_context->is_open[m]); + dev_err(shrm->dev, "Device already opened\n"); + mutex_unlock(&isa_lock); + return -EBUSY; + } + + if (m == ISI_MESSAGING) + dev_dbg(shrm->dev, "Open ISI_MESSAGING Device\n"); + else if (m == RPC_MESSAGING) + dev_dbg(shrm->dev, "Open RPC_MESSAGING Device\n"); + else if (m == AUDIO_MESSAGING) + dev_dbg(shrm->dev, "Open AUDIO_MESSAGING Device\n"); + else if (m == SECURITY_MESSAGING) + dev_dbg(shrm->dev, "Open SECURITY_MESSAGING Device\n"); + else + dev_dbg(shrm->dev, ":No such device present\n"); + + isadev = &isa_context->isadev[m]; + if (filp != NULL) + filp->private_data = isadev; + + mutex_unlock(&isa_lock); + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return err; +} + +const struct file_operations isa_fops = { + .owner = THIS_MODULE, + .open = isa_open, + .release = isa_close, + .ioctl = isa_ioctl, + .mmap = isa_mmap, + .read = isa_read, + .write = isa_write, + .poll = isa_select, +}; + +/** + * isa_init() - module insertion function + * + * This function registers module as a character driver using + * 
register_chrdev_region() or alloc_chrdev_region. It adds this + * driver to system using cdev_add() call. Major number is dynamically + * allocated using alloc_chrdev_region() by default or left to user to specify + * it during load time. For this variable major is used as module_param + * Nodes to be created using + * mknod /dev/isi c $major 0 + * mknod /dev/rpc c $major 1 + * mknod /dev/audio c $major 2 + * mknod /dev/sec c $major 3 + */ +int isa_init(struct shrm_dev *shrm) +{ + dev_t dev_id; + int retval, no_dev; + struct isadev_context *isadev; + struct isa_driver_context *isa_context; + + isa_context = kzalloc(sizeof(struct isa_driver_context), + GFP_KERNEL); + shrm->isa_context = isa_context; + if (isa_context == NULL) { + dev_err(shrm->dev, "Failed to alloc memory\n"); + return -ENOMEM; + } + + if (major) { + dev_id = MKDEV(major, 0); + retval = register_chrdev_region(dev_id, ISA_DEVICES, NAME); + } else { + retval = alloc_chrdev_region(&dev_id, 0, ISA_DEVICES, NAME); + major = MAJOR(dev_id); + } + + dev_dbg(shrm->dev, "major %d\n", major); + + cdev_init(&isa_context->cdev, &isa_fops); + isa_context->cdev.owner = THIS_MODULE; + retval = cdev_add(&isa_context->cdev, dev_id, ISA_DEVICES); + if (retval) { + dev_err(shrm->dev, "Failed to add char device\n"); + return retval; + } + + for (no_dev = 0; no_dev < ISA_DEVICES; no_dev++) + atomic_set(&isa_context->is_open[no_dev], 1); + + isa_context->isadev = kzalloc(sizeof + (struct isadev_context)*ISA_DEVICES, + GFP_KERNEL); + if (isa_context->isadev == NULL) { + dev_err(shrm->dev, "Failed to alloc memory\n"); + return -ENOMEM; + } + for (no_dev = 0; no_dev < ISA_DEVICES; no_dev++) { + isadev = &isa_context->isadev[no_dev]; + isadev->device_id = no_dev; + retval = create_queue(&isadev->dl_queue, + isadev->device_id, shrm); + if (retval < 0) { + dev_err(shrm->dev, "create dl_queue failed\n"); + delete_queue(&isadev->dl_queue); + kfree(isadev); + return retval; + } + } + mutex_init(&isa_context->tx_audio_mutex); + 
spin_lock_init(&isa_context->common_tx); + + dev_err(shrm->dev, "SHRM char driver added\n"); + + return retval; +} + +void isa_exit(struct shrm_dev *shrm) +{ + int no_dev; + struct isadev_context *isadev; + struct isa_driver_context *isa_context = shrm->isa_context; + dev_t dev_id = MKDEV(major, 0); + + for (no_dev = 0; no_dev < ISA_DEVICES; no_dev++) { + isadev = &isa_context->isadev[no_dev]; + delete_queue(&isadev->dl_queue); + kfree(isadev); + } + + cdev_del(&isa_context->cdev); + unregister_chrdev_region(dev_id, ISA_DEVICES); + kfree(isa_context); + + dev_err(shrm->dev, "SHRM char driver removed\n"); +} + +#ifdef CONFIG_HIGH_RES_TIMERS +static enum hrtimer_restart callback(struct hrtimer *timer) +{ + return HRTIMER_NORESTART; +} +#endif + + +static int __init shrm_probe(struct platform_device *pdev) +{ + int err = 0; + struct resource *res; + struct shrm_dev *shrm = NULL; + + if (pdev == NULL) { + dev_err(shrm->dev, + "No device/platform_data found on shm device\n"); + return -ENODEV; + } + + + shrm = kzalloc(sizeof(struct shrm_dev), GFP_KERNEL); + if (shrm == NULL) { + dev_err(shrm->dev, + "Could not allocate memory for struct shm_dev\n"); + return -ENOMEM; + } + shrm->dev = &pdev->dev; + + /* initialise the SHM */ + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + dev_err(shrm->dev, "Unable to map Ca Wake up interrupt\n"); + err = -EBUSY; + goto rollback_intr; + } + shrm->ca_wake_irq = res->start; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); + if (!res) { + dev_err(shrm->dev, + "Unable to map APE_Read_notif_common IRQ base\n"); + err = -EBUSY; + goto rollback_intr; + } + shrm->ac_read_notif_0_irq = res->start; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 2); + if (!res) { + dev_err(shrm->dev, + "Unable to map APE_Read_notif_audio IRQ base\n"); + err = -EBUSY; + goto rollback_intr; + } + shrm->ac_read_notif_1_irq = res->start; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 3); + if (!res) { + 
dev_err(shrm->dev, + "Unable to map Cmt_msg_pending_notif_common IRQ base\n"); + err = -EBUSY; + goto rollback_intr; + } + shrm->ca_msg_pending_notif_0_irq = res->start; + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 4); + if (!res) { + dev_err(shrm->dev, + "Unable to map Cmt_msg_pending_notif_audio IRQ base\n"); + err = -EBUSY; + goto rollback_intr; + } + shrm->ca_msg_pending_notif_1_irq = res->start; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(shrm->dev, + "Could not get SHM IO memory information\n"); + err = -ENODEV; + goto rollback_intr; + } + + shrm->intr_base = (void __iomem *)ioremap_nocache(res->start, + res->end - res->start + 1); + + if (!(shrm->intr_base)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_intr; + } + + shrm->ape_common_fifo_base_phy = + (u32 *)U8500_SHM_FIFO_APE_COMMON_BASE; + shrm->ape_common_fifo_base = + (void __iomem *)ioremap_nocache( + U8500_SHM_FIFO_APE_COMMON_BASE, + SHM_FIFO_0_SIZE); + shrm->ape_common_fifo_size = (SHM_FIFO_0_SIZE)/4; + + if (!(shrm->ape_common_fifo_base)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_ape_common_fifo_base; + } + + shrm->cmt_common_fifo_base_phy = + (u32 *)U8500_SHM_FIFO_CMT_COMMON_BASE; + + shrm->cmt_common_fifo_base = + (void __iomem *)ioremap_nocache( + U8500_SHM_FIFO_CMT_COMMON_BASE, SHM_FIFO_0_SIZE); + shrm->cmt_common_fifo_size = (SHM_FIFO_0_SIZE)/4; + + if (!(shrm->cmt_common_fifo_base)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_cmt_common_fifo_base; + } + + shrm->ape_audio_fifo_base_phy = + (u32 *)U8500_SHM_FIFO_APE_AUDIO_BASE; + shrm->ape_audio_fifo_base = + (void __iomem *)ioremap_nocache(U8500_SHM_FIFO_APE_AUDIO_BASE, + SHM_FIFO_1_SIZE); + shrm->ape_audio_fifo_size = (SHM_FIFO_1_SIZE)/4; + + if (!(shrm->ape_audio_fifo_base)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto 
rollback_ape_audio_fifo_base; + } + + shrm->cmt_audio_fifo_base_phy = + (u32 *)U8500_SHM_FIFO_CMT_AUDIO_BASE; + shrm->cmt_audio_fifo_base = + (void __iomem *)ioremap_nocache(U8500_SHM_FIFO_CMT_AUDIO_BASE, + SHM_FIFO_1_SIZE); + shrm->cmt_audio_fifo_size = (SHM_FIFO_1_SIZE)/4; + + if (!(shrm->cmt_audio_fifo_base)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_cmt_audio_fifo_base; + } + + shrm->ac_common_shared_wptr = + (void __iomem *)ioremap(SHM_ACFIFO_0_WRITE_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ac_common_shared_wptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_ac_common_shared_wptr; + } + + shrm->ac_common_shared_rptr = + (void __iomem *)ioremap(SHM_ACFIFO_0_READ_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ac_common_shared_rptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + + + shrm->ca_common_shared_wptr = + (void __iomem *)ioremap(SHM_CAFIFO_0_WRITE_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ca_common_shared_wptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + + shrm->ca_common_shared_rptr = + (void __iomem *)ioremap(SHM_CAFIFO_0_READ_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ca_common_shared_rptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + + + shrm->ac_audio_shared_wptr = + (void __iomem *)ioremap(SHM_ACFIFO_1_WRITE_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ac_audio_shared_wptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + + + shrm->ac_audio_shared_rptr = + (void __iomem *)ioremap(SHM_ACFIFO_1_READ_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ac_audio_shared_rptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + + + shrm->ca_audio_shared_wptr = + (void __iomem *)ioremap(SHM_CAFIFO_1_WRITE_AMCU, SHM_PTR_SIZE); + + if 
(!(shrm->ca_audio_shared_wptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + + + shrm->ca_audio_shared_rptr = + (void __iomem *)ioremap(SHM_CAFIFO_1_READ_AMCU, SHM_PTR_SIZE); + + if (!(shrm->ca_audio_shared_rptr)) { + dev_err(shrm->dev, "Unable to map register base\n"); + err = -EBUSY; + goto rollback_map; + } + + + if (isa_init(shrm) != 0) { + dev_err(shrm->dev, "Driver Initialization Error\n"); + err = -EBUSY; + } + /* install handlers and tasklets */ + if (shm_initialise_irq(shrm)) { + dev_err(shrm->dev, "shm error in interrupt registration\n"); + goto rollback_irq; + } + +#ifdef CONFIG_HIGH_RES_TIMERS + hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + timer.function = callback; + + hrtimer_start(&timer, ktime_set(0, 2*NSEC_PER_MSEC), HRTIMER_MODE_REL); +#endif + + return err; + +rollback_irq: + free_shm_irq(shrm); +rollback_map: + iounmap(shrm->ac_common_shared_wptr); + iounmap(shrm->ac_common_shared_rptr); + iounmap(shrm->ca_common_shared_wptr); + iounmap(shrm->ca_common_shared_rptr); + iounmap(shrm->ac_audio_shared_wptr); + iounmap(shrm->ac_audio_shared_rptr); + iounmap(shrm->ca_audio_shared_wptr); + iounmap(shrm->ca_audio_shared_rptr); +rollback_ac_common_shared_wptr: + iounmap(shrm->cmt_audio_fifo_base); +rollback_cmt_audio_fifo_base: + iounmap(shrm->ape_audio_fifo_base); +rollback_ape_audio_fifo_base: + iounmap(shrm->cmt_common_fifo_base); +rollback_cmt_common_fifo_base: + iounmap(shrm->ape_common_fifo_base); +rollback_ape_common_fifo_base: + iounmap(shrm->intr_base); +rollback_intr: + kfree(shrm); + return err; +} + +static int __exit shrm_remove(struct platform_device *pdev) +{ + struct shrm_dev *shrm = platform_get_drvdata(pdev); + + free_shm_irq(shrm); + iounmap(shrm->intr_base); + iounmap(shrm->ape_common_fifo_base); + iounmap(shrm->cmt_common_fifo_base); + iounmap(shrm->ape_audio_fifo_base); + iounmap(shrm->cmt_audio_fifo_base); + iounmap(shrm->ac_common_shared_wptr); + 
iounmap(shrm->ac_common_shared_rptr); + iounmap(shrm->ca_common_shared_wptr); + iounmap(shrm->ca_common_shared_rptr); + iounmap(shrm->ac_audio_shared_wptr); + iounmap(shrm->ac_audio_shared_rptr); + iounmap(shrm->ca_audio_shared_wptr); + iounmap(shrm->ca_audio_shared_rptr); + kfree(shrm); + isa_exit(shrm); + + return 0; +} +#ifdef CONFIG_PM + +/** + * u8500_shrm_suspend() - This routine puts the SHRM in to sustend state. + * @pdev: platform device. + * + * This routine checks the current ongoing communication with Modem by + * examining the ca_wake state and prevents suspend if modem communication + * is on-going. + * If ca_wake = 1 (high), modem comm. is on-going; don't suspend + * If ca_wake = 0 (low), no comm. with modem on-going.Allow suspend + */ +int u8500_shrm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct shrm_dev *shrm = platform_get_drvdata(pdev); + + dev_dbg(shrm->dev, "%s called...\n", __func__); + dev_dbg(shrm->dev, "\n ca_wake_req_state = %x\n", + get_ca_wake_req_state()); + /* if ca_wake_req is high, prevent system suspend */ + if (get_ca_wake_req_state()) + return -EBUSY; + else + return 0; +} + +/** + * u8500_shrm_resume() - This routine resumes the SHRM from sustend state. + * @pdev: platform device. + * + * This routine restore back the current state of the SHRM + */ +int u8500_shrm_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct shrm_dev *shrm = platform_get_drvdata(pdev); + + dev_dbg(shrm->dev, "%s called...\n", __func__); + /* TODO: + * As of now, no state save takes place in suspend. + * So, nothing to restore in resume. + * Simply return as of now. + * State saved in suspend should be restored here. 
+ */ + + return 0; +} + +static const struct dev_pm_ops shrm_dev_pm_ops = { + .suspend = u8500_shrm_suspend, + .resume = u8500_shrm_resume, +}; +#endif + +static struct platform_driver shrm_driver = { + .remove = __exit_p(shrm_remove), + .driver = { + .name = "u8500_shrm", + .owner = THIS_MODULE, +#ifdef CONFIG_PM + .pm = &shrm_dev_pm_ops, +#endif + }, +}; + +static int __init shrm_driver_init(void) +{ + return platform_driver_probe(&shrm_driver, shrm_probe); +} + +static void __exit shrm_driver_exit(void) +{ + platform_driver_unregister(&shrm_driver); +} + +module_init(shrm_driver_init); +module_exit(shrm_driver_exit); + +MODULE_AUTHOR("Biju Das"); +MODULE_DESCRIPTION("Shared Memory Modem Driver Interface"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/shrm/shrm_fifo.c b/drivers/misc/shrm/shrm_fifo.c new file mode 100644 index 00000000000..cbe0949a56d --- /dev/null +++ b/drivers/misc/shrm/shrm_fifo.c @@ -0,0 +1,827 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson + * Author: Kumar Sanghavi <kumar.sanghvi@stericsson.com> for ST-Ericsson + * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <mach/shrm.h> +#include <mach/shrm_driver.h> +#include <mach/shrm_private.h> +#include <mach/shrm_net.h> + +#define L1_BOOT_INFO_REQ 1 +#define L1_BOOT_INFO_RESP 2 +#define L1_NORMAL_MSG 3 +#define L1_HEADER_MASK 28 +#define L1_MAPID_MASK 0xF0000000 +#define CONFIG_OFFSET 8 +#define COUNTER_OFFSET 20 +#define L2_HEADER_SIZE 4 +#define L2_HEADER_OFFSET 24 +#define MASK_0_15_BIT 0xFF +#define MASK_16_31_BIT 0xFF00 +#define MASK_16_27_BIT 0xFFF0000 +#define MASK_0_39_BIT 0xFFFFF +#define MASK_40_55_BIT 0xFF00000 +#define MASK_8_16_BIT 0x0000FF00 +#define MSG_LEN_OFFSET 16 +#define SHRM_VER 2 +#define ca_ist_inactivity_timer 100 /*100ms */ +#define ca_csc_inactivity_timer 100 /*100ms */ + +static u8 msg_audio_counter; 
+static u8 msg_common_counter; + +struct fifo_write_params ape_shm_fifo_0; +struct fifo_write_params ape_shm_fifo_1; +struct fifo_read_params cmt_shm_fifo_0; +struct fifo_read_params cmt_shm_fifo_1; + + +static u8 cmt_read_notif_0_send; +static u8 cmt_read_notif_1_send; + +void shm_fifo_init(struct shrm_dev *shrm) +{ + ape_shm_fifo_0.writer_local_wptr = 0; + ape_shm_fifo_0.writer_local_rptr = 0; + *((u32 *)shrm->ac_common_shared_wptr) = 0; + *((u32 *)shrm->ac_common_shared_rptr) = 0; + ape_shm_fifo_0.shared_wptr = 0; + ape_shm_fifo_0.shared_rptr = 0; + ape_shm_fifo_0.availablesize = shrm->ape_common_fifo_size; + ape_shm_fifo_0.end_addr_fifo = shrm->ape_common_fifo_size; + ape_shm_fifo_0.fifo_virtual_addr = shrm->ape_common_fifo_base; + spin_lock_init(&ape_shm_fifo_0.fifo_update_lock); + + + cmt_shm_fifo_0.reader_local_rptr = 0; + cmt_shm_fifo_0.reader_local_wptr = 0; + cmt_shm_fifo_0.shared_wptr = + *((u32 *)shrm->ca_common_shared_wptr); + cmt_shm_fifo_0.shared_rptr = + *((u32 *)shrm->ca_common_shared_rptr); + cmt_shm_fifo_0.availablesize = shrm->cmt_common_fifo_size; + cmt_shm_fifo_0.end_addr_fifo = shrm->cmt_common_fifo_size; + cmt_shm_fifo_0.fifo_virtual_addr = shrm->cmt_common_fifo_base; + + ape_shm_fifo_1.writer_local_wptr = 0; + ape_shm_fifo_1.writer_local_rptr = 0; + ape_shm_fifo_1.shared_wptr = 0; + ape_shm_fifo_1.shared_rptr = 0; + *((u32 *)shrm->ac_audio_shared_wptr) = 0; + *((u32 *)shrm->ac_audio_shared_rptr) = 0; + ape_shm_fifo_1.availablesize = shrm->ape_audio_fifo_size; + ape_shm_fifo_1.end_addr_fifo = shrm->ape_audio_fifo_size; + ape_shm_fifo_1.fifo_virtual_addr = shrm->ape_audio_fifo_base; + spin_lock_init(&ape_shm_fifo_1.fifo_update_lock); + + cmt_shm_fifo_1.reader_local_rptr = 0; + cmt_shm_fifo_1.reader_local_wptr = 0; + cmt_shm_fifo_1.shared_wptr = + *((u32 *)shrm->ca_audio_shared_wptr); + cmt_shm_fifo_1.shared_rptr = + *((u32 *)shrm->ca_audio_shared_rptr); + cmt_shm_fifo_1.availablesize = shrm->cmt_audio_fifo_size; + cmt_shm_fifo_1.end_addr_fifo 
= shrm->cmt_audio_fifo_size; + cmt_shm_fifo_1.fifo_virtual_addr = shrm->cmt_audio_fifo_base; + msg_audio_counter = 0; + msg_common_counter = 0; +} + +u8 read_boot_info_req(struct shrm_dev *shrm, + u32 *config, + u32 *version) +{ + struct fifo_read_params *fifo = &cmt_shm_fifo_0; + u32 *msg; + u32 header = 0; + u8 msgtype; + + /* Read L1 header read content of reader_local_rptr */ + msg = (u32 *) + (fifo->reader_local_rptr + fifo->fifo_virtual_addr); + header = *msg; + msgtype = (header & L1_MAPID_MASK) >> L1_MSG_MAPID_OFFSET; + if (msgtype != L1_BOOT_INFO_REQ) { + dev_err(shrm->dev, "Read_Boot_Info_Req Fatal ERROR\n"); + BUG(); + } + *config = (header >> CONFIG_OFFSET) & MASK_0_15_BIT; + *version = header & MASK_0_15_BIT; + fifo->reader_local_rptr += 1; + + return 1; +} + +void write_boot_info_resp(struct shrm_dev *shrm, u32 config, + u32 version) +{ + struct fifo_write_params *fifo = &ape_shm_fifo_0; + u32 *msg; + u8 msg_length; + version = SHRM_VER; + + spin_lock_bh(&fifo->fifo_update_lock); + /* Read L1 header read content of reader_local_rptr */ + msg = (u32 *) + (fifo->writer_local_wptr+fifo->fifo_virtual_addr); + if (version < 1) { + *msg = ((L1_BOOT_INFO_RESP << L1_MSG_MAPID_OFFSET) | + ((config << CONFIG_OFFSET) & MASK_16_31_BIT) + | (version & MASK_0_15_BIT)); + msg_length = 1; + } else { + *msg = ((L1_BOOT_INFO_RESP << L1_MSG_MAPID_OFFSET) | + ((0x8 << MSG_LEN_OFFSET) & MASK_16_27_BIT) | + ((config << CONFIG_OFFSET) & MASK_8_16_BIT)| + version); + msg++; + *msg = ca_ist_inactivity_timer; + msg++; + *msg = ca_csc_inactivity_timer; + msg_length = L1_NORMAL_MSG; + } + fifo->writer_local_wptr += msg_length; + fifo->availablesize -= msg_length; + spin_unlock_bh(&fifo->fifo_update_lock); +} + +/** + * shm_write_msg_to_fifo() - write message to FIFO + * @shrm: pointer to shrm device information structure + * @channel: audio or common channel + * @l2header: L2 header or device ID + * @addr: pointer to write buffer address + * @length: length of mst to write + * + 
* Function Which Writes the data into Fifo in IPC zone + * It is called from shm_write_msg. This function will copy the msg + * from the kernel buffer to FIFO. There are 4 kernel buffers from where + * the data is to copied to FIFO one for each of the messages ISI, RPC, + * AUDIO and SECURITY. ISI, RPC and SECURITY messages are pushed to FIFO + * in commmon channel and AUDIO message is pushed onto audio channel FIFO. + */ +int shm_write_msg_to_fifo(struct shrm_dev *shrm, u8 channel, + u8 l2header, void *addr, u32 length) +{ + struct fifo_write_params *fifo = NULL; + u32 l1_header = 0, l2_header = 0; + u32 requiredsize; + u32 size = 0; + u32 *msg; + u8 *src; + + if (channel == COMMON_CHANNEL) + fifo = &ape_shm_fifo_0; + else if (channel == AUDIO_CHANNEL) + fifo = &ape_shm_fifo_1; + else { + dev_err(shrm->dev, "invalid channel\n"); + return -EINVAL; + } + + /* L2 size in 32b */ + requiredsize = ((length + 3) / 4); + /* Add size of L1 & L2 header */ + requiredsize += 2; + + /* if availablesize = or < requiredsize then error */ + if (fifo->availablesize <= requiredsize) { + /* Fatal ERROR - should never happens */ + dev_dbg(shrm->dev, "wr_wptr= %x\n", + fifo->writer_local_wptr); + dev_dbg(shrm->dev, "wr_rptr= %x\n", + fifo->writer_local_rptr); + dev_dbg(shrm->dev, "shared_wptr= %x\n", + fifo->shared_wptr); + dev_dbg(shrm->dev, "shared_rptr= %x\n", + fifo->shared_rptr); + dev_dbg(shrm->dev, "availsize= %x\n", + fifo->availablesize); + dev_dbg(shrm->dev, "end__fifo= %x\n", + fifo->end_addr_fifo); + dev_warn(shrm->dev, "Modem is busy, please wait." + " c_cnt = %d; a_cnt = %d\n", msg_common_counter, + msg_audio_counter); + if (channel == COMMON_CHANNEL) { + dev_warn(shrm->dev, + "Modem is lagging behind in reading." 
+ "Stopping n/w dev queue\n"); + shrm_stop_netdev(shrm->ndev); + } + + return -EAGAIN; + } + + if (channel == COMMON_CHANNEL) { + /* build L1 header */ + l1_header = ((L1_NORMAL_MSG << L1_MSG_MAPID_OFFSET) | + (((msg_common_counter++) << COUNTER_OFFSET) + & MASK_40_55_BIT) | + ((length + L2_HEADER_SIZE) & MASK_0_39_BIT)); + } else if (channel == AUDIO_CHANNEL) { + /* build L1 header */ + l1_header = ((L1_NORMAL_MSG << L1_MSG_MAPID_OFFSET) | + (((msg_audio_counter++) << COUNTER_OFFSET) + & MASK_40_55_BIT) | + ((length + L2_HEADER_SIZE) & MASK_0_39_BIT)); + } + + /* + * Need to take care race condition for fifo->availablesize + * & fifo->writer_local_rptr with Ac_Read_notification interrupt. + * One option could be use stack variable for LocalRptr and recompute + * fifo->availablesize,based on flag enabled in the + * Ac_read_notification + */ + l2_header = ((l2header << L2_HEADER_OFFSET) | + ((length) & MASK_0_39_BIT)); + spin_lock_bh(&fifo->fifo_update_lock); + /* Check Local Rptr is less than or equal to Local WPtr */ + if (fifo->writer_local_rptr <= fifo->writer_local_wptr) { + msg = (u32 *) + (fifo->fifo_virtual_addr+fifo->writer_local_wptr); + + /* check enough place bewteen writer_local_wptr & end of FIFO */ + if ((fifo->end_addr_fifo-fifo->writer_local_wptr) >= + requiredsize) { + /* Add L1 header and L2 header */ + *msg = l1_header; + msg++; + *msg = l2_header; + msg++; + + /* copy the l2 message in 1 memcpy */ + memcpy((void *)msg, addr, length); + /* UpdateWptr */ + fifo->writer_local_wptr += requiredsize; + fifo->availablesize -= requiredsize; + fifo->writer_local_wptr %= fifo->end_addr_fifo; + } else { + /* + * message is split between and of FIFO and beg of FIFO + * copy first part from writer_local_wptr to end of FIFO + */ + size = fifo->end_addr_fifo-fifo->writer_local_wptr; + + if (size == 1) { + /* Add L1 header */ + *msg = l1_header; + msg++; + /* UpdateWptr */ + fifo->writer_local_wptr = 0; + fifo->availablesize -= size; + /* + * copy second part 
from beg of FIFO + * with remaining part of msg + */ + msg = (u32 *) + fifo->fifo_virtual_addr; + *msg = l2_header; + msg++; + + /* copy the l3 message in 1 memcpy */ + memcpy((void *)msg, addr, length); + /* UpdateWptr */ + fifo->writer_local_wptr += + requiredsize-size; + fifo->availablesize -= + (requiredsize-size); + } else if (size == 2) { + /* Add L1 header and L2 header */ + *msg = l1_header; + msg++; + *msg = l2_header; + msg++; + + /* UpdateWptr */ + fifo->writer_local_wptr = 0; + fifo->availablesize -= size; + + /* + * copy second part from beg of FIFO + * with remaining part of msg + */ + msg = (u32 *) + fifo->fifo_virtual_addr; + /* copy the l3 message in 1 memcpy */ + memcpy((void *)msg, addr, length); + + /* UpdateWptr */ + fifo->writer_local_wptr += + requiredsize-size; + fifo->availablesize -= + (requiredsize-size); + } else { + /* Add L1 header and L2 header */ + *msg = l1_header; + msg++; + *msg = l2_header; + msg++; + + /* copy the l2 message in 1 memcpy */ + memcpy((void *)msg, addr, (size-2)*4); + + + /* UpdateWptr */ + fifo->writer_local_wptr = 0; + fifo->availablesize -= size; + + /* + * copy second part from beg of FIFO + * with remaining part of msg + */ + msg = (u32 *)fifo->fifo_virtual_addr; + src = (u8 *)addr+((size - 2) * 4); + memcpy((void *)msg, src, + (length-((size - 2) * 4))); + + /* UpdateWptr */ + fifo->writer_local_wptr += + requiredsize-size; + fifo->availablesize -= + (requiredsize-size); + } + + } + } else { + /* writer_local_rptr > writer_local_wptr */ + msg = (u32 *) + (fifo->fifo_virtual_addr+fifo->writer_local_wptr); + /* Add L1 header and L2 header */ + *msg = l1_header; + msg++; + *msg = l2_header; + msg++; + /* + * copy message possbile between writer_local_wptr up + * to writer_local_rptr copy the l3 message in 1 memcpy + */ + memcpy((void *)msg, addr, length); + + /* UpdateWptr */ + fifo->writer_local_wptr += requiredsize; + fifo->availablesize -= requiredsize; + + } + spin_unlock_bh(&fifo->fifo_update_lock); + 
return length; +} + +/** + * read_one_l2msg_common() - read message from common channel + * @shrm: pointer to shrm device information structure + * @l2_msg: pointer to the read L2 message buffer + * @len: message length + * + * This function read one message from the FIFO and returns l2 header type + */ +u8 read_one_l2msg_common(struct shrm_dev *shrm, + u8 *l2_msg, u32 *len) +{ + struct fifo_read_params *fifo = &cmt_shm_fifo_0; + + u32 *msg; + u32 l1_header = 0; + u32 l2_header = 0; + u32 length; + u8 msgtype; + u32 msg_size; + u32 size = 0; + + /* Read L1 header read content of reader_local_rptr */ + msg = (u32 *) + (fifo->reader_local_rptr+fifo->fifo_virtual_addr); + l1_header = *msg++; + msgtype = (l1_header & 0xF0000000) >> L1_HEADER_MASK; + + if (msgtype != L1_NORMAL_MSG) { + /* Fatal ERROR - should never happens */ + dev_dbg(shrm->dev, "wr_wptr= %x\n", + fifo->reader_local_wptr); + dev_dbg(shrm->dev, "wr_rptr= %x\n", + fifo->reader_local_rptr); + dev_dbg(shrm->dev, "shared_wptr= %x\n", + fifo->shared_wptr); + dev_dbg(shrm->dev, "shared_rptr= %x\n", + fifo->shared_rptr); + dev_dbg(shrm->dev, "availsize= %x\n", + fifo->availablesize); + dev_dbg(shrm->dev, "end_fifo= %x\n", + fifo->end_addr_fifo); + /* Fatal ERROR - should never happens */ + dev_crit(shrm->dev, "Fatal ERROR - should never happen\n"); + BUG(); + } + if (fifo->reader_local_rptr == (fifo->end_addr_fifo-1)) { + l2_header = (*((u32 *)fifo->fifo_virtual_addr)); + length = l2_header & MASK_0_39_BIT; + } else { + /* Read L2 header,Msg size & content of reader_local_rptr */ + l2_header = *msg; + length = l2_header & MASK_0_39_BIT; + } + + *len = length; + msg_size = ((length + 3) / 4); + msg_size += 2; + + if (fifo->reader_local_rptr + msg_size <= + fifo->end_addr_fifo) { + /* Skip L2 header */ + msg++; + + /* read msg between reader_local_rptr and end of FIFO */ + memcpy((void *)l2_msg, (void *)msg, length); + /* UpdateLocalRptr */ + fifo->reader_local_rptr += msg_size; + fifo->reader_local_rptr %= 
fifo->end_addr_fifo; + } else { + /* + * msg split between end of FIFO and beg copy first + * part of msg read msg between reader_local_rptr + * and end of FIFO + */ + size = fifo->end_addr_fifo-fifo->reader_local_rptr; + if (size == 1) { + msg = (u32 *)(fifo->fifo_virtual_addr); + /* Skip L2 header */ + msg++; + memcpy((void *)l2_msg, (void *)(msg), length); + } else if (size == 2) { + /* Skip L2 header */ + msg++; + msg = (u32 *)(fifo->fifo_virtual_addr); + memcpy((void *)l2_msg, + (void *)(msg), length); + } else { + /* Skip L2 header */ + msg++; + memcpy((void *)l2_msg, (void *)msg, ((size - 2) * 4)); + /* copy second part of msg */ + l2_msg += ((size - 2) * 4); + msg = (u32 *)(fifo->fifo_virtual_addr); + memcpy((void *)l2_msg, (void *)(msg), + (length-((size - 2) * 4))); + } + fifo->reader_local_rptr = + (fifo->reader_local_rptr+msg_size) % + fifo->end_addr_fifo; + } + return (l2_header>>L2_HEADER_OFFSET) & MASK_0_15_BIT; + } + +u8 read_remaining_messages_common() +{ + struct fifo_read_params *fifo = &cmt_shm_fifo_0; + /* + * There won't be any Race condition reader_local_rptr & + * fifo->reader_local_wptr with CaMsgpending Notification Interrupt + */ + return ((fifo->reader_local_rptr != fifo->reader_local_wptr) ? 
1 : 0); +} + +u8 read_one_l2msg_audio(struct shrm_dev *shrm, + u8 *l2_msg, u32 *len) +{ + struct fifo_read_params *fifo = &cmt_shm_fifo_1; + + u32 *msg; + u32 l1_header = 0; + u32 l2_header = 0; + u32 length; + u8 msgtype; + u32 msg_size; + u32 size = 0; + + /* Read L1 header read content of reader_local_rptr */ + msg = (u32 *) + (fifo->reader_local_rptr+fifo->fifo_virtual_addr); + l1_header = *msg++; + msgtype = (l1_header & 0xF0000000) >> L1_HEADER_MASK; + + if (msgtype != L1_NORMAL_MSG) { + /* Fatal ERROR - should never happens */ + dev_dbg(shrm->dev, "wr_local_wptr= %x\n", + fifo->reader_local_wptr); + dev_dbg(shrm->dev, "wr_local_rptr= %x\n", + fifo->reader_local_rptr); + dev_dbg(shrm->dev, "shared_wptr= %x\n", + fifo->shared_wptr); + dev_dbg(shrm->dev, "shared_rptr= %x\n", + fifo->shared_rptr); + dev_dbg(shrm->dev, "availsize=%x\n", + fifo->availablesize); + dev_dbg(shrm->dev, "end_fifo= %x\n", + fifo->end_addr_fifo); + /* Fatal ERROR - should never happens */ + dev_crit(shrm->dev, "Fatal ERROR - should never happen\n"); + BUG(); + } + if (fifo->reader_local_rptr == (fifo->end_addr_fifo-1)) { + l2_header = (*((u32 *)fifo->fifo_virtual_addr)); + length = l2_header & MASK_0_39_BIT; + } else { + /* Read L2 header,Msg size & content of reader_local_rptr */ + l2_header = *msg; + length = l2_header & MASK_0_39_BIT; + } + + *len = length; + msg_size = ((length + 3) / 4); + msg_size += 2; + + if (fifo->reader_local_rptr + msg_size <= + fifo->end_addr_fifo) { + /* Skip L2 header */ + msg++; + /* read msg between reader_local_rptr and end of FIFO */ + memcpy((void *)l2_msg, (void *)msg, length); + /* UpdateLocalRptr */ + fifo->reader_local_rptr += msg_size; + fifo->reader_local_rptr %= fifo->end_addr_fifo; + } else { + + /* + * msg split between end of FIFO and beg + * copy first part of msg + * read msg between reader_local_rptr and end of FIFO + */ + size = fifo->end_addr_fifo-fifo->reader_local_rptr; + if (size == 1) { + msg = (u32 *)(fifo->fifo_virtual_addr); + /* 
Skip L2 header */ + msg++; + memcpy((void *)l2_msg, (void *)(msg), length); + } else if (size == 2) { + /* Skip L2 header */ + msg++; + msg = (u32 *)(fifo->fifo_virtual_addr); + memcpy((void *)l2_msg, (void *)(msg), length); + } else { + /* Skip L2 header */ + msg++; + memcpy((void *)l2_msg, (void *)msg, ((size - 2) * 4)); + /* copy second part of msg */ + l2_msg += ((size - 2) * 4); + msg = (u32 *)(fifo->fifo_virtual_addr); + memcpy((void *)l2_msg, (void *)(msg), + (length-((size - 2) * 4))); + } + fifo->reader_local_rptr = + (fifo->reader_local_rptr+msg_size) % + fifo->end_addr_fifo; + + } + return (l2_header>>L2_HEADER_OFFSET) & MASK_0_15_BIT; + } + +u8 read_remaining_messages_audio() +{ + struct fifo_read_params *fifo = &cmt_shm_fifo_1; + + return ((fifo->reader_local_rptr != fifo->reader_local_wptr) ? + 1 : 0); +} + +u8 is_the_only_one_unread_message(struct shrm_dev *shrm, + u8 channel, u32 length) +{ + struct fifo_write_params *fifo = NULL; + u32 messagesize = 0; + u8 is_only_one_unread_msg = 0; + + if (channel == COMMON_CHANNEL) + fifo = &ape_shm_fifo_0; + else /* channel = AUDIO_CHANNEL */ + fifo = &ape_shm_fifo_1; + + /* L3 size in 32b */ + messagesize = ((length + 3) / 4); + /* Add size of L1 & L2 header */ + messagesize += 2; + /* + * possibility of race condition with Ac Read notification interrupt. + * need to check ? + */ + if (fifo->writer_local_wptr > fifo->writer_local_rptr) + is_only_one_unread_msg = + ((fifo->writer_local_rptr + messagesize) == + fifo->writer_local_wptr) ? 1 : 0; + else + /* Msg split between end of fifo and starting of Fifo */ + is_only_one_unread_msg = + (((fifo->writer_local_rptr + messagesize) % + fifo->end_addr_fifo) == fifo->writer_local_wptr) ? 
+ 1 : 0; + + return is_only_one_unread_msg; +} + +void update_ca_common_local_wptr(struct shrm_dev *shrm) +{ + /* + * update CA common reader local write pointer with the + * shared write pointer + */ + struct fifo_read_params *fifo = &cmt_shm_fifo_0; + + fifo->shared_wptr = + (*((u32 *)shrm->ca_common_shared_wptr)); + fifo->reader_local_wptr = fifo->shared_wptr; +} + +void update_ca_audio_local_wptr(struct shrm_dev *shrm) +{ + /* + * update CA audio reader local write pointer with the + * shared write pointer + */ + struct fifo_read_params *fifo = &cmt_shm_fifo_1; + + fifo->shared_wptr = + (*((u32 *)shrm->ca_audio_shared_wptr)); + fifo->reader_local_wptr = fifo->shared_wptr; +} + +void update_ac_common_local_rptr(struct shrm_dev *shrm) +{ + /* + * update AC common writer local read pointer with the + * shared read pointer + */ + struct fifo_write_params *fifo; + u32 free_space = 0; + + fifo = &ape_shm_fifo_0; + + spin_lock_bh(&fifo->fifo_update_lock); + fifo->shared_rptr = + (*((u32 *)shrm->ac_common_shared_rptr)); + + if (fifo->shared_rptr >= fifo->writer_local_rptr) + free_space = + (fifo->shared_rptr-fifo->writer_local_rptr); + else { + free_space = + (fifo->end_addr_fifo-fifo->writer_local_rptr); + free_space += fifo->shared_rptr; + } + + /* Chance of race condition of below variables with write_msg */ + fifo->availablesize += free_space; + fifo->writer_local_rptr = fifo->shared_rptr; + spin_unlock_bh(&fifo->fifo_update_lock); +} + +void update_ac_audio_local_rptr(struct shrm_dev *shrm) +{ + /* + * update AC audio writer local read pointer with the + * shared read pointer + */ + struct fifo_write_params *fifo; + u32 free_space = 0; + + fifo = &ape_shm_fifo_1; + spin_lock_bh(&fifo->fifo_update_lock); + fifo->shared_rptr = + (*((u32 *)shrm->ac_audio_shared_rptr)); + + if (fifo->shared_rptr >= fifo->writer_local_rptr) + free_space = + (fifo->shared_rptr-fifo->writer_local_rptr); + else { + free_space = + (fifo->end_addr_fifo-fifo->writer_local_rptr); + free_space 
+= fifo->shared_rptr; + } + + /* Chance of race condition of below variables with write_msg */ + fifo->availablesize += free_space; + fifo->writer_local_rptr = fifo->shared_rptr; + spin_unlock_bh(&fifo->fifo_update_lock); +} + +void update_ac_common_shared_wptr(struct shrm_dev *shrm) +{ + /* + * update AC common shared write pointer with the + * local write pointer + */ + struct fifo_write_params *fifo; + + fifo = &ape_shm_fifo_0; + spin_lock_bh(&fifo->fifo_update_lock); + /* Update shared pointer fifo offset of the IPC zone */ + (*((u32 *)shrm->ac_common_shared_wptr)) = + fifo->writer_local_wptr; + + fifo->shared_wptr = fifo->writer_local_wptr; + spin_unlock_bh(&fifo->fifo_update_lock); +} + +void update_ac_audio_shared_wptr(struct shrm_dev *shrm) +{ + /* + * update AC audio shared write pointer with the + * local write pointer + */ + struct fifo_write_params *fifo; + + fifo = &ape_shm_fifo_1; + spin_lock_bh(&fifo->fifo_update_lock); + /* Update shared pointer fifo offset of the IPC zone */ + (*((u32 *)shrm->ac_audio_shared_wptr)) = + fifo->writer_local_wptr; + fifo->shared_wptr = fifo->writer_local_wptr; + spin_unlock_bh(&fifo->fifo_update_lock); +} + +void update_ca_common_shared_rptr(struct shrm_dev *shrm) +{ + /* + * update CA common shared read pointer with the + * local read pointer + */ + struct fifo_read_params *fifo; + + fifo = &cmt_shm_fifo_0; + + /* Update shared pointer fifo offset of the IPC zone */ + (*((u32 *)shrm->ca_common_shared_rptr)) = + fifo->reader_local_rptr; + fifo->shared_rptr = fifo->reader_local_rptr; +} + +void update_ca_audio_shared_rptr(struct shrm_dev *shrm) +{ + /* + * update CA audio shared read pointer with the + * local read pointer + */ + struct fifo_read_params *fifo; + + fifo = &cmt_shm_fifo_1; + + /* Update shared pointer fifo offset of the IPC zone */ + (*((u32 *)shrm->ca_audio_shared_rptr)) = + fifo->reader_local_rptr; + fifo->shared_rptr = fifo->reader_local_rptr; +} + +void get_reader_pointers(u8 channel_type, u32 
*reader_local_rptr, + u32 *reader_local_wptr, u32 *shared_rptr) +{ + struct fifo_read_params *fifo = NULL; + + if (channel_type == COMMON_CHANNEL) + fifo = &cmt_shm_fifo_0; + else /* channel_type = AUDIO_CHANNEL */ + fifo = &cmt_shm_fifo_1; + + *reader_local_rptr = fifo->reader_local_rptr; + *reader_local_wptr = fifo->reader_local_wptr; + *shared_rptr = fifo->shared_rptr; +} + +void get_writer_pointers(u8 channel_type, u32 *writer_local_rptr, + u32 *writer_local_wptr, u32 *shared_wptr) +{ + struct fifo_write_params *fifo = NULL; + + if (channel_type == COMMON_CHANNEL) + fifo = &ape_shm_fifo_0; + else /* channel_type = AUDIO_CHANNEL */ + fifo = &ape_shm_fifo_1; + + spin_lock_bh(&fifo->fifo_update_lock); + *writer_local_rptr = fifo->writer_local_rptr; + *writer_local_wptr = fifo->writer_local_wptr; + *shared_wptr = fifo->shared_wptr; + spin_unlock_bh(&fifo->fifo_update_lock); +} + +void set_ca_msg_0_read_notif_send(u8 val) +{ + cmt_read_notif_0_send = val; +} + +u8 get_ca_msg_0_read_notif_send(void) +{ + return cmt_read_notif_0_send; +} + +void set_ca_msg_1_read_notif_send(u8 val) +{ + cmt_read_notif_1_send = val; +} + +u8 get_ca_msg_1_read_notif_send(void) +{ + return cmt_read_notif_1_send; +} diff --git a/drivers/misc/shrm/shrm_protocol.c b/drivers/misc/shrm/shrm_protocol.c new file mode 100644 index 00000000000..0598c62812f --- /dev/null +++ b/drivers/misc/shrm/shrm_protocol.c @@ -0,0 +1,1194 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson + * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> for ST-Ericsson + * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/hrtimer.h> +#include <linux/delay.h> +#include <linux/netlink.h> +#include <linux/workqueue.h> + +#include <mach/shrm.h> +#include <mach/shrm_driver.h> +#include <mach/shrm_private.h> +#include <mach/shrm_net.h> +#include 
<mach/prcmu-fw-api.h> +#include <mach/prcmu-regs.h> +#include <mach/suspend.h> +#include <mach/reboot_reasons.h> + +#define L2_HEADER_ISI 0x0 +#define L2_HEADER_RPC 0x1 +#define L2_HEADER_AUDIO 0x2 +#define L2_HEADER_SECURITY 0x3 +#define L2_HEADER_COMMON_SIMPLE_LOOPBACK 0xC0 +#define L2_HEADER_COMMON_ADVANCED_LOOPBACK 0xC1 +#define L2_HEADER_AUDIO_SIMPLE_LOOPBACK 0x80 +#define L2_HEADER_AUDIO_ADVANCED_LOOPBACK 0x81 +#define MAX_PAYLOAD 1024 + +static u8 boot_state = BOOT_INIT; +static u8 recieve_common_msg[8*1024]; +static u8 recieve_audio_msg[8*1024]; +static received_msg_handler rx_common_handler; +static received_msg_handler rx_audio_handler; +static struct hrtimer timer; +static char is_earlydrop; +struct sock *shrm_nl_sk; + +static char shrm_common_tx_state = SHRM_SLEEP_STATE; +static char shrm_common_rx_state = SHRM_SLEEP_STATE; +static char shrm_audio_tx_state = SHRM_SLEEP_STATE; +static char shrm_audio_rx_state = SHRM_SLEEP_STATE; + +static atomic_t ac_sleep_disable_count = ATOMIC_INIT(0); +static struct shrm_dev *shm_dev; + +/* Spin lock and tasklet declaration */ +DECLARE_TASKLET(shm_ca_0_tasklet, shm_ca_msgpending_0_tasklet, 0); +DECLARE_TASKLET(shm_ca_1_tasklet, shm_ca_msgpending_1_tasklet, 0); +DECLARE_TASKLET(shm_ac_read_0_tasklet, shm_ac_read_notif_0_tasklet, 0); +DECLARE_TASKLET(shm_ac_read_1_tasklet, shm_ac_read_notif_1_tasklet, 0); + +static DEFINE_MUTEX(ac_state_mutex); + +static DEFINE_SPINLOCK(ca_common_lock); +static DEFINE_SPINLOCK(ca_audio_lock); +static DEFINE_SPINLOCK(ca_wake_req_lock); +static DEFINE_SPINLOCK(boot_lock); + +enum shrm_nl { + SHRM_NL_MOD_RESET = 1, + SHRM_NL_MOD_QUERY_STATE, + SHRM_NL_USER_MOD_RESET, + SHRM_NL_STATUS_MOD_ONLINE, + SHRM_NL_STATUS_MOD_OFFLINE, +}; + +static void shm_ac_sleep_req_work(struct work_struct *work) +{ + mutex_lock(&ac_state_mutex); + if (atomic_read(&ac_sleep_disable_count) == 0) + prcmu_ac_sleep_req(); + mutex_unlock(&ac_state_mutex); +} + +static void shm_ac_wake_req_work(struct work_struct 
*work) +{ + mutex_lock(&ac_state_mutex); + prcmu_ac_wake_req(); + mutex_unlock(&ac_state_mutex); +} + +static u32 get_host_accessport_val(void) +{ + u32 prcm_hostaccess; + + prcm_hostaccess = readl(PRCM_HOSTACCESS_REQ); + wmb(); + prcm_hostaccess = prcm_hostaccess & 0x01; + + return prcm_hostaccess; +} +static enum hrtimer_restart callback(struct hrtimer *timer) +{ + unsigned long flags; + + spin_lock_irqsave(&ca_wake_req_lock, flags); + if (((shrm_common_rx_state == SHRM_IDLE) || + (shrm_common_rx_state == SHRM_SLEEP_STATE)) + && ((shrm_common_tx_state == SHRM_IDLE) || + (shrm_common_tx_state == SHRM_SLEEP_STATE)) + && ((shrm_audio_rx_state == SHRM_IDLE) || + (shrm_audio_rx_state == SHRM_SLEEP_STATE)) + && ((shrm_audio_tx_state == SHRM_IDLE) || + (shrm_audio_tx_state == SHRM_SLEEP_STATE))) { + + shrm_common_rx_state = SHRM_SLEEP_STATE; + shrm_audio_rx_state = SHRM_SLEEP_STATE; + shrm_common_tx_state = SHRM_SLEEP_STATE; + shrm_audio_tx_state = SHRM_SLEEP_STATE; + + queue_work(shm_dev->shm_ac_sleep_wq, + &shm_dev->shm_ac_sleep_req); + + } + spin_unlock_irqrestore(&ca_wake_req_lock, flags); + + return HRTIMER_NORESTART; +} + +int nl_send_multicast_message(int msg, gfp_t gfp_mask) +{ + struct sk_buff *skb = NULL; + struct nlmsghdr *nlh = NULL; + int err; + + /* prepare netlink message */ + skb = alloc_skb(NLMSG_SPACE(MAX_PAYLOAD), gfp_mask); + if (!skb) { + dev_err(shm_dev->dev, "%s:alloc_skb failed\n", __func__); + err = -ENOMEM; + goto out; + } + + nlh = (struct nlmsghdr *)skb->data; + nlh->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD); + dev_dbg(shm_dev->dev, "nlh->nlmsg_len = %d\n", nlh->nlmsg_len); + + nlh->nlmsg_pid = 0; /* from kernel */ + nlh->nlmsg_flags = 0; + *(int *)NLMSG_DATA(nlh) = msg; + skb_put(skb, MAX_PAYLOAD); + /* sender is in group 1<<0 */ + NETLINK_CB(skb).pid = 0; /* from kernel */ + /* to mcast group 1<<0 */ + NETLINK_CB(skb).dst_group = 1; + + /*multicast the message to all listening processes*/ + err = netlink_broadcast(shrm_nl_sk, skb, 0, 1, 
gfp_mask); + dev_dbg(shm_dev->dev, "ret val from nl-multicast = %d\n", err); + +out: + return err; +} + +static void nl_send_unicast_message(int dst_pid) +{ + struct sk_buff *skb = NULL; + struct nlmsghdr *nlh = NULL; + int err; + int bt_state; + unsigned long flags; + + dev_info(shm_dev->dev, "Sending unicast message\n"); + + /* prepare the NL message for unicast */ + skb = alloc_skb(NLMSG_SPACE(MAX_PAYLOAD), GFP_KERNEL); + if (!skb) { + dev_err(shm_dev->dev, "%s:alloc_skb failed\n", __func__); + return; + } + + nlh = (struct nlmsghdr *)skb->data; + nlh->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD); + dev_dbg(shm_dev->dev, "nlh->nlmsg_len = %d\n", nlh->nlmsg_len); + + nlh->nlmsg_pid = 0; /* from kernel */ + nlh->nlmsg_flags = 0; + + spin_lock_irqsave(&boot_lock, flags); + bt_state = boot_state; + spin_unlock_irqrestore(&boot_lock, flags); + + if (bt_state == BOOT_DONE) + *(int *)NLMSG_DATA(nlh) = SHRM_NL_STATUS_MOD_ONLINE; + else + *(int *)NLMSG_DATA(nlh) = SHRM_NL_STATUS_MOD_OFFLINE; + + skb_put(skb, MAX_PAYLOAD); + /* sender is in group 1<<0 */ + NETLINK_CB(skb).pid = 0; /* from kernel */ + NETLINK_CB(skb).dst_group = 0; + + /*unicast the message to the querying processes*/ + err = netlink_unicast(shrm_nl_sk, skb, dst_pid, MSG_DONTWAIT); + dev_dbg(shm_dev->dev, "ret val from nl-unicast = %d\n", err); +} + + +static int check_modem_in_reset(void) +{ + u8 bt_state; + unsigned long flags; + + spin_lock_irqsave(&boot_lock, flags); + bt_state = boot_state; + spin_unlock_irqrestore(&boot_lock, flags); + +#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET + if (bt_state != BOOT_UNKNOWN) + return 0; + else + return -ENODEV; +#else + /* + * this check won't be applicable and won't work correctly + * if modem-silent-feature is not enabled + * so, simply return 0 + */ + return 0; +#endif +} + +void shm_ca_msgpending_0_tasklet(unsigned long tasklet_data) +{ + struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data; + u32 reader_local_rptr; + u32 reader_local_wptr; + u32 shared_rptr; + u32 
config = 0, version = 0; + unsigned long flags; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + /* Interprocess locking */ + spin_lock(&ca_common_lock); + + /* Update_reader_local_wptr with shared_wptr */ + update_ca_common_local_wptr(shrm); + get_reader_pointers(COMMON_CHANNEL, &reader_local_rptr, + &reader_local_wptr, &shared_rptr); + + set_ca_msg_0_read_notif_send(0); + + if (boot_state == BOOT_DONE) { + shrm_common_rx_state = SHRM_PTR_FREE; + + if (reader_local_rptr != shared_rptr) + ca_msg_read_notification_0(shrm); + if (reader_local_rptr != reader_local_wptr) + receive_messages_common(shrm); + get_reader_pointers(COMMON_CHANNEL, &reader_local_rptr, + &reader_local_wptr, &shared_rptr); + if (reader_local_rptr == reader_local_wptr) + shrm_common_rx_state = SHRM_IDLE; + } else { + /* BOOT phase.only a BOOT_RESP should be in FIFO */ + if (boot_state != BOOT_INFO_SYNC) { + if (!read_boot_info_req(shrm, &config, &version)) { + dev_err(shrm->dev, + "Unable to read boot state\n"); + BUG(); + } + /* SendReadNotification */ + ca_msg_read_notification_0(shrm); + /* + * Check the version number before + * sending Boot info response + */ + + /* send MsgPending notification */ + write_boot_info_resp(shrm, config, version); + spin_lock_irqsave(&boot_lock, flags); + boot_state = BOOT_INFO_SYNC; + spin_unlock_irqrestore(&boot_lock, flags); + dev_info(shrm->dev, "BOOT_INFO_SYNC\n"); + queue_work(shrm->shm_common_ch_wr_wq, + &shrm->send_ac_msg_pend_notify_0); + } else { + ca_msg_read_notification_0(shrm); + dev_info(shrm->dev, + "BOOT_INFO_SYNC\n"); + } + } + /* Interprocess locking */ + spin_unlock(&ca_common_lock); + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +void shm_ca_msgpending_1_tasklet(unsigned long tasklet_data) +{ + struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data; + u32 reader_local_rptr; + u32 reader_local_wptr; + u32 shared_rptr; + + /* + * This function is called when CaMsgPendingNotification Trigerred + * by CMU. 
It means that CMU has wrote a message into Ca Audio FIFO + */ + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown\n", + __func__); + return; + } + + /* Interprocess locking */ + spin_lock(&ca_audio_lock); + + /* Update_reader_local_wptr(with shared_wptr) */ + update_ca_audio_local_wptr(shrm); + get_reader_pointers(AUDIO_CHANNEL, &reader_local_rptr, + &reader_local_wptr, &shared_rptr); + + set_ca_msg_1_read_notif_send(0); + + if (boot_state != BOOT_DONE) { + dev_err(shrm->dev, "Boot Error\n"); + return; + } + shrm_audio_rx_state = SHRM_PTR_FREE; + /* Check we already read the message */ + if (reader_local_rptr != shared_rptr) + ca_msg_read_notification_1(shrm); + if (reader_local_rptr != reader_local_wptr) + receive_messages_audio(shrm); + + get_reader_pointers(AUDIO_CHANNEL, &reader_local_rptr, + &reader_local_wptr, &shared_rptr); + if (reader_local_rptr == reader_local_wptr) + shrm_audio_rx_state = SHRM_IDLE; + + /* Interprocess locking */ + spin_unlock(&ca_audio_lock); + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +void shm_ac_read_notif_0_tasklet(unsigned long tasklet_data) +{ + struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data; + u32 writer_local_rptr; + u32 writer_local_wptr; + u32 shared_wptr; + unsigned long flags; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + /* Update writer_local_rptrwith shared_rptr */ + update_ac_common_local_rptr(shrm); + get_writer_pointers(COMMON_CHANNEL, &writer_local_rptr, + &writer_local_wptr, &shared_wptr); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown\n", + __func__); + return; + } + + if (boot_state == BOOT_INFO_SYNC) { + /* BOOT_RESP sent by APE has been received by CMT */ + spin_lock_irqsave(&boot_lock, flags); + boot_state = BOOT_DONE; + spin_unlock_irqrestore(&boot_lock, flags); + dev_info(shrm->dev, "IPC_ISA BOOT_DONE\n"); + + if (shrm->msr_flag) { + shrm_start_netdev(shrm->ndev); + 
shrm->msr_flag = 0; + + /* multicast that modem is online */ + nl_send_multicast_message(SHRM_NL_STATUS_MOD_ONLINE, GFP_ATOMIC); + } + + } else if (boot_state == BOOT_DONE) { + if (writer_local_rptr != writer_local_wptr) { + shrm_common_tx_state = SHRM_PTR_FREE; + queue_work(shrm->shm_common_ch_wr_wq, + &shrm->send_ac_msg_pend_notify_0); + } else { + shrm_common_tx_state = SHRM_IDLE; + shrm_restart_netdev(shrm->ndev); + } + } else { + dev_err(shrm->dev, "Invalid boot state\n"); + } + /* start timer here */ + hrtimer_start(&timer, ktime_set(0, 10*NSEC_PER_MSEC), + HRTIMER_MODE_REL); + atomic_dec(&ac_sleep_disable_count); + + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +void shm_ac_read_notif_1_tasklet(unsigned long tasklet_data) +{ + struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data; + u32 writer_local_rptr; + u32 writer_local_wptr; + u32 shared_wptr; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown\n", + __func__); + return; + } + + /* Update writer_local_rptr(with shared_rptr) */ + update_ac_audio_local_rptr(shrm); + get_writer_pointers(AUDIO_CHANNEL, &writer_local_rptr, + &writer_local_wptr, &shared_wptr); + if (boot_state != BOOT_DONE) { + dev_err(shrm->dev, "Error Case in boot state\n"); + return; + } + if (writer_local_rptr != writer_local_wptr) { + shrm_audio_tx_state = SHRM_PTR_FREE; + queue_work(shrm->shm_audio_ch_wr_wq, + &shrm->send_ac_msg_pend_notify_1); + } else { + shrm_audio_tx_state = SHRM_IDLE; + } + /* start timer here */ + hrtimer_start(&timer, ktime_set(0, 10*NSEC_PER_MSEC), + HRTIMER_MODE_REL); + atomic_dec(&ac_sleep_disable_count); + + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +void shm_ca_sleep_req_work(struct work_struct *work) +{ + dev_dbg(shm_dev->dev, "%s:IRQ_PRCMU_CA_SLEEP\n", __func__); + + shrm_common_rx_state = SHRM_IDLE; + shrm_audio_rx_state = SHRM_IDLE; + + writel((1<<GOP_CA_WAKE_ACK_BIT), + shm_dev->intr_base + GOP_SET_REGISTER_BASE); 
+ + hrtimer_start(&timer, ktime_set(0, 10*NSEC_PER_MSEC), + HRTIMER_MODE_REL); +#ifdef CONFIG_UX500_SUSPEND + suspend_unblock_sleep(); +#endif + atomic_dec(&ac_sleep_disable_count); +} + +void shm_ca_wake_req_work(struct work_struct *work) +{ + struct shrm_dev *shrm = container_of(work, + struct shrm_dev, shm_ca_wake_req); + + /* initialize the FIFO Variables */ + if (boot_state == BOOT_INIT) + shm_fifo_init(shrm); + + mutex_lock(&ac_state_mutex); + prcmu_ac_wake_req(); + mutex_unlock(&ac_state_mutex); + + /* send ca_wake_ack_interrupt to CMU */ + if (!get_host_accessport_val()) + BUG(); + writel((1<<GOP_CA_WAKE_ACK_BIT), + shm_dev->intr_base + GOP_SET_REGISTER_BASE); +} +#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET +static int shrm_modem_reset_sequence(void) +{ + int err; + unsigned long flags; + + /* + * disable irqs + * very much needed for user-space initiated + * modem-reset + */ + disable_irq_nosync(shm_dev->ac_read_notif_0_irq); + disable_irq_nosync(shm_dev->ac_read_notif_1_irq); + disable_irq_nosync(shm_dev->ca_msg_pending_notif_0_irq); + disable_irq_nosync(shm_dev->ca_msg_pending_notif_1_irq); + disable_irq_nosync(IRQ_PRCMU_CA_WAKE); + disable_irq_nosync(IRQ_PRCMU_CA_SLEEP); + + + /* update the boot_state */ + spin_lock_irqsave(&boot_lock, flags); + boot_state = BOOT_UNKNOWN; + + /* + * put a barrier over here to make sure boot_state is updated + * else, it is seen that some of already executing modem + * irqs or tasklets fail the protocol checks and will ultimately + * try to acces the modem causing system to hang. 
+ * This is particularly seen with user-space initiated modem reset + */ + wmb(); + spin_unlock_irqrestore(&boot_lock, flags); + + hrtimer_cancel(&timer); + + /* + * keep the count to 0 so that we can bring down the line + * for normal ac-wake and ac-sleep logic + */ + atomic_set(&ac_sleep_disable_count, 0); + + /* workaround for MSR */ + queue_work(shm_dev->shm_ac_wake_wq, + &shm_dev->shm_ac_wake_req); + + /* stop network queue */ + shrm_stop_netdev(shm_dev->ndev); + + /* reset char device queues */ + shrm_char_reset_queues(shm_dev); + + /* reset protocol states */ + shrm_common_tx_state = SHRM_SLEEP_STATE; + shrm_common_rx_state = SHRM_SLEEP_STATE; + shrm_audio_tx_state = SHRM_SLEEP_STATE; + shrm_audio_rx_state = SHRM_SLEEP_STATE; + + /* set the msr flag */ + shm_dev->msr_flag = 1; + + /* multicast that modem is going to reset */ + err = nl_send_multicast_message(SHRM_NL_MOD_RESET, GFP_ATOMIC); + + /* reset the boot state */ + spin_lock_irqsave(&boot_lock, flags); + boot_state = BOOT_INIT; + spin_unlock_irqrestore(&boot_lock, flags); + + /* re-enable irqs */ + enable_irq(shm_dev->ac_read_notif_0_irq); + enable_irq(shm_dev->ac_read_notif_1_irq); + enable_irq(shm_dev->ca_msg_pending_notif_0_irq); + enable_irq(shm_dev->ca_msg_pending_notif_1_irq); + enable_irq(IRQ_PRCMU_CA_WAKE); + enable_irq(IRQ_PRCMU_CA_SLEEP); + + return err; +} +#endif + +static void shrm_modem_reset_callback(unsigned long irq) +{ + dev_err(shm_dev->dev, "Received mod_reset_req interrupt\n"); + +#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET + { + int err; + dev_info(shm_dev->dev, "Initiating Modem silent reset\n"); + + err = shrm_modem_reset_sequence(); + if (err) + dev_err(shm_dev->dev, + "Failed multicast of modem reset\n"); + } +#else + dev_info(shm_dev->dev, "Modem in reset loop, doing System reset\n"); + + /* Call the PRCMU reset API */ + prcmu_system_reset(SW_RESET_NO_ARGUMENT); +#endif +} + +DECLARE_TASKLET(shrm_sw_reset_callback, shrm_modem_reset_callback, + IRQ_PRCMU_MODEM_SW_RESET_REQ); 
+ +static irqreturn_t shrm_prcmu_irq_handler(int irq, void *data) +{ + struct shrm_dev *shrm = data; + + switch (irq) { + case IRQ_PRCMU_CA_WAKE: +#ifdef CONFIG_UX500_SUSPEND + suspend_block_sleep(); +#endif + if (shrm->msr_flag) + atomic_set(&ac_sleep_disable_count, 0); + atomic_inc(&ac_sleep_disable_count); + queue_work(shrm->shm_ca_wake_wq, &shrm->shm_ca_wake_req); + break; + case IRQ_PRCMU_CA_SLEEP: + queue_work(shrm->shm_ca_wake_wq, &shrm->shm_ca_sleep_req); + break; + case IRQ_PRCMU_MODEM_SW_RESET_REQ: + tasklet_schedule(&shrm_sw_reset_callback); + break; + default: + dev_err(shrm->dev, "%s: => IRQ %d\n", __func__, irq); + return IRQ_NONE; + } + return IRQ_HANDLED; +} + +static void send_ac_msg_pend_notify_0_work(struct work_struct *work) +{ + struct shrm_dev *shrm = container_of(work, struct shrm_dev, + send_ac_msg_pend_notify_0); + + dev_dbg(shrm->dev, "%s IN\n", __func__); + update_ac_common_shared_wptr(shrm); + + mutex_lock(&ac_state_mutex); + atomic_inc(&ac_sleep_disable_count); + prcmu_ac_wake_req(); + mutex_unlock(&ac_state_mutex); + + if (!get_host_accessport_val()) + BUG(); + + /* Trigger AcMsgPendingNotification to CMU */ + writel((1<<GOP_COMMON_AC_MSG_PENDING_NOTIFICATION_BIT), + shrm->intr_base + GOP_SET_REGISTER_BASE); + + if (shrm_common_tx_state == SHRM_PTR_FREE) + shrm_common_tx_state = SHRM_PTR_BUSY; + + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +static void send_ac_msg_pend_notify_1_work(struct work_struct *work) +{ + struct shrm_dev *shrm = container_of(work, struct shrm_dev, + send_ac_msg_pend_notify_1); + + dev_dbg(shrm->dev, "%s IN\n", __func__); + /* Update shared_wptr with writer_local_wptr) */ + update_ac_audio_shared_wptr(shrm); + + mutex_lock(&ac_state_mutex); + atomic_inc(&ac_sleep_disable_count); + prcmu_ac_wake_req(); + mutex_unlock(&ac_state_mutex); + + if (!get_host_accessport_val()) + BUG(); + + /* Trigger AcMsgPendingNotification to CMU */ + writel((1<<GOP_AUDIO_AC_MSG_PENDING_NOTIFICATION_BIT), + shrm->intr_base + 
GOP_SET_REGISTER_BASE); + + if (shrm_audio_tx_state == SHRM_PTR_FREE) + shrm_audio_tx_state = SHRM_PTR_BUSY; + + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +void shm_nl_receive(struct sk_buff *skb) +{ + struct nlmsghdr *nlh = NULL; + int msg; + + dev_dbg(shm_dev->dev, "Received NL msg from user-space\n"); + + nlh = (struct nlmsghdr *)skb->data; + msg = *((int *)(NLMSG_DATA(nlh))); + switch (msg) { + case SHRM_NL_MOD_QUERY_STATE: + dev_info(shm_dev->dev, "mod-query-state from user-space\n"); + nl_send_unicast_message(nlh->nlmsg_pid); + break; + + case SHRM_NL_USER_MOD_RESET: + dev_info(shm_dev->dev, "user-space inited mod-reset-req\n"); + dev_info(shm_dev->dev, "PCRMU resets modem\n"); + prcmu_modem_reset(); + break; + + default: + dev_err(shm_dev->dev, "Invalid NL msg from user-space\n"); + break; + }; +} + +int shrm_protocol_init(struct shrm_dev *shrm, + received_msg_handler common_rx_handler, + received_msg_handler audio_rx_handler) +{ + int err; + + shm_dev = shrm; + boot_state = BOOT_INIT; + dev_info(shrm->dev, "IPC_ISA BOOT_INIT\n"); + rx_common_handler = common_rx_handler; + rx_audio_handler = audio_rx_handler; + atomic_set(&ac_sleep_disable_count, 0); + + is_earlydrop = cpu_is_u8500ed(); + if (is_earlydrop != 0x01) { + hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + timer.function = callback; + } + + shrm->shm_common_ch_wr_wq = create_singlethread_workqueue + ("shm_common_channel_irq"); + if (!shrm->shm_common_ch_wr_wq) { + dev_err(shrm->dev, "failed to create work queue\n"); + return -ENOMEM; + } + shrm->shm_audio_ch_wr_wq = create_singlethread_workqueue + ("shm_audio_channel_irq"); + if (!shrm->shm_audio_ch_wr_wq) { + dev_err(shrm->dev, "failed to create work queue\n"); + err = -ENOMEM; + goto free_wq1; + } + shrm->shm_ac_wake_wq = create_singlethread_workqueue("shm_ac_wake_req"); + if (!shrm->shm_ac_wake_wq) { + dev_err(shrm->dev, "failed to create work queue\n"); + err = -ENOMEM; + goto free_wq2; + } + shrm->shm_ca_wake_wq = 
create_singlethread_workqueue("shm_ca_wake_req"); + if (!shrm->shm_ac_wake_wq) { + dev_err(shrm->dev, "failed to create work queue\n"); + err = -ENOMEM; + goto free_wq3; + } + shrm->shm_ac_sleep_wq = create_singlethread_workqueue + ("shm_ac_sleep_req"); + if (!shrm->shm_ac_sleep_wq) { + dev_err(shrm->dev, "failed to create work queue\n"); + err = -ENOMEM; + goto free_wq4; + } + INIT_WORK(&shrm->send_ac_msg_pend_notify_0, + send_ac_msg_pend_notify_0_work); + INIT_WORK(&shrm->send_ac_msg_pend_notify_1, + send_ac_msg_pend_notify_1_work); + INIT_WORK(&shrm->shm_ca_wake_req, shm_ca_wake_req_work); + INIT_WORK(&shrm->shm_ca_sleep_req, shm_ca_sleep_req_work); + INIT_WORK(&shrm->shm_ac_sleep_req, shm_ac_sleep_req_work); + INIT_WORK(&shrm->shm_ac_wake_req, shm_ac_wake_req_work); + + /* set tasklet data */ + shm_ca_0_tasklet.data = (unsigned long)shrm; + shm_ca_1_tasklet.data = (unsigned long)shrm; + + err = request_irq(IRQ_PRCMU_CA_SLEEP, shrm_prcmu_irq_handler, + IRQF_NO_SUSPEND, "ca-sleep", shrm); + if (err < 0) { + dev_err(shm_dev->dev, "Failed alloc IRQ_PRCMU_CA_SLEEP.\n"); + goto free_wq5; + } + + err = request_irq(IRQ_PRCMU_CA_WAKE, shrm_prcmu_irq_handler, + IRQF_NO_SUSPEND, "ca-wake", shrm); + if (err < 0) { + dev_err(shm_dev->dev, "Failed alloc IRQ_PRCMU_CA_WAKE.\n"); + goto drop2; + } + + err = request_irq(IRQ_PRCMU_MODEM_SW_RESET_REQ, shrm_prcmu_irq_handler, + IRQF_NO_SUSPEND, "modem-sw-reset-req", shrm); + if (err < 0) { + dev_err(shm_dev->dev, + "Failed alloc IRQ_PRCMU_MODEM_SW_RESET_REQ.\n"); + goto drop1; + } + +#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET + /* init netlink socket for user-space communication */ + shrm_nl_sk = netlink_kernel_create(NULL, NETLINK_SHRM, 1, + shm_nl_receive, NULL, THIS_MODULE); + + if (!shrm_nl_sk) { + dev_err(shm_dev->dev, "netlink socket creation failed\n"); + goto drop; + } +#endif + + return 0; + +#ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET +drop: + free_irq(IRQ_PRCMU_MODEM_SW_RESET_REQ, NULL); +#endif +drop1: + 
free_irq(IRQ_PRCMU_CA_WAKE, NULL); +drop2: + free_irq(IRQ_PRCMU_CA_SLEEP, NULL); +free_wq5: + destroy_workqueue(shrm->shm_ac_sleep_wq); +free_wq4: + destroy_workqueue(shrm->shm_ca_wake_wq); +free_wq3: + destroy_workqueue(shrm->shm_ac_wake_wq); +free_wq2: + destroy_workqueue(shrm->shm_audio_ch_wr_wq); +free_wq1: + destroy_workqueue(shrm->shm_common_ch_wr_wq); + return err; +} + +void shrm_protocol_deinit(struct shrm_dev *shrm) +{ + free_irq(IRQ_PRCMU_CA_SLEEP, NULL); + free_irq(IRQ_PRCMU_CA_WAKE, NULL); + free_irq(IRQ_PRCMU_MODEM_SW_RESET_REQ, NULL); + flush_scheduled_work(); + destroy_workqueue(shrm->shm_common_ch_wr_wq); + destroy_workqueue(shrm->shm_audio_ch_wr_wq); + destroy_workqueue(shrm->shm_ac_wake_wq); + destroy_workqueue(shrm->shm_ca_wake_wq); + destroy_workqueue(shrm->shm_ac_sleep_wq); +} + +int get_ca_wake_req_state(void) +{ + return ((atomic_read(&ac_sleep_disable_count) > 0) || + prcmu_is_ac_wake_requested()); +} + +irqreturn_t ca_wake_irq_handler(int irq, void *ctrlr) +{ + struct shrm_dev *shrm = ctrlr; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + /* initialize the FIFO Variables */ + if (boot_state == BOOT_INIT) + shm_fifo_init(shrm); + + dev_dbg(shrm->dev, "Inside ca_wake_irq_handler\n"); + + /* Clear the interrupt */ + writel((1 << GOP_CA_WAKE_REQ_BIT), + shrm->intr_base + GOP_CLEAR_REGISTER_BASE); + + /* send ca_wake_ack_interrupt to CMU */ + writel((1 << GOP_CA_WAKE_ACK_BIT), + shrm->intr_base + GOP_SET_REGISTER_BASE); + + + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return IRQ_HANDLED; +} + + +irqreturn_t ac_read_notif_0_irq_handler(int irq, void *ctrlr) +{ + struct shrm_dev *shrm = ctrlr; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return IRQ_HANDLED; + } + + shm_ac_read_0_tasklet.data = (unsigned long)shrm; + tasklet_schedule(&shm_ac_read_0_tasklet); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or 
unknown.\n", + __func__); + return IRQ_HANDLED; + } + + /* Clear the interrupt */ + writel((1 << GOP_COMMON_AC_READ_NOTIFICATION_BIT), + shrm->intr_base + GOP_CLEAR_REGISTER_BASE); + + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return IRQ_HANDLED; +} + +irqreturn_t ac_read_notif_1_irq_handler(int irq, void *ctrlr) +{ + struct shrm_dev *shrm = ctrlr; + + dev_dbg(shrm->dev, "%s IN+\n", __func__); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return IRQ_HANDLED; + } + + shm_ac_read_1_tasklet.data = (unsigned long)shrm; + tasklet_schedule(&shm_ac_read_1_tasklet); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return IRQ_HANDLED; + } + + /* Clear the interrupt */ + writel((1 << GOP_AUDIO_AC_READ_NOTIFICATION_BIT), + shrm->intr_base + GOP_CLEAR_REGISTER_BASE); + + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return IRQ_HANDLED; +} + +irqreturn_t ca_msg_pending_notif_0_irq_handler(int irq, void *ctrlr) +{ + struct shrm_dev *shrm = ctrlr; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return IRQ_HANDLED; + } + + tasklet_schedule(&shm_ca_0_tasklet); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return IRQ_HANDLED; + } + + /* Clear the interrupt */ + writel((1 << GOP_COMMON_CA_MSG_PENDING_NOTIFICATION_BIT), + shrm->intr_base + GOP_CLEAR_REGISTER_BASE); + + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return IRQ_HANDLED; +} + +irqreturn_t ca_msg_pending_notif_1_irq_handler(int irq, void *ctrlr) +{ + struct shrm_dev *shrm = ctrlr; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return IRQ_HANDLED; + } + + tasklet_schedule(&shm_ca_1_tasklet); + + if (check_modem_in_reset()) { + 
dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return IRQ_HANDLED; + } + + /* Clear the interrupt */ + writel((1<<GOP_AUDIO_CA_MSG_PENDING_NOTIFICATION_BIT), + shrm->intr_base+GOP_CLEAR_REGISTER_BASE); + + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return IRQ_HANDLED; + +} + +/** + * shm_write_msg() - write message to shared memory + * @shrm: pointer to the shrm device information structure + * @l2_header: L2 header + * @addr: pointer to the message + * @length: length of the message to be written + * + * This function is called from net or char interface driver write operation. + * Prior to calling this function the message is copied from the user space + * buffer to the kernel buffer. This function based on the l2 header routes + * the message to the respective channel and FIFO. Then makes a call to the + * fifo write function where the message is written to the physical device. + */ +int shm_write_msg(struct shrm_dev *shrm, u8 l2_header, + void *addr, u32 length) +{ + u8 channel = 0; + int ret; + + dev_dbg(shrm->dev, "%s IN\n", __func__); + + if (boot_state != BOOT_DONE) { + dev_err(shrm->dev, + "error after boot done call this fn\n"); + ret = -ENODEV; + goto out; + } + + if ((l2_header == L2_HEADER_ISI) || + (l2_header == L2_HEADER_RPC) || + (l2_header == L2_HEADER_SECURITY) || + (l2_header == L2_HEADER_COMMON_SIMPLE_LOOPBACK) || + (l2_header == L2_HEADER_COMMON_ADVANCED_LOOPBACK)) { + channel = 0; + if (shrm_common_tx_state == SHRM_SLEEP_STATE) + shrm_common_tx_state = SHRM_PTR_FREE; + else if (shrm_common_tx_state == SHRM_IDLE) + shrm_common_tx_state = SHRM_PTR_FREE; + + } else if ((l2_header == L2_HEADER_AUDIO) || + (l2_header == L2_HEADER_AUDIO_SIMPLE_LOOPBACK) || + (l2_header == L2_HEADER_AUDIO_ADVANCED_LOOPBACK)) { + if (shrm_audio_tx_state == SHRM_SLEEP_STATE) + shrm_audio_tx_state = SHRM_PTR_FREE; + else if (shrm_audio_tx_state == SHRM_IDLE) + shrm_audio_tx_state = SHRM_PTR_FREE; + + channel = 1; + } else { + ret = -ENODEV; + 
goto out; + } + ret = shm_write_msg_to_fifo(shrm, channel, l2_header, addr, length); + if (ret < 0) { + dev_err(shrm->dev, "write message to fifo failed\n"); + return ret; + } + /* + * notify only if new msg copied is the only unread one + * otherwise it means that reading process is ongoing + */ + if (is_the_only_one_unread_message(shrm, channel, length)) { + + /* Send Message Pending Noitication to CMT */ + if (channel == 0) + queue_work(shrm->shm_common_ch_wr_wq, + &shrm->send_ac_msg_pend_notify_0); + else + queue_work(shrm->shm_audio_ch_wr_wq, + &shrm->send_ac_msg_pend_notify_1); + + } + + dev_dbg(shrm->dev, "%s OUT\n", __func__); + return 0; + +out: + return ret; +} + +void ca_msg_read_notification_0(struct shrm_dev *shrm) +{ + dev_dbg(shrm->dev, "%s IN\n", __func__); + + if (get_ca_msg_0_read_notif_send() == 0) { + update_ca_common_shared_rptr(shrm); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return; + } + + /* Trigger CaMsgReadNotification to CMU */ + writel((1 << GOP_COMMON_CA_READ_NOTIFICATION_BIT), + shrm->intr_base + GOP_SET_REGISTER_BASE); + set_ca_msg_0_read_notif_send(1); + shrm_common_rx_state = SHRM_PTR_BUSY; + } + + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +void ca_msg_read_notification_1(struct shrm_dev *shrm) +{ + dev_dbg(shrm->dev, "%s IN\n", __func__); + + if (get_ca_msg_1_read_notif_send() == 0) { + update_ca_audio_shared_rptr(shrm); + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return; + } + + /* Trigger CaMsgReadNotification to CMU */ + writel((1<<GOP_AUDIO_CA_READ_NOTIFICATION_BIT), + shrm->intr_base+GOP_SET_REGISTER_BASE); + set_ca_msg_1_read_notif_send(1); + shrm_audio_rx_state = SHRM_PTR_BUSY; + } + dev_dbg(shrm->dev, "%s OUT\n", __func__); +} + +/** + * receive_messages_common - receive common channnel msg from + * CMT(Cellular Mobile Terminal) + * @shrm: pointer to shrm device information structure + * + 
* The messages sent from CMT to APE are written to the respective FIFO + * and an interrupt is triggered by the CMT. This ca message pending + * interrupt calls this function. This function sends a read notification + * acknowledgement to the CMT and calls the common channel receive handler + * where the messsage is copied to the respective(ISI, RPC, SECURIT) queue + * based on the message l2 header. + */ +void receive_messages_common(struct shrm_dev *shrm) +{ + u8 l2_header; + u32 len; + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return; + } + + l2_header = read_one_l2msg_common(shrm, recieve_common_msg, &len); + /* Send Recieve_Call_back to Upper Layer */ + if (!rx_common_handler) { + dev_err(shrm->dev, "common_rx_handler is Null\n"); + BUG(); + } + (*rx_common_handler)(l2_header, &recieve_common_msg, len, + shrm); + /* SendReadNotification */ + ca_msg_read_notification_0(shrm); + + while (read_remaining_messages_common()) { + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return; + } + + l2_header = read_one_l2msg_common(shrm, recieve_common_msg, + &len); + /* Send Recieve_Call_back to Upper Layer */ + (*rx_common_handler)(l2_header, + &recieve_common_msg, len, + shrm); + } +} + +/** + * receive_messages_audio() - receive audio message from CMT + * @shrm: pointer to shrm device information structure + * + * The messages sent from CMT to APE are written to the respective FIFO + * and an interrupt is triggered by the CMT. This ca message pending + * interrupt calls this function. This function sends a read notification + * acknowledgement to the CMT and calls the common channel receive handler + * where the messsage is copied to the audio queue. 
+ */ +void receive_messages_audio(struct shrm_dev *shrm) +{ + u8 l2_header; + u32 len; + + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return; + } + + l2_header = read_one_l2msg_audio(shrm, recieve_audio_msg, &len); + /* Send Recieve_Call_back to Upper Layer */ + + if (!rx_audio_handler) { + dev_crit(shrm->dev, "audio_rx_handler is Null\n"); + BUG(); + } + (*rx_audio_handler)(l2_header, &recieve_audio_msg, + len, shrm); + + /* SendReadNotification */ + ca_msg_read_notification_1(shrm); + while (read_remaining_messages_audio()) { + if (check_modem_in_reset()) { + dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", + __func__); + return; + } + + l2_header = read_one_l2msg_audio(shrm, + recieve_audio_msg, &len); + /* Send Recieve_Call_back to Upper Layer */ + (*rx_audio_handler)(l2_header, + &recieve_audio_msg, len, + shrm); + } +} + +u8 get_boot_state() +{ + return boot_state; +} diff --git a/drivers/misc/stm.c b/drivers/misc/stm.c new file mode 100644 index 00000000000..3782d837291 --- /dev/null +++ b/drivers/misc/stm.c @@ -0,0 +1,414 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <mach/prcmu-regs.h> +#include <trace/stm.h> + +#define STM_CLOCK_SHIFT 6 +#define STM_CLOCK_MASK 0x1C0 +#define STM_ENABLE_MASK 0x23D +/* Software mode for all cores except PRCMU that doesn't support SW */ +#define STM_MMC_DEFAULT 0x20 + +/* STM Registers */ +#define STM_CR (stm.virtbase) +#define STM_MMC (stm.virtbase + 0x008) +#define STM_TER (stm.virtbase + 0x010) +#define STMPERIPHID0 (stm.virtbase + 0xFC0) +#define STMPERIPHID1 (stm.virtbase + 0xFC8) +#define STMPERIPHID2 (stm.virtbase + 0xFD0) +#define STMPERIPHID3 (stm.virtbase + 0xFD8) +#define STMPCELLID0 (stm.virtbase + 0xFE0) +#define STMPCELLID1 (stm.virtbase + 0xFE8) +#define STMPCELLID2 (stm.virtbase + 0xFF0) +#define STMPCELLID3 (stm.virtbase + 0xFF8) + +static struct stm_device { + struct stm_platform_data *pdata; + void __iomem *virtbase; + volatile struct stm_channel __iomem *channels; + /* Used to register the allocated channels */ + DECLARE_BITMAP(ch_bitmap, STM_NUMBER_OF_CHANNEL); +} stm; + +static DEFINE_MUTEX(lock); + +static char *mipi60; +module_param(mipi60, charp, S_IRUGO); +MODULE_PARM_DESC(mipi60, "Trace to output on probe2 of mipi60 " + "('ape' or 'none')"); + +static char *mipi34 = "modem"; +module_param(mipi34, charp, S_IRUGO); +MODULE_PARM_DESC(mipi34, "Trace to output on mipi34 ('ape' or 'modem')"); + +#define IS_APE_ON_MIPI34 (mipi34 && !strcmp(mipi34, "ape")) +#define IS_APE_ON_MIPI60 (mipi60 && !strcmp(mipi60, "ape")) + +static int stm_open(struct inode *inode, struct file *file) +{ + file->private_data = kzalloc(sizeof(stm.ch_bitmap), GFP_KERNEL); + if (file->private_data == NULL) + return -ENOMEM; + return 0; +} + +static int stm_release(struct inode *inode, struct file *filp) +{ + bitmap_andnot(stm.ch_bitmap, stm.ch_bitmap, filp->private_data, + 
STM_NUMBER_OF_CHANNEL); + kfree(filp->private_data); + return 0; +} + +static int stm_mmap(struct file *filp, struct vm_area_struct *vma) +{ + /* + * Don't allow a mapping that covers more than the STM channels + * 4096 == sizeof(struct stm_channels) + */ + if ((vma->vm_end - vma->vm_start) > SZ_4K) + return -EINVAL; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (io_remap_pfn_range(vma, vma->vm_start, + stm.pdata->channels_phys_base>>PAGE_SHIFT, + SZ_4K, vma->vm_page_prot)) + return -EAGAIN ; + + return 0; +} + +void stm_disable_src(void) +{ + mutex_lock(&lock); + writel(0x0, STM_CR); /* stop clock */ + writel(STM_MMC_DEFAULT, STM_MMC); + writel(0x0, STM_TER); /* Disable cores */ + mutex_unlock(&lock); +} +EXPORT_SYMBOL(stm_disable_src); + +int stm_set_ckdiv(enum clock_div v) +{ + unsigned int val; + + mutex_lock(&lock); + val = readl(STM_CR); + val &= ~STM_CLOCK_MASK; + writel(val | ((v << STM_CLOCK_SHIFT) & STM_CLOCK_MASK), STM_CR); + mutex_unlock(&lock); + + return 0; +} +EXPORT_SYMBOL(stm_set_ckdiv); + +unsigned int stm_get_cr(void) +{ + return readl(STM_CR); +} +EXPORT_SYMBOL(stm_get_cr); + +int stm_enable_src(unsigned int v) +{ + unsigned int val; + mutex_lock(&lock); + val = readl(STM_CR); + val &= ~STM_CLOCK_MASK; + /* middle possible clock */ + writel(val & (STM_CLOCK_DIV8 << STM_CLOCK_SHIFT), STM_CR); + writel(STM_MMC_DEFAULT, STM_MMC); + writel((v & STM_ENABLE_MASK), STM_TER); + mutex_unlock(&lock); + return 0; +} +EXPORT_SYMBOL(stm_enable_src); + +static int stm_get_channel(struct file *filp, int __user *arg) +{ + int c, err; + + /* Look for a free channel */ + do { + c = find_first_zero_bit(stm.ch_bitmap, STM_NUMBER_OF_CHANNEL); + } while ((c < STM_NUMBER_OF_CHANNEL) + && test_and_set_bit(c, stm.ch_bitmap)); + + if (c < STM_NUMBER_OF_CHANNEL) { + /* One free found ! 
*/ + err = put_user(c, arg); + if (err) { + clear_bit(c, stm.ch_bitmap); + } else { + /* Register it in the context of the file */ + unsigned long *local_bitmap = filp->private_data; + if (local_bitmap) + set_bit(c, local_bitmap); + } + } else { + err = -ENOMEM; + } + return err; +} + +static int stm_free_channel(struct file *filp, int channel) +{ + if ((channel < 0) || (channel >= STM_NUMBER_OF_CHANNEL)) + return -EINVAL; + clear_bit(channel, stm.ch_bitmap); + if (filp->private_data) + clear_bit(channel, filp->private_data); + return 0; +} + +static long stm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int err = 0; + + switch (cmd) { + + case STM_DISABLE: + stm_disable_src(); + break; + + case STM_SET_CLOCK_DIV: + err = stm_set_ckdiv((enum clock_div) arg); + break; + + case STM_GET_CTRL_REG: + err = put_user(stm_get_cr(), (unsigned int *)arg); + break; + + case STM_ENABLE_SRC: + err = stm_enable_src(arg); + break; + + case STM_DISABLE_MIPI34_MODEM: + stm.pdata->ste_disable_modem_on_mipi34(); + break; + + case STM_ENABLE_MIPI34_MODEM: + stm.pdata->ste_enable_modem_on_mipi34(); + break; + + case STM_GET_FREE_CHANNEL: + err = stm_get_channel(filp, (int *)arg); + break; + + case STM_RELEASE_CHANNEL: + err = stm_free_channel(filp, arg); + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +#define DEFLLTFUN(size) \ + void stm_trace_##size(unsigned char channel, uint##size##_t data) \ + { \ + (__chk_io_ptr(&(stm.channels[channel].no_stamp##size))), \ + *(volatile uint##size##_t __force *) \ + (&(stm.channels[channel].no_stamp##size)) = data;\ + } \ + EXPORT_SYMBOL(stm_trace_##size); \ + void stm_tracet_##size(unsigned char channel, uint##size##_t data) \ + { \ + (__chk_io_ptr(&(stm.channels[channel].stamp##size))), \ + *(volatile uint##size##_t __force *) \ + (&(stm.channels[channel].stamp##size)) = data; } \ + EXPORT_SYMBOL(stm_tracet_##size) + +DEFLLTFUN(8); +DEFLLTFUN(16); +DEFLLTFUN(32); +DEFLLTFUN(64); + +static const 
struct file_operations stm_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = stm_ioctl, + .open = stm_open, + .llseek = no_llseek, + .release = stm_release, + .mmap = stm_mmap, +}; + +static struct miscdevice stm_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = STM_DEV_NAME, + .fops = &stm_fops +}; + +static int __devinit stm_probe(struct platform_device *pdev) +{ + int err; + + if (!pdev || !pdev->dev.platform_data) { + pr_alert("No device/platform_data found on STM driver\n"); + return -ENODEV; + } + + stm.pdata = pdev->dev.platform_data; + + /* Reserve channels if necessary */ + if (stm.pdata->channels_reserved) { + int i = 0; + while (stm.pdata->channels_reserved[i] != -1) { + set_bit(stm.pdata->channels_reserved[i], stm.ch_bitmap); + i++; + } + } + + err = misc_register(&stm_misc); + if (err) { + dev_alert(&pdev->dev, "Unable to register misc driver!\n"); + return err; + } + + stm.virtbase = ioremap_nocache(stm.pdata->regs_phys_base, SZ_4K); + if (stm.virtbase == NULL) { + err = -EIO; + dev_err(&pdev->dev, "could not remap STM Register\n"); + goto fail_init; + } + + stm.channels = + ioremap_nocache(stm.pdata->channels_phys_base, + STM_NUMBER_OF_CHANNEL*sizeof(*stm.channels)); + if (stm.channels == NULL) { + dev_err(&pdev->dev, "could not remap STM Msg register\n"); + goto fail_init; + } + + /* Check chip IDs if necessary */ + if (stm.pdata->periph_id && stm.pdata->cell_id) { + u32 periph_id, cell_id; + + periph_id = (readb(STMPERIPHID0)<<24) + + (readb(STMPERIPHID1)<<16) + + (readb(STMPERIPHID2)<<8) + + readb(STMPERIPHID3); + cell_id = (readb(STMPCELLID0)<<24) + + (readb(STMPCELLID1)<<16) + + (readb(STMPCELLID2)<<8) + + readb(STMPCELLID3); + /* Ignore periph id2 field verification */ + if ((stm.pdata->periph_id & 0xFFFF00FF) + != (periph_id & 0xFFFF00FF) || + stm.pdata->cell_id != cell_id) { + dev_err(&pdev->dev, "STM-Trace not supported\n"); + dev_err(&pdev->dev, "periph_id=%x\n", periph_id); + dev_err(&pdev->dev, "pcell_id=%x\n", cell_id); + err = 
-EINVAL; + goto fail_init; + } + } + + if (IS_APE_ON_MIPI60) { + if (IS_APE_ON_MIPI34) { + dev_info(&pdev->dev, "Can't not enable APE trace on " + "mipi34 and mipi60-probe2: disabling APE on" + " mipi34\n"); + mipi34 = "modem"; + } + if (stm.pdata->ste_gpio_enable_ape_modem_mipi60) { + err = stm.pdata->ste_gpio_enable_ape_modem_mipi60(); + if (err) + dev_err(&pdev->dev, "can't enable MIPI60\n"); + } + } + + if (IS_APE_ON_MIPI34) { + if (stm.pdata->ste_disable_modem_on_mipi34) + stm.pdata->ste_disable_modem_on_mipi34(); + } else { + if (stm.pdata->ste_enable_modem_on_mipi34) + stm.pdata->ste_enable_modem_on_mipi34(); + } + + if (stm.pdata->ste_gpio_enable_mipi34) { + err = stm.pdata->ste_gpio_enable_mipi34(); + if (err) { + dev_err(&pdev->dev, "failed to set GPIO_ALT_TRACE\n"); + goto fail_init; + } + } + + if (stm.pdata->masters_enabled) + stm_enable_src(stm.pdata->masters_enabled); + dev_info(&pdev->dev, "STM-Trace driver probed successfully\n"); + return 0; +fail_init: + if (stm.virtbase) + iounmap(stm.virtbase); + + if (stm.channels) + iounmap(stm.channels); + misc_deregister(&stm_misc); + + return err; +} + +static int __devexit stm_remove(struct platform_device *pdev) +{ + struct stm_platform_data *pdata; + pdata = pdev->dev.platform_data; + + if (pdata->ste_gpio_disable_mipi34) + pdata->ste_gpio_disable_mipi34(); + + stm_disable_src(); + + if (stm.virtbase) + iounmap(stm.virtbase); + + if (stm.channels) + iounmap(stm.channels); + + misc_deregister(&stm_misc); + return 0; +} + +static struct platform_driver stm_driver = { + .probe = stm_probe, + .remove = __devexit_p(stm_remove), + .driver = { + .name = "stm", + .owner = THIS_MODULE, + } +}; + +static int __init stm_init(void) +{ + return platform_driver_register(&stm_driver); +} + +static void __exit stm_exit(void) +{ + platform_driver_unregister(&stm_driver); +} + +module_init(stm_init); +module_exit(stm_exit); + +MODULE_AUTHOR("Paul Ghaleb - ST Microelectronics"); +MODULE_AUTHOR("Pierre Peiffer - 
ST-Ericsson"); +MODULE_DESCRIPTION("Ux500 System Trace Module driver"); +MODULE_ALIAS("stm"); +MODULE_ALIAS("stm-trace"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c new file mode 100644 index 00000000000..f5fd7038779 --- /dev/null +++ b/drivers/misc/uid_stat.c @@ -0,0 +1,186 @@ +/* drivers/misc/uid_stat.c + * + * Copyright (C) 2008 - 2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <asm/atomic.h> + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/proc_fs.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/stat.h> +#include <linux/uid_stat.h> +#include <net/activity_stats.h> + +static DEFINE_SPINLOCK(uid_lock); +static LIST_HEAD(uid_list); +static struct proc_dir_entry *parent; + +struct uid_stat { + struct list_head link; + uid_t uid; + atomic_t tcp_rcv; + atomic_t tcp_snd; + atomic_t tcp_rcv_pkt; + atomic_t tcp_snd_pkt; + atomic_t udp_rcv; + atomic_t udp_snd; + atomic_t udp_rcv_pkt; + atomic_t udp_snd_pkt; +}; + +static int read_proc_entry(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len; + unsigned int value; + char *p = page; + atomic_t *uid_entry = (atomic_t *) data; + if (!data) + return 0; + + value = (unsigned int) (atomic_read(uid_entry) + INT_MIN); + p += sprintf(p, "%u\n", value); + len = (p - page) - off; + *eof = (len <= count) ? 
1 : 0; + *start = page + off; + return len; +} + +/* Find or create a new entry for tracking the specified uid. */ +static struct uid_stat *get_uid_stat(uid_t uid) { + unsigned long flags; + struct uid_stat *uid_entry; + struct uid_stat *new_uid; + struct proc_dir_entry *proc_entry; + char uid_s[32]; + + spin_lock_irqsave(&uid_lock, flags); + list_for_each_entry(uid_entry, &uid_list, link) { + if (uid_entry->uid == uid) { + spin_unlock_irqrestore(&uid_lock, flags); + return uid_entry; + } + } + spin_unlock_irqrestore(&uid_lock, flags); + + /* Create a new entry for tracking the specified uid. */ + if ((new_uid = kmalloc(sizeof(struct uid_stat), GFP_KERNEL)) == NULL) + return NULL; + + new_uid->uid = uid; + /* Counters start at INT_MIN, so we can track 4GB of network traffic. */ + atomic_set(&new_uid->tcp_rcv, INT_MIN); + atomic_set(&new_uid->tcp_snd, INT_MIN); + atomic_set(&new_uid->tcp_snd_pkt, INT_MIN); + atomic_set(&new_uid->tcp_rcv_pkt, INT_MIN); + atomic_set(&new_uid->udp_rcv, INT_MIN); + atomic_set(&new_uid->udp_snd, INT_MIN); + atomic_set(&new_uid->udp_snd_pkt, INT_MIN); + atomic_set(&new_uid->udp_rcv_pkt, INT_MIN); + + /* Append the newly created uid stat struct to the list. */ + spin_lock_irqsave(&uid_lock, flags); + list_add_tail(&new_uid->link, &uid_list); + spin_unlock_irqrestore(&uid_lock, flags); + + sprintf(uid_s, "%d", uid); + proc_entry = proc_mkdir(uid_s, parent); + + /* Keep reference to uid_stat so we know what uid to read stats from. 
*/ + create_proc_read_entry("tcp_snd", S_IRUGO, proc_entry, read_proc_entry, + (void *) &new_uid->tcp_snd); + + create_proc_read_entry("tcp_rcv", S_IRUGO, proc_entry, read_proc_entry, + (void *) &new_uid->tcp_rcv); + + create_proc_read_entry("tcp_snd_pkt", S_IRUGO, proc_entry, read_proc_entry, + (void *) &new_uid->tcp_snd_pkt); + + create_proc_read_entry("tcp_rcv_pkt", S_IRUGO, proc_entry, read_proc_entry, + (void *) &new_uid->tcp_rcv_pkt); + + create_proc_read_entry("udp_snd", S_IRUGO, proc_entry, read_proc_entry, + (void *) &new_uid->udp_snd); + + create_proc_read_entry("udp_rcv", S_IRUGO, proc_entry, read_proc_entry, + (void *) &new_uid->udp_rcv); + + create_proc_read_entry("udp_snd_pkt", S_IRUGO, proc_entry, read_proc_entry, + (void *) &new_uid->udp_snd_pkt); + + create_proc_read_entry("udp_rcv_pkt", S_IRUGO, proc_entry, read_proc_entry, + (void *) &new_uid->udp_rcv_pkt); + + return new_uid; +} + +int uid_stat_tcp_snd(uid_t uid, int size) { + struct uid_stat *entry; + activity_stats_update(); + if ((entry = get_uid_stat(uid)) == NULL) { + return -1; + } + atomic_add(size, &entry->tcp_snd); + atomic_inc(&entry->tcp_snd_pkt); + return 0; +} + +int uid_stat_tcp_rcv(uid_t uid, int size) { + struct uid_stat *entry; + activity_stats_update(); + if ((entry = get_uid_stat(uid)) == NULL) { + return -1; + } + atomic_add(size, &entry->tcp_rcv); + atomic_inc(&entry->tcp_rcv_pkt); + return 0; +} + +int uid_stat_udp_snd(uid_t uid, int size) { + struct uid_stat *entry; + activity_stats_update(); + if ((entry = get_uid_stat(uid)) == NULL) { + return -1; + } + atomic_add(size, &entry->udp_snd); + atomic_inc(&entry->udp_snd_pkt); + return 0; +} + +int uid_stat_udp_rcv(uid_t uid, int size) { + struct uid_stat *entry; + activity_stats_update(); + if ((entry = get_uid_stat(uid)) == NULL) { + return -1; + } + atomic_add(size, &entry->udp_rcv); + atomic_inc(&entry->udp_rcv_pkt); + return 0; +} + +static int __init uid_stat_init(void) +{ + parent = proc_mkdir("uid_stat", NULL); + if 
(!parent) { + pr_err("uid_stat: failed to create proc entry\n"); + return -1; + } + return 0; +} + +__initcall(uid_stat_init); diff --git a/drivers/misc/wl127x-rfkill.c b/drivers/misc/wl127x-rfkill.c new file mode 100644 index 00000000000..f5b95152948 --- /dev/null +++ b/drivers/misc/wl127x-rfkill.c @@ -0,0 +1,121 @@ +/* + * Bluetooth TI wl127x rfkill power control via GPIO + * + * Copyright (C) 2009 Motorola, Inc. + * Copyright (C) 2008 Texas Instruments + * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/gpio.h> +#include <linux/rfkill.h> +#include <linux/platform_device.h> +#include <linux/wl127x-rfkill.h> + +static int wl127x_rfkill_set_power(void *data, enum rfkill_state state) +{ + int nshutdown_gpio = (int) data; + + switch (state) { + case RFKILL_STATE_UNBLOCKED: + gpio_set_value(nshutdown_gpio, 1); + break; + case RFKILL_STATE_SOFT_BLOCKED: + gpio_set_value(nshutdown_gpio, 0); + break; + default: + printk(KERN_ERR "invalid bluetooth rfkill state %d\n", state); + } + return 0; +} + +static int wl127x_rfkill_probe(struct platform_device *pdev) +{ + int rc = 0; + struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data; + enum rfkill_state default_state = RFKILL_STATE_SOFT_BLOCKED; /* off */ + + rc = gpio_request(pdata->nshutdown_gpio, "wl127x_nshutdown_gpio"); + if (unlikely(rc)) + return rc; + + rc = gpio_direction_output(pdata->nshutdown_gpio, 0); + if (unlikely(rc)) + return rc; + + rfkill_set_default(RFKILL_TYPE_BLUETOOTH, default_state); + wl127x_rfkill_set_power(NULL, default_state); + + pdata->rfkill = rfkill_allocate(&pdev->dev, RFKILL_TYPE_BLUETOOTH); + if (unlikely(!pdata->rfkill)) + return -ENOMEM; + + pdata->rfkill->name = "wl127x"; + pdata->rfkill->state = default_state; + /* userspace cannot take exclusive control */ + pdata->rfkill->user_claim_unsupported = 1; + pdata->rfkill->user_claim = 0; + pdata->rfkill->data = (void *) pdata->nshutdown_gpio; + pdata->rfkill->toggle_radio = wl127x_rfkill_set_power; + + rc = rfkill_register(pdata->rfkill); + + if (unlikely(rc)) + rfkill_free(pdata->rfkill); + + return 0; +} + +static int wl127x_rfkill_remove(struct platform_device *pdev) +{ + struct wl127x_rfkill_platform_data *pdata = 
pdev->dev.platform_data; + + rfkill_unregister(pdata->rfkill); + rfkill_free(pdata->rfkill); + gpio_free(pdata->nshutdown_gpio); + + return 0; +} + +static struct platform_driver wl127x_rfkill_platform_driver = { + .probe = wl127x_rfkill_probe, + .remove = wl127x_rfkill_remove, + .driver = { + .name = "wl127x-rfkill", + .owner = THIS_MODULE, + }, +}; + +static int __init wl127x_rfkill_init(void) +{ + return platform_driver_register(&wl127x_rfkill_platform_driver); +} + +static void __exit wl127x_rfkill_exit(void) +{ + platform_driver_unregister(&wl127x_rfkill_platform_driver); +} + +module_init(wl127x_rfkill_init); +module_exit(wl127x_rfkill_exit); + +MODULE_ALIAS("platform:wl127x"); +MODULE_DESCRIPTION("wl127x-rfkill"); +MODULE_AUTHOR("Motorola"); +MODULE_LICENSE("GPL"); |