Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/Kconfig                      74
-rw-r--r--  drivers/misc/Kconfig.stm                 120
-rw-r--r--  drivers/misc/Makefile                     10
-rw-r--r--  drivers/misc/ab8500-pwm.c                211
-rw-r--r--  drivers/misc/bh1780gli.c                 192
-rw-r--r--  drivers/misc/clonedev/Makefile             5
-rw-r--r--  drivers/misc/clonedev/clonedev.c         312
-rw-r--r--  drivers/misc/compdev/Makefile              6
-rw-r--r--  drivers/misc/compdev/compdev.c          1381
-rw-r--r--  drivers/misc/db8500-modem-trace.c        273
-rw-r--r--  drivers/misc/dbx500-mloader.c            288
-rw-r--r--  drivers/misc/dispdev/Makefile              1
-rw-r--r--  drivers/misc/dispdev/dispdev.c           659
-rw-r--r--  drivers/misc/hwmem/Makefile                3
-rw-r--r--  drivers/misc/hwmem/cache_handler.c       510
-rw-r--r--  drivers/misc/hwmem/cache_handler.h        61
-rw-r--r--  drivers/misc/hwmem/contig_alloc.c        571
-rw-r--r--  drivers/misc/hwmem/hwmem-ioctl.c         532
-rw-r--r--  drivers/misc/hwmem/hwmem-main.c          726
-rw-r--r--  drivers/misc/mbox.c                      867
-rw-r--r--  drivers/misc/mbox_channels-db5500.c     1273
-rw-r--r--  drivers/misc/modem_audio/Kconfig           6
-rw-r--r--  drivers/misc/modem_audio/Makefile          2
-rw-r--r--  drivers/misc/modem_audio/mad.c           506
-rw-r--r--  drivers/misc/sim_detect.c                306
-rw-r--r--  drivers/misc/stm.c                       850
26 files changed, 9694 insertions, 51 deletions
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c7795096d43..4fbe2c3f0f9 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -451,6 +451,20 @@ config ARM_CHARLCD
line and the Linux version on the second line, but that's
still useful.
+config STE_TRACE_MODEM
+ tristate "DB8500 trace Modem"
+ depends on ARCH_U8500
+ default n
+ help
+ Select this option to enable modem tracing by APE
+
+config DBX500_MLOADER
+ tristate "Modem firmware loader for db8500"
+ default n
+ depends on UX500_SOC_DB8500 || UX500_SOC_DB5500
+ help
+ Provides a user interface to load modem firmware on dbx500 SoCs
+
config BMP085
tristate "BMP085 digital pressure sensor"
depends on I2C && SYSFS
@@ -461,6 +475,40 @@ config BMP085
To compile this driver as a module, choose M here: the
module will be called bmp085.
+config DISPDEV
+ bool "Display overlay device"
+ depends on FB_MCDE
+ default n
+ help
+ This driver provides a way to use a second overlay for a display (in
+ addition to the framebuffer). The device allows for registration of
+ userspace buffers to be used with the overlay.
+
+config COMPDEV
+ bool "Display composition device"
+ depends on FB_MCDE && HWMEM
+ default n
+ help
+ This driver provides a way to use several overlays for a display.
+ This driver replaces the use of the framebuffer. The device allows
+ posting userspace buffers to be used with the overlays.
+
+config CLONEDEV
+ bool "Display cloning device"
+ depends on FB_MCDE && HWMEM && COMPDEV
+ default n
+ help
+ This driver provides a way to clone content between two compdev
+ devices.
+
+config CLONEDEV_DEBUG
+ bool "Display cloning device debug"
+ depends on CLONEDEV
+ default n
+ help
+ Enable debug output from the clonedev driver (builds the driver
+ with -DDEBUG).
+
config PCH_PHUB
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
depends on PCI
@@ -481,6 +529,30 @@ config PCH_PHUB
To compile this driver as a module, choose M here: the module will
be called pch_phub.
+config HWMEM
+ bool "Hardware memory driver"
+ default n
+ help
+ This driver provides a way to allocate contiguous system memory that
+ can be used by hardware. It also enables access to hwmem-allocated
+ memory buffers through a secure ID that can be shared across processes.
+
+config U5500_MBOX
+ bool "Mailbox support"
+ depends on (UX500_SOC_DB5500 && U5500_MODEM_IRQ)
+ default y
+ help
+ Add support for U5500 mailbox communication with the modem side
+
+config U8500_SIM_DETECT
+ bool "Sim hot swap detection support"
+ depends on (MODEM && UX500_SOC_DB8500)
+ default n
+ help
+ Add support for SIM hot swap detection in U8500. The driver
+ wakes up the modem, if it is sleeping, when a SIM hot plug
+ in/out has happened.
+
config USB_SWITCH_FSA9480
tristate "FSA9480 USB Switch"
depends on I2C
@@ -498,6 +570,7 @@ config MAX8997_MUIC
Maxim MAX8997 PMIC.
The MAX8997 MUIC is a USB port accessory detector and switch.
+source "drivers/misc/Kconfig.stm"
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -506,4 +579,5 @@ source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
+source "drivers/misc/modem_audio/Kconfig"
endmenu
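
The HWMEM option above is the allocator that compdev and clonedev below depend on: it hands out contiguous, hardware-usable buffers and exports them to other processes through a global name (secure ID). As a rough, illustrative sketch of that in-kernel API, assembled only from the hwmem calls that appear in compdev.c further down in this patch (the function name example_hwmem_export is made up; error handling is trimmed):

	/* Illustrative sketch, not part of this patch. */
	static int example_hwmem_export(size_t size, u32 *paddr_out, int *name_out)
	{
		struct hwmem_alloc *alloc;
		struct hwmem_mem_chunk chunk;
		size_t nr_chunks = 1;
		int ret;

		alloc = hwmem_alloc(PAGE_ALIGN(size),
				HWMEM_ALLOC_HINT_WRITE_COMBINE |
				HWMEM_ALLOC_HINT_UNCACHED,
				HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE |
				HWMEM_ACCESS_IMPORT,
				HWMEM_MEM_CONTIGUOUS_SYS);
		if (IS_ERR(alloc))
			return PTR_ERR(alloc);

		*name_out = hwmem_get_name(alloc);	/* global name, shareable */
		ret = hwmem_pin(alloc, &chunk, &nr_chunks);
		if (ret) {
			hwmem_release(alloc);
			return ret;
		}
		*paddr_out = chunk.paddr;		/* physical address for the HW */
		return 0;
	}

A second client turns a shared name back into an allocation with hwmem_resolve_by_name(), as compdev_setup_ovly() does below.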
diff --git a/drivers/misc/Kconfig.stm b/drivers/misc/Kconfig.stm
new file mode 100644
index 00000000000..d509c85c79f
--- /dev/null
+++ b/drivers/misc/Kconfig.stm
@@ -0,0 +1,120 @@
+menuconfig STM_TRACE
+ bool "STM MIPI Trace driver"
+ depends on ARCH_U8500
+ help
+ Simple System Trace Module driver. It allows the STM to be used and
+ configured either from kernel space or from user space.
+
+if STM_TRACE
+
+config STM_NUMBER_OF_CHANNEL
+ int
+ default 512 if ARCH_U8500
+ default 256
+ help
+ Maximum number of channels, always a multiple of 256
+
+config STM_DEFAULT_MASTERS_MODES
+ hex "channel mode"
+ default 0xffffffff
+ help
+ Default config for enabling hardware mode tracing
+
+config STM_PRINTK
+ bool "printk support"
+ depends on STM_TRACE
+ help
+ Duplicate printk output on the STM printk channel and activate stm_printk
+
+config STM_PRINTK_CHANNEL
+ int "printk channel"
+ range 0 255
+ depends on STM_PRINTK
+ default 255
+ help
+ STM printk channel number
+
+config STM_FTRACE
+ bool "functions tracing"
+ depends on FTRACE
+ default y
+ help
+ Output function tracing on STM dedicated channel
+
+config STM_FTRACE_CHANNEL
+ int "ftrace channel"
+ range 0 255
+ depends on STM_FTRACE
+ default 254
+ help
+ STM ftrace channel number
+
+config STM_CTX_SWITCH
+ bool "Context switch tracing"
+ depends on CONTEXT_SWITCH_TRACER
+ default y
+ help
+ Output scheduler context switch on STM dedicated channel
+
+config STM_CTX_SWITCH_CHANNEL
+ int "Context switch channel"
+ range 0 255
+ depends on STM_CTX_SWITCH
+ default 253
+ help
+ STM Context switch channel number
+
+config STM_WAKEUP
+ bool "Scheduler wakeup tracing"
+ depends on CONTEXT_SWITCH_TRACER
+ default y
+ help
+ Output scheduler wakeup on STM dedicated channel
+
+config STM_WAKEUP_CHANNEL
+ int "Wakeup channel"
+ range 0 255
+ depends on STM_WAKEUP
+ default 252
+ help
+ STM scheduler wakeup channel number
+
+config STM_STACK_TRACE
+ bool "Stack tracing"
+ depends on STACKTRACE
+ default y
+ help
+ Output stack tracing on STM dedicated channel
+
+config STM_STACK_TRACE_CHANNEL
+ int "Stack trace channel"
+ range 0 255
+ depends on STM_STACK_TRACE
+ default 251
+ help
+ STM stack trace channel number
+
+config STM_TRACE_PRINTK
+ bool "trace printk & binary printk support"
+ depends on TRACING
+ default y
+ help
+ Duplicate trace printk output on STM printk channel
+
+config STM_TRACE_PRINTK_CHANNEL
+ int "trace_printk channel"
+ range 0 255
+ depends on TRACING
+ default 250
+ help
+ STM trace_printk channel number
+
+config STM_TRACE_BPRINTK_CHANNEL
+ int "trace_bprintk channel"
+ range 0 255
+ depends on TRACING
+ default 249
+ help
+ STM trace binary printk channel number
+
+endif
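
Taken together, the entries above reserve the top of the 0-255 channel range for the kernel's own tracers, leaving the lower channels free for other users. A purely illustrative summary of those defaults as a C enum (the identifier names are made up; only the numbers come from the Kconfig defaults above):

	/* Illustrative summary of the defaults above, not part of this patch. */
	enum stm_default_channels {
		STM_CH_PRINTK        = 255,	/* STM_PRINTK_CHANNEL */
		STM_CH_FTRACE        = 254,	/* STM_FTRACE_CHANNEL */
		STM_CH_CTX_SWITCH    = 253,	/* STM_CTX_SWITCH_CHANNEL */
		STM_CH_WAKEUP        = 252,	/* STM_WAKEUP_CHANNEL */
		STM_CH_STACK_TRACE   = 251,	/* STM_STACK_TRACE_CHANNEL */
		STM_CH_TRACE_PRINTK  = 250,	/* STM_TRACE_PRINTK_CHANNEL */
		STM_CH_TRACE_BPRINTK = 249,	/* STM_TRACE_BPRINTK_CHANNEL */
	};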
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 3e1d80106f0..fc02851cbe8 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -46,6 +46,16 @@ obj-y += ti-st/
obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
obj-y += lis3lv02d/
obj-y += carma/
+obj-$(CONFIG_STM_TRACE) += stm.o
+obj-$(CONFIG_HWMEM) += hwmem/
+obj-$(CONFIG_DISPDEV) += dispdev/
+obj-$(CONFIG_COMPDEV) += compdev/
+obj-$(CONFIG_CLONEDEV) += clonedev/
+obj-$(CONFIG_STE_TRACE_MODEM) += db8500-modem-trace.o
+obj-$(CONFIG_DBX500_MLOADER) += dbx500-mloader.o
+obj-$(CONFIG_U5500_MBOX) += mbox.o mbox_channels-db5500.o
+obj-$(CONFIG_U8500_SIM_DETECT) += sim_detect.o
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
+obj-y += modem_audio/
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index d7a9aa14e5d..9d864e4db5a 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -8,10 +8,11 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pwm.h>
+#include <linux/clk.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/module.h>
-
+#include <linux/mfd/ab8500/pwmleds.h>
/*
* PWM Out generators
* Bank: 0x10
@@ -19,6 +20,11 @@
#define AB8500_PWM_OUT_CTRL1_REG 0x60
#define AB8500_PWM_OUT_CTRL2_REG 0x61
#define AB8500_PWM_OUT_CTRL7_REG 0x66
+#define AB8505_PWM_OUT_BLINK_CTRL1_REG 0x68
+#define AB8505_PWM_OUT_BLINK_CTRL4_REG 0x6B
+#define AB8505_PWM_OUT_BLINK_CTRL_DUTYBIT 4
+#define AB8505_PWM_OUT_BLINK_DUTYMASK (0x0F << AB8505_PWM_OUT_BLINK_CTRL_DUTYBIT)
+
/* backlight driver constants */
#define ENABLE_PWM 1
@@ -27,12 +33,73 @@
struct pwm_device {
struct device *dev;
struct list_head node;
+ struct clk *clk;
const char *label;
unsigned int pwm_id;
+ unsigned int num_pwm;
+ unsigned int blink_en;
+ struct ab8500 *parent;
+ bool clk_enabled;
};
static LIST_HEAD(pwm_list);
+int pwm_config_blink(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ int ret;
+ unsigned int value;
+ u8 reg;
+ if ((!is_ab8505(pwm->parent)) || (!pwm->blink_en)) {
+ dev_err(pwm->dev, "setting blinking for this "
+ "device not supported\n");
+ return -EINVAL;
+ }
+ /*
+ * get the period value that is to be written to
+ * AB8500_PWM_OUT_BLINK_CTRL1 REGS[0:2]
+ */
+ value = period_ns & 0x07;
+ /*
+ * get blink duty value to be written to
+ * AB8500_PWM_OUT_BLINK_CTRL REGS[7:4]
+ */
+ value |= ((duty_ns << AB8505_PWM_OUT_BLINK_CTRL_DUTYBIT) &
+ AB8505_PWM_OUT_BLINK_DUTYMASK);
+
+ reg = AB8505_PWM_OUT_BLINK_CTRL1_REG + (pwm->pwm_id - 1);
+
+ ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC,
+ reg, (u8)value);
+ if (ret < 0)
+ dev_err(pwm->dev, "%s: Failed to config PWM blink,Error %d\n",
+ pwm->label, ret);
+ return ret;
+}
+EXPORT_SYMBOL(pwm_config_blink);
+
+int pwm_blink_ctrl(struct pwm_device *pwm , int enable)
+{
+ int ret;
+
+ if ((!is_ab8505(pwm->parent)) || (!pwm->blink_en)) {
+ dev_err(pwm->dev, "setting blinking for this "
+ "device not supported\n");
+ return -EINVAL;
+ }
+ /*
+ * Enable/disable blinking feature for corresponding PWMOUT
+ * channel depending on value of enable.
+ */
+ ret = abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8505_PWM_OUT_BLINK_CTRL4_REG,
+ 1 << (pwm->pwm_id-1), enable << (pwm->pwm_id-1));
+ if (ret < 0)
+ dev_err(pwm->dev, "%s: Failed to control PWM blink,Error %d\n",
+ pwm->label, ret);
+ return ret;
+}
+EXPORT_SYMBOL(pwm_blink_ctrl);
+
int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
{
int ret = 0;
@@ -67,11 +134,19 @@ int pwm_enable(struct pwm_device *pwm)
{
int ret;
+ if (!pwm->clk_enabled) {
+ ret = clk_enable(pwm->clk);
+ if (ret < 0) {
+ dev_err(pwm->dev, "failed to enable clock\n");
+ return ret;
+ }
+ pwm->clk_enabled = true;
+ }
ret = abx500_mask_and_set_register_interruptible(pwm->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
- 1 << (pwm->pwm_id-1), ENABLE_PWM);
+ 1 << (pwm->pwm_id-1), 1 << (pwm->pwm_id-1));
if (ret < 0)
- dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n",
+ dev_err(pwm->dev, "%s: Failed to enable PWM, Error %d\n",
pwm->label, ret);
return ret;
}
@@ -84,9 +159,27 @@ void pwm_disable(struct pwm_device *pwm)
ret = abx500_mask_and_set_register_interruptible(pwm->dev,
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
1 << (pwm->pwm_id-1), DISABLE_PWM);
+ /*
+ * Workaround for disabling the PWM.
+ * If the enable bit is not toggled, the PWM might output a 50/50 duty
+ * cycle even though it should be disabled.
+ */
+ ret &= abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1),
+ ENABLE_PWM << (pwm->pwm_id-1));
+ ret &= abx500_mask_and_set_register_interruptible(pwm->dev,
+ AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
+ 1 << (pwm->pwm_id-1), DISABLE_PWM);
+
if (ret < 0)
dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n",
pwm->label, ret);
+ if (pwm->clk_enabled) {
+ clk_disable(pwm->clk);
+ pwm->clk_enabled = false;
+ }
+
return;
}
EXPORT_SYMBOL(pwm_disable);
@@ -94,7 +187,6 @@ EXPORT_SYMBOL(pwm_disable);
struct pwm_device *pwm_request(int pwm_id, const char *label)
{
struct pwm_device *pwm;
-
list_for_each_entry(pwm, &pwm_list, node) {
if (pwm->pwm_id == pwm_id) {
pwm->label = label;
@@ -113,30 +205,131 @@ void pwm_free(struct pwm_device *pwm)
}
EXPORT_SYMBOL(pwm_free);
+static ssize_t store_blink_status(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct pwm_device *pwm;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val))
+ return -EINVAL;
+ list_for_each_entry(pwm, &pwm_list, node) {
+ if (pwm->pwm_id == val)
+ break;
+ else {
+ /* check if PWM ID is valid*/
+ if (val > pwm->num_pwm) {
+ dev_err(pwm->dev, "Invalid PWM ID\n");
+ return -EINVAL;
+ }
+ }
+ }
+ if ((!is_ab8505(pwm->parent)) || (!pwm->blink_en)) {
+ dev_err(pwm->dev, "setting blinking for this "
+ "device not supported\n");
+ return -EINVAL;
+ }
+ /* Disable blink functionality */
+ pwm_blink_ctrl(pwm, 0);
+ return count;
+}
+
+static DEVICE_ATTR(disable_blink, S_IWUGO, NULL, store_blink_status);
+
+static struct attribute *pwmled_attributes[] = {
+ &dev_attr_disable_blink.attr,
+ NULL
+};
+
+static const struct attribute_group pwmled_attr_group = {
+ .attrs = pwmled_attributes,
+};
+
static int __devinit ab8500_pwm_probe(struct platform_device *pdev)
{
+ struct ab8500 *parent = dev_get_drvdata(pdev->dev.parent);
+ struct ab8500_platform_data *plat = dev_get_platdata(parent->dev);
+ struct ab8500_pwmled_platform_data *pdata;
struct pwm_device *pwm;
+ int ret = 0 , i;
+
+ /* get pwmled specific platform data */
+ if (!plat->pwmled) {
+ dev_err(&pdev->dev, "no pwm platform data supplied\n");
+ return -EINVAL;
+ }
+ pdata = plat->pwmled;
/*
* Nothing to be done in probe, this is required to get the
* device which is required for ab8500 read and write
*/
- pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
+ pwm = kzalloc(((sizeof(struct pwm_device)) * pdata->num_pwm),
+ GFP_KERNEL);
if (pwm == NULL) {
dev_err(&pdev->dev, "failed to allocate memory\n");
return -ENOMEM;
}
- pwm->dev = &pdev->dev;
- pwm->pwm_id = pdev->id;
- list_add_tail(&pwm->node, &pwm_list);
+ for (i = 0; i < pdata->num_pwm; i++) {
+ pwm[i].dev = &pdev->dev;
+ pwm[i].parent = parent;
+ pwm[i].blink_en = pdata->leds[i].blink_en;
+ pwm[i].pwm_id = pdata->leds[i].pwm_id;
+ pwm[i].num_pwm = pdata->num_pwm;
+ list_add_tail(&pwm[i].node, &pwm_list);
+ }
+ for (i = 0; i < pdata->num_pwm; i++) {
+ /* Implement sysfs only if blink is enabled */
+ if ((is_ab8505(pwm[i].parent)) && (pwm[i].blink_en)) {
+ /* sysfs implementation to disable the blink */
+ ret = sysfs_create_group(&pdev->dev.kobj,
+ &pwmled_attr_group);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to create"
+ " sysfs entries\n");
+ goto fail;
+ }
+ break;
+ }
+ }
+ pwm->clk = clk_get(pwm->dev, NULL);
+ if (IS_ERR(pwm->clk)) {
+ dev_err(pwm->dev, "clock request failed\n");
+ ret = PTR_ERR(pwm->clk);
+ goto err_clk;
+ }
platform_set_drvdata(pdev, pwm);
+ pwm->clk_enabled = false;
dev_dbg(pwm->dev, "pwm probe successful\n");
- return 0;
+ return ret;
+
+err_clk:
+ for (i = 0; i < pdata->num_pwm; i++) {
+ if ((is_ab8505(pwm[i].parent)) && (pwm[i].blink_en)) {
+ sysfs_remove_group(&pdev->dev.kobj,
+ &pwmled_attr_group);
+ break;
+ }
+ }
+fail:
+ list_del(&pwm->node);
+ kfree(pwm);
+ return ret;
}
static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
{
struct pwm_device *pwm = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < pwm->num_pwm; i++) {
+ if ((is_ab8505(pwm[i].parent)) && (pwm[i].blink_en)) {
+ sysfs_remove_group(&pdev->dev.kobj,
+ &pwmled_attr_group);
+ break;
+ }
+ }
list_del(&pwm->node);
+ clk_put(pwm->clk);
dev_dbg(&pdev->dev, "pwm driver removed\n");
kfree(pwm);
return 0;
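
The pwm_config_blink() and pwm_blink_ctrl() helpers added above are exported for PWM consumers (blinking only exists on AB8505 and only for channels flagged blink_en in the platform data). Below is a minimal, hypothetical consumer sketch based on the signatures above; note that, per the register layout in pwm_config_blink(), the duty and period arguments are masked into 4-bit and 3-bit fields of the blink control register, so they are used here as raw field values rather than nanoseconds, and the function name and PWM ID are made up:

	/* Hypothetical consumer code, not part of this patch. */
	static int example_blink_led(void)
	{
		struct pwm_device *pwm;

		pwm = pwm_request(1, "keypad-led");	/* PWMOUT1 */
		if (!pwm)				/* assuming NULL on failure */
			return -ENODEV;

		pwm_enable(pwm);
		pwm_config_blink(pwm, 0x8, 0x3);	/* duty field 0x8, period field 0x3 */
		pwm_blink_ctrl(pwm, 1);			/* start blinking on this PWMOUT */

		/* later: stop blinking and release the channel */
		pwm_blink_ctrl(pwm, 0);
		pwm_disable(pwm);
		pwm_free(pwm);
		return 0;
	}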
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index 54f6f39f990..1035cb37695 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -18,11 +18,17 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/i2c.h>
+#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
#define BH1780_REG_CONTROL 0x80
#define BH1780_REG_PARTID 0x8A
@@ -40,11 +46,20 @@
struct bh1780_data {
struct i2c_client *client;
+ struct regulator *regulator;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ struct early_suspend early_suspend;
+#endif
int power_state;
/* lock for sysfs operations */
struct mutex lock;
};
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void bh1780_early_suspend(struct early_suspend *ddata);
+static void bh1780_late_resume(struct early_suspend *ddata);
+#endif
+
static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
{
int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
@@ -72,6 +87,9 @@ static ssize_t bh1780_show_lux(struct device *dev,
struct bh1780_data *ddata = platform_get_drvdata(pdev);
int lsb, msb;
+ if (ddata->power_state == BH1780_POFF)
+ return -EINVAL;
+
lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
if (lsb < 0)
return lsb;
@@ -89,13 +107,9 @@ static ssize_t bh1780_show_power_state(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct bh1780_data *ddata = platform_get_drvdata(pdev);
- int state;
-
- state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
- if (state < 0)
- return state;
- return sprintf(buf, "%d\n", state & BH1780_POWMASK);
+ /* we already maintain a sw state */
+ return sprintf(buf, "%d\n", ddata->power_state);
}
static ssize_t bh1780_store_power_state(struct device *dev,
@@ -104,7 +118,7 @@ static ssize_t bh1780_store_power_state(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct bh1780_data *ddata = platform_get_drvdata(pdev);
- unsigned long val;
+ long val;
int error;
error = strict_strtoul(buf, 0, &val);
@@ -114,15 +128,25 @@ static ssize_t bh1780_store_power_state(struct device *dev,
if (val < BH1780_POFF || val > BH1780_PON)
return -EINVAL;
+ if (ddata->power_state == val)
+ return count;
+
mutex_lock(&ddata->lock);
+ if (ddata->power_state == BH1780_POFF)
+ regulator_enable(ddata->regulator);
+
error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
if (error < 0) {
mutex_unlock(&ddata->lock);
+ regulator_disable(ddata->regulator);
return error;
}
- msleep(BH1780_PON_DELAY);
+ if (val == BH1780_POFF)
+ regulator_disable(ddata->regulator);
+
+ mdelay(BH1780_PON_DELAY);
ddata->power_state = val;
mutex_unlock(&ddata->lock);
@@ -131,7 +155,7 @@ static ssize_t bh1780_store_power_state(struct device *dev,
static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
-static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(power_state, S_IWUGO | S_IRUGO,
bh1780_show_power_state, bh1780_store_power_state);
static struct attribute *bh1780_attributes[] = {
@@ -153,21 +177,42 @@ static int __devinit bh1780_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) {
ret = -EIO;
- goto err_op_failed;
+ return ret;
}
ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL);
if (ddata == NULL) {
+ dev_err(&client->dev, "failed to alloc ddata\n");
ret = -ENOMEM;
- goto err_op_failed;
+ return ret;
}
ddata->client = client;
i2c_set_clientdata(client, ddata);
+ ddata->regulator = regulator_get(&client->dev, "vcc");
+ if (IS_ERR(ddata->regulator)) {
+ dev_err(&client->dev, "failed to get regulator\n");
+ ret = PTR_ERR(ddata->regulator);
+ goto free_ddata;
+ }
+
+ regulator_enable(ddata->regulator);
+
ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
- if (ret < 0)
- goto err_op_failed;
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read part ID\n");
+ goto disable_regulator;
+ }
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ ddata->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ ddata->early_suspend.suspend = bh1780_early_suspend;
+ ddata->early_suspend.resume = bh1780_late_resume;
+ register_early_suspend(&ddata->early_suspend);
+#endif
+
+ regulator_disable(ddata->regulator);
+ ddata->power_state = BH1780_POFF;
dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
(ret & BH1780_REVMASK));
@@ -175,12 +220,17 @@ static int __devinit bh1780_probe(struct i2c_client *client,
mutex_init(&ddata->lock);
ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
- if (ret)
- goto err_op_failed;
+ if (ret) {
+ dev_err(&client->dev, "failed to create sysfs group\n");
+ goto put_regulator;
+ }
return 0;
-
-err_op_failed:
+disable_regulator:
+ regulator_disable(ddata->regulator);
+put_regulator:
+ regulator_put(ddata->regulator);
+free_ddata:
kfree(ddata);
return ret;
}
@@ -196,50 +246,106 @@ static int __devexit bh1780_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
-static int bh1780_suspend(struct device *dev)
+#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_PM)
+static int bh1780_do_suspend(struct bh1780_data *ddata)
{
- struct bh1780_data *ddata;
- int state, ret;
- struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
- ddata = i2c_get_clientdata(client);
- state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
- if (state < 0)
- return state;
+ mutex_lock(&ddata->lock);
- ddata->power_state = state & BH1780_POWMASK;
+ if (ddata->power_state == BH1780_POFF)
+ goto unlock;
- ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
- "CONTROL");
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF, "CONTROL");
if (ret < 0)
- return ret;
+ goto unlock;
- return 0;
+ if (ddata->regulator)
+ regulator_disable(ddata->regulator);
+unlock:
+ mutex_unlock(&ddata->lock);
+ return ret;
}
-static int bh1780_resume(struct device *dev)
+static int bh1780_do_resume(struct bh1780_data *ddata)
{
- struct bh1780_data *ddata;
- int state, ret;
- struct i2c_client *client = to_i2c_client(dev);
+ int ret = 0;
- ddata = i2c_get_clientdata(client);
- state = ddata->power_state;
- ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
- "CONTROL");
+ mutex_lock(&ddata->lock);
+
+ if (ddata->power_state == BH1780_POFF)
+ goto unlock;
+ if (ddata->regulator)
+ regulator_enable(ddata->regulator);
+
+ ret = bh1780_write(ddata, BH1780_REG_CONTROL,
+ ddata->power_state, "CONTROL");
+
+unlock:
+ mutex_unlock(&ddata->lock);
+ return ret;
+}
+#endif
+
+#ifndef CONFIG_HAS_EARLYSUSPEND
+#ifdef CONFIG_PM
+static int bh1780_suspend(struct device *dev)
+{
+ struct bh1780_data *ddata = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = bh1780_do_suspend(ddata);
if (ret < 0)
- return ret;
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device\n");
- return 0;
+ return ret;
}
+
+static int bh1780_resume(struct device *dev)
+{
+ struct bh1780_data *ddata = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = bh1780_do_resume(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device\n");
+
+ return ret;
+}
+
static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
#define BH1780_PMOPS (&bh1780_pm)
+#endif /* CONFIG_PM */
#else
#define BH1780_PMOPS NULL
-#endif /* CONFIG_PM */
+static void bh1780_early_suspend(struct early_suspend *data)
+{
+ struct bh1780_data *ddata =
+ container_of(data, struct bh1780_data, early_suspend);
+ int ret;
+
+ ret = bh1780_do_suspend(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while suspending the device\n");
+}
+
+static void bh1780_late_resume(struct early_suspend *data)
+{
+ struct bh1780_data *ddata =
+ container_of(data, struct bh1780_data, early_suspend);
+ int ret;
+
+ ret = bh1780_do_resume(ddata);
+ if (ret < 0)
+ dev_err(&ddata->client->dev,
+ "Error while resuming the device\n");
+}
+#endif /*!CONFIG_HAS_EARLYSUSPEND */
static const struct i2c_device_id bh1780_id[] = {
{ "bh1780", 0 },
@@ -252,7 +358,9 @@ static struct i2c_driver bh1780_driver = {
.id_table = bh1780_id,
.driver = {
.name = "bh1780",
+#if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM))
.pm = BH1780_PMOPS,
+#endif
},
};
diff --git a/drivers/misc/clonedev/Makefile b/drivers/misc/clonedev/Makefile
new file mode 100644
index 00000000000..f84859dd3ee
--- /dev/null
+++ b/drivers/misc/clonedev/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CLONEDEV) += clonedev.o
+
+ifdef CONFIG_CLONEDEV_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/misc/clonedev/clonedev.c b/drivers/misc/clonedev/clonedev.c
new file mode 100644
index 00000000000..d3b770fd324
--- /dev/null
+++ b/drivers/misc/clonedev/clonedev.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Device for display cloning on external output.
+ *
+ * Author: Per-Daniel Olsson <per-daniel.olsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/ioctl.h>
+
+#include <linux/clonedev.h>
+
+#include <linux/compdev.h>
+#include <linux/mm.h>
+#include <video/mcde.h>
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_list_lock);
+
+struct clonedev {
+ struct mutex lock;
+ struct miscdevice mdev;
+ struct list_head list;
+ bool open;
+ struct compdev *src_compdev;
+ struct compdev *dst_compdev;
+ bool overlay_case;
+ struct compdev_size dst_size;
+ struct compdev_scene_info s_info;
+};
+
+static void best_fit(struct compdev_rect *src_rect,
+ struct compdev_size *dst_size,
+ struct compdev_img *img)
+{
+ /* aspect ratio in 26.6 fixed point */
+ int aspect = 1;
+ int dst_w;
+ int dst_h;
+
+ if (img->rotation == COMPDEV_ROT_90_CCW ||
+ img->rotation == COMPDEV_ROT_270_CCW)
+ aspect = (src_rect->height << 6) / src_rect->width;
+ else
+ aspect = (src_rect->width << 6) / src_rect->height;
+
+ dst_w = aspect * dst_size->height >> 6;
+ dst_h = dst_size->height;
+ img->dst_rect.y = 0;
+
+ if (dst_w > dst_size->width) {
+ /*
+ * Destination rectangle too wide.
+ * Clamp to image width. Keep aspect ratio.
+ */
+ dst_h = (dst_size->width << 6) / aspect;
+ dst_w = dst_size->width;
+ }
+
+ /* center the image */
+ if (dst_w < dst_size->width) {
+ int offset = (dst_size->width - dst_w) / 2;
+ img->dst_rect.x = offset;
+ }
+
+ if (dst_h < dst_size->height) {
+ int offset = (dst_size->height - dst_h) / 2;
+ img->dst_rect.y = offset;
+ }
+
+ img->dst_rect.width = dst_w;
+ img->dst_rect.height = dst_h;
+}
+
+static int clonedev_open(struct inode *inode, struct file *file)
+{
+ struct clonedev *cd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(cd, &dev_list, list)
+ if (cd->mdev.minor == iminor(inode))
+ break;
+
+ if (&cd->list == &dev_list) {
+ mutex_unlock(&dev_list_lock);
+ return -ENODEV;
+ }
+
+ if (cd->open) {
+ mutex_unlock(&dev_list_lock);
+ return -EBUSY;
+ }
+
+ cd->open = true;
+
+ mutex_unlock(&dev_list_lock);
+
+ file->private_data = cd;
+
+ return 0;
+}
+
+static int clonedev_release(struct inode *inode, struct file *file)
+{
+ struct clonedev *cd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(cd, &dev_list, list)
+ if (cd->mdev.minor == iminor(inode))
+ break;
+ mutex_unlock(&dev_list_lock);
+
+ if (&cd->list == &dev_list)
+ return -ENODEV;
+
+ cd->open = false;
+ return 0;
+}
+
+static long clonedev_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct clonedev *cd = (struct clonedev *)file->private_data;
+
+ mutex_lock(&cd->lock);
+
+ switch (cmd) {
+ case CLONEDEV_SET_MODE_IOC:
+ /* TODO: Get the user data */
+ ret = 0;
+ break;
+
+ default:
+ ret = -ENOSYS;
+ }
+
+ mutex_unlock(&cd->lock);
+
+ return ret;
+}
+
+static const struct file_operations clonedev_fops = {
+ .open = clonedev_open,
+ .release = clonedev_release,
+ .unlocked_ioctl = clonedev_ioctl,
+};
+
+static void init_clonedev(struct clonedev *cd, const char *name)
+{
+ mutex_init(&cd->lock);
+ INIT_LIST_HEAD(&cd->list);
+
+ cd->mdev.minor = MISC_DYNAMIC_MINOR;
+ cd->mdev.name = name;
+ cd->mdev.fops = &clonedev_fops;
+}
+
+static void clonedev_post_buffer_callback(void *data,
+ struct compdev_img *cb_img)
+{
+ struct clonedev *cd = (struct clonedev *)data;
+
+ mutex_lock(&cd->lock);
+
+ if (!cd->overlay_case || (cd->overlay_case &&
+ (cb_img->flags & COMPDEV_OVERLAY_FLAG))) {
+ struct compdev_img img;
+
+ img = *cb_img;
+
+ if (img.flags & COMPDEV_BYPASS_FLAG)
+ img.flags &= ~COMPDEV_BYPASS_FLAG;
+
+ if (cd->overlay_case)
+ img.rotation = cd->s_info.ovly_rotation;
+ else
+ img.rotation = cd->s_info.fb_rotation;
+
+ best_fit(&img.src_rect, &cd->dst_size, &img);
+
+ compdev_post_buffer(cd->dst_compdev, &img);
+ }
+ mutex_unlock(&cd->lock);
+}
+
+static void clonedev_post_scene_info_callback(void *data,
+ struct compdev_scene_info *s_info)
+{
+ struct clonedev *cd = (struct clonedev *)data;
+
+ mutex_lock(&cd->lock);
+ if (s_info->img_count > 1)
+ cd->overlay_case = true;
+ else
+ cd->overlay_case = false;
+
+ cd->s_info = *s_info;
+ cd->s_info.img_count = 1;
+ compdev_post_scene_info(cd->dst_compdev, &cd->s_info);
+ mutex_unlock(&cd->lock);
+}
+
+int clonedev_create(void)
+{
+ int ret;
+ struct clonedev *cd;
+
+ static int counter;
+ char name[10];
+
+ cd = kzalloc(sizeof(struct clonedev), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s%d", CLONEDEV_DEFAULT_DEVICE_PREFIX,
+ counter++);
+ init_clonedev(cd, name);
+
+ ret = misc_register(&cd->mdev);
+ if (ret)
+ goto fail_register_misc;
+ mutex_lock(&dev_list_lock);
+ list_add_tail(&cd->list, &dev_list);
+ mutex_unlock(&dev_list_lock);
+
+ mutex_lock(&cd->lock);
+
+ compdev_get(0, &cd->src_compdev);
+ compdev_get(1, &cd->dst_compdev);
+ compdev_get_size(cd->dst_compdev, &cd->dst_size);
+
+ compdev_register_listener_callbacks(cd->src_compdev, (void *)cd,
+ &clonedev_post_buffer_callback,
+ &clonedev_post_scene_info_callback);
+
+ mutex_unlock(&cd->lock);
+ goto out;
+
+fail_register_misc:
+ kfree(cd);
+out:
+ return ret;
+}
+
+void clonedev_destroy(void)
+{
+ struct clonedev *cd;
+ struct clonedev *tmp;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(cd, tmp, &dev_list, list) {
+ compdev_put(cd->src_compdev);
+ compdev_put(cd->dst_compdev);
+ compdev_deregister_callbacks(cd->src_compdev);
+ list_del(&cd->list);
+ misc_deregister(&cd->mdev);
+ kfree(cd);
+ break;
+ }
+ mutex_unlock(&dev_list_lock);
+}
+
+static void clonedev_destroy_all(void)
+{
+ struct clonedev *cd;
+ struct clonedev *tmp;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(cd, tmp, &dev_list, list) {
+ list_del(&cd->list);
+ misc_deregister(&cd->mdev);
+ kfree(cd);
+ }
+ mutex_unlock(&dev_list_lock);
+
+ mutex_destroy(&dev_list_lock);
+}
+
+static int __init clonedev_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ mutex_init(&dev_list_lock);
+
+ return 0;
+}
+module_init(clonedev_init);
+
+static void __exit clonedev_exit(void)
+{
+ clonedev_destroy_all();
+ pr_info("%s\n", __func__);
+}
+module_exit(clonedev_exit);
+
+MODULE_AUTHOR("Per-Daniel Olsson <per-daniel.olsson@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Device for display cloning on external output");
+
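To make the 26.6 fixed-point arithmetic in best_fit() above concrete, here is a worked example with hypothetical sizes: for a non-rotated 1280x720 source and a 1920x1080 destination, aspect = (1280 << 6) / 720 = 113 (roughly 1.77 in 26.6 format), dst_w = (113 * 1080) >> 6 = 1906 and dst_h = 1080. Since 1906 fits within the 1920-pixel width nothing is clamped, and the image is centred horizontally with dst_rect.x = (1920 - 1906) / 2 = 7 and dst_rect.y = 0.
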
diff --git a/drivers/misc/compdev/Makefile b/drivers/misc/compdev/Makefile
new file mode 100644
index 00000000000..b8385848712
--- /dev/null
+++ b/drivers/misc/compdev/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_COMPDEV) += compdev.o
+
+ifdef CONFIG_COMPDEV_DEBUG
+EXTRA_CFLAGS += -DDEBUG
+endif
+
diff --git a/drivers/misc/compdev/compdev.c b/drivers/misc/compdev/compdev.c
new file mode 100644
index 00000000000..d929a02c565
--- /dev/null
+++ b/drivers/misc/compdev/compdev.c
@@ -0,0 +1,1381 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Display overlay compositer device driver
+ *
+ * Author: Anders Bauer <anders.bauer@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * Modified: Per-Daniel Olsson <per-daniel.olsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+
+#include <linux/compdev.h>
+#include <linux/hwmem.h>
+#include <linux/mm.h>
+#include <video/mcde_dss.h>
+#include <video/b2r2_blt.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+#define BUFFER_CACHE_DEPTH 2
+#define NUM_COMPDEV_BUFS 2
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_list_lock);
+static int dev_counter;
+
+struct compdev_buffer {
+ struct hwmem_alloc *alloc;
+ enum compdev_ptr_type type;
+ u32 size;
+ u32 paddr; /* if pinned */
+};
+
+struct compdev_img_internal {
+ struct compdev_img img;
+ u32 ref_count;
+};
+
+struct compdev_blt_work {
+ struct work_struct work;
+ struct compdev_img *src_img;
+ struct compdev_img_internal *dst_img;
+ int blt_handle;
+ bool mcde_rotation;
+ struct device *dev;
+};
+
+struct compdev_post_callback_work {
+ struct work_struct work;
+ struct compdev_img *img;
+ post_buffer_callback pb_cb;
+ void *cb_data;
+ struct device *dev;
+};
+
+struct buffer_cache_context {
+ struct compdev_img_internal
+ *img[BUFFER_CACHE_DEPTH];
+ u8 index;
+ u8 unused_counter;
+ struct device *dev;
+};
+
+struct dss_context {
+ struct device *dev;
+ struct mcde_display_device *ddev;
+ struct mcde_overlay *ovly[NUM_COMPDEV_BUFS];
+ struct compdev_buffer ovly_buffer[NUM_COMPDEV_BUFS];
+ struct compdev_size phy_size;
+ enum mcde_display_rotation display_rotation;
+ enum compdev_rotation current_buffer_rotation;
+ int blt_handle;
+ u8 temp_img_count;
+ struct compdev_img_internal *temp_img[NUM_COMPDEV_BUFS];
+ struct buffer_cache_context cache_ctx;
+};
+
+struct compdev {
+ struct mutex lock;
+ struct miscdevice mdev;
+ struct device *dev;
+ struct list_head list;
+ struct dss_context dss_ctx;
+ u16 ref_count;
+ struct workqueue_struct *worker_thread;
+ int dev_index;
+ post_buffer_callback pb_cb;
+ post_scene_info_callback si_cb;
+ struct compdev_scene_info s_info;
+ u8 sync_count;
+ u8 image_count;
+ struct compdev_img *images[NUM_COMPDEV_BUFS];
+ struct completion fence;
+ void *cb_data;
+ bool mcde_rotation;
+};
+
+static struct compdev *compdevs[MAX_NBR_OF_COMPDEVS];
+
+static int compdev_post_buffers_dss(struct dss_context *dss_ctx,
+ struct compdev_img *img1, struct compdev_img *img2);
+
+
+static int compdev_open(struct inode *inode, struct file *file)
+{
+ struct compdev *cd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(cd, &dev_list, list)
+ if (cd->mdev.minor == iminor(inode))
+ break;
+
+ if (&cd->list == &dev_list) {
+ mutex_unlock(&dev_list_lock);
+ return -ENODEV;
+ }
+ mutex_unlock(&dev_list_lock);
+ file->private_data = cd;
+ return 0;
+}
+
+static int disable_overlay(struct mcde_overlay *ovly)
+{
+ struct mcde_overlay_info info;
+
+ mcde_dss_get_overlay_info(ovly, &info);
+ if (info.paddr != 0) {
+ /* Set the pointer to zero to disable the overlay */
+ info.paddr = 0;
+ mcde_dss_apply_overlay(ovly, &info);
+ }
+ return 0;
+}
+
+static int compdev_release(struct inode *inode, struct file *file)
+{
+ struct compdev *cd = NULL;
+ int i;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(cd, &dev_list, list)
+ if (cd->mdev.minor == iminor(inode))
+ break;
+ mutex_unlock(&dev_list_lock);
+
+ if (&cd->list == &dev_list)
+ return -ENODEV;
+
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++) {
+ disable_overlay(cd->dss_ctx.ovly[i]);
+ if (cd->dss_ctx.ovly_buffer[i].paddr &&
+ cd->dss_ctx.ovly_buffer[i].type ==
+ COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET)
+ hwmem_unpin(cd->dss_ctx.ovly_buffer[i].alloc);
+
+ cd->dss_ctx.ovly_buffer[i].alloc = NULL;
+ cd->dss_ctx.ovly_buffer[i].size = 0;
+ cd->dss_ctx.ovly_buffer[i].paddr = 0;
+ }
+
+ return 0;
+}
+
+static enum mcde_ovly_pix_fmt get_ovly_fmt(enum compdev_fmt fmt)
+{
+ switch (fmt) {
+ default:
+ case COMPDEV_FMT_RGB565:
+ return MCDE_OVLYPIXFMT_RGB565;
+ case COMPDEV_FMT_RGB888:
+ return MCDE_OVLYPIXFMT_RGB888;
+ case COMPDEV_FMT_RGBA8888:
+ return MCDE_OVLYPIXFMT_RGBA8888;
+ case COMPDEV_FMT_RGBX8888:
+ return MCDE_OVLYPIXFMT_RGBX8888;
+ case COMPDEV_FMT_YUV422:
+ return MCDE_OVLYPIXFMT_YCbCr422;
+ }
+}
+
+static int compdev_setup_ovly(struct compdev_img *img,
+ struct compdev_buffer *buffer,
+ struct mcde_overlay *ovly,
+ int z_order,
+ struct dss_context *dss_ctx)
+{
+ int ret = 0;
+ enum hwmem_mem_type memtype;
+ enum hwmem_access access;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+ struct hwmem_region rgn = { .offset = 0, .count = 1, .start = 0 };
+ struct mcde_overlay_info info;
+
+ if (img->buf.type == COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET) {
+ buffer->type = COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET;
+ buffer->alloc = hwmem_resolve_by_name(img->buf.hwmem_buf_name);
+ if (IS_ERR(buffer->alloc)) {
+ ret = PTR_ERR(buffer->alloc);
+ dev_warn(dss_ctx->dev,
+ "HWMEM resolve failed, %d\n", ret);
+ goto resolve_failed;
+ }
+
+ hwmem_get_info(buffer->alloc, &buffer->size, &memtype,
+ &access);
+
+ if (!(access & HWMEM_ACCESS_READ) ||
+ memtype != HWMEM_MEM_CONTIGUOUS_SYS) {
+ ret = -EACCES;
+ dev_warn(dss_ctx->dev,
+ "Invalid_mem overlay, %d\n", ret);
+ goto invalid_mem;
+ }
+ ret = hwmem_pin(buffer->alloc, &mem_chunk, &mem_chunk_length);
+ if (ret) {
+ dev_warn(dss_ctx->dev,
+ "Pin failed, %d\n", ret);
+ goto pin_failed;
+ }
+
+ rgn.size = rgn.end = buffer->size;
+ ret = hwmem_set_domain(buffer->alloc, HWMEM_ACCESS_READ,
+ HWMEM_DOMAIN_SYNC, &rgn);
+ if (ret)
+ dev_warn(dss_ctx->dev,
+ "Set domain failed, %d\n", ret);
+
+ buffer->paddr = mem_chunk.paddr;
+ } else if (img->buf.type == COMPDEV_PTR_PHYSICAL) {
+ buffer->type = COMPDEV_PTR_PHYSICAL;
+ buffer->alloc = NULL;
+ buffer->size = img->buf.len;
+ buffer->paddr = img->buf.offset;
+ }
+
+ info.stride = img->pitch;
+ info.fmt = get_ovly_fmt(img->fmt);
+ info.src_x = 0;
+ info.src_y = 0;
+ info.dst_x = img->dst_rect.x;
+ info.dst_y = img->dst_rect.y;
+ info.dst_z = z_order;
+ info.w = img->dst_rect.width;
+ info.h = img->dst_rect.height;
+ info.dirty.x = 0;
+ info.dirty.y = 0;
+ info.dirty.w = img->dst_rect.width;
+ info.dirty.h = img->dst_rect.height;
+ info.paddr = buffer->paddr;
+
+ mcde_dss_apply_overlay(ovly, &info);
+ return ret;
+
+pin_failed:
+invalid_mem:
+ buffer->alloc = NULL;
+ buffer->size = 0;
+ buffer->paddr = 0;
+
+resolve_failed:
+ return ret;
+}
+
+static int compdev_update_rotation(struct dss_context *dss_ctx,
+ enum compdev_rotation rotation)
+{
+ /* Set video mode */
+ struct mcde_video_mode vmode;
+ int ret = 0;
+
+ memset(&vmode, 0, sizeof(struct mcde_video_mode));
+ mcde_dss_get_video_mode(dss_ctx->ddev, &vmode);
+ if ((dss_ctx->display_rotation + rotation) % 180) {
+ vmode.xres = dss_ctx->phy_size.height;
+ vmode.yres = dss_ctx->phy_size.width;
+ } else {
+ vmode.xres = dss_ctx->phy_size.width;
+ vmode.yres = dss_ctx->phy_size.height;
+ }
+
+ /* Set rotation */
+ ret = mcde_dss_set_rotation(dss_ctx->ddev,
+ (dss_ctx->display_rotation + rotation) % 360);
+ if (ret != 0)
+ goto exit;
+
+ ret = mcde_dss_set_video_mode(dss_ctx->ddev, &vmode);
+ if (ret != 0)
+ goto exit;
+
+
+ /* Apply */
+ ret = mcde_dss_apply_channel(dss_ctx->ddev);
+exit:
+ return ret;
+}
+
+static int release_prev_frame(struct dss_context *dss_ctx)
+{
+ int ret = 0;
+ int i;
+
+ /* Handle unpin of previous buffers */
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++) {
+ if (dss_ctx->ovly_buffer[i].type ==
+ COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET &&
+ dss_ctx->ovly_buffer[i].paddr != 0) {
+ hwmem_unpin(dss_ctx->ovly_buffer[i].alloc);
+ hwmem_release(dss_ctx->ovly_buffer[i].alloc);
+ }
+ dss_ctx->ovly_buffer[i].alloc = NULL;
+ dss_ctx->ovly_buffer[i].size = 0;
+ dss_ctx->ovly_buffer[i].paddr = 0;
+ }
+ return ret;
+
+}
+
+static enum b2r2_blt_fmt compdev_to_blt_format(enum compdev_fmt fmt)
+{
+ switch (fmt) {
+ case COMPDEV_FMT_RGBA8888:
+ return B2R2_BLT_FMT_32_BIT_ABGR8888;
+ case COMPDEV_FMT_RGB888:
+ return B2R2_BLT_FMT_24_BIT_RGB888;
+ case COMPDEV_FMT_RGB565:
+ return B2R2_BLT_FMT_16_BIT_RGB565;
+ case COMPDEV_FMT_YUV422:
+ return B2R2_BLT_FMT_CB_Y_CR_Y;
+ case COMPDEV_FMT_YCBCR42XMBN:
+ return B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE;
+ case COMPDEV_FMT_YUV420_SP:
+ return B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR;
+ case COMPDEV_FMT_YVU420_SP:
+ return B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR;
+ case COMPDEV_FMT_YUV420_P:
+ return B2R2_BLT_FMT_YUV420_PACKED_PLANAR;
+ default:
+ return B2R2_BLT_FMT_UNUSED;
+ }
+}
+
+static enum b2r2_blt_transform to_blt_transform
+ (enum compdev_rotation compdev_rot)
+{
+ switch (compdev_rot) {
+ case COMPDEV_ROT_0:
+ return B2R2_BLT_TRANSFORM_NONE;
+ case COMPDEV_ROT_90_CCW:
+ return B2R2_BLT_TRANSFORM_CCW_ROT_90;
+ case COMPDEV_ROT_180:
+ return B2R2_BLT_TRANSFORM_CCW_ROT_180;
+ case COMPDEV_ROT_270_CCW:
+ return B2R2_BLT_TRANSFORM_CCW_ROT_90;
+ default:
+ return B2R2_BLT_TRANSFORM_NONE;
+ }
+}
+
+static u32 get_stride(u32 width, enum compdev_fmt fmt)
+{
+ u32 stride = 0;
+ switch (fmt) {
+ case COMPDEV_FMT_RGB565:
+ stride = width * 2;
+ break;
+ case COMPDEV_FMT_RGB888:
+ stride = width * 3;
+ break;
+ case COMPDEV_FMT_RGBX8888:
+ stride = width * 4;
+ break;
+ case COMPDEV_FMT_RGBA8888:
+ stride = width * 4;
+ break;
+ case COMPDEV_FMT_YUV422:
+ stride = width * 2;
+ break;
+ case COMPDEV_FMT_YCBCR42XMBN:
+ case COMPDEV_FMT_YUV420_SP:
+ case COMPDEV_FMT_YVU420_SP:
+ case COMPDEV_FMT_YUV420_P:
+ stride = width;
+ break;
+ }
+
+ /* The display controller requires 8 byte aligned strides */
+ if (stride % 8)
+ stride += 8 - (stride % 8);
+
+ return stride;
+}
+
+static int alloc_comp_internal_img(enum compdev_fmt fmt,
+ u16 width, u16 height, struct compdev_img_internal **img_pp)
+{
+ struct hwmem_alloc *alloc;
+ int name;
+ u32 size;
+ u32 stride;
+ struct compdev_img_internal *img;
+
+ stride = get_stride(width, fmt);
+ size = stride * height;
+ size = PAGE_ALIGN(size);
+
+ img = kzalloc(sizeof(struct compdev_img_internal), GFP_KERNEL);
+
+ if (!img)
+ return -ENOMEM;
+
+ alloc = hwmem_alloc(size, HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED,
+ (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE |
+ HWMEM_ACCESS_IMPORT),
+ HWMEM_MEM_CONTIGUOUS_SYS);
+
+ if (IS_ERR(alloc)) {
+ kfree(img);
+ img = NULL;
+ return PTR_ERR(alloc);
+ }
+
+ name = hwmem_get_name(alloc);
+ if (name < 0) {
+ kfree(img);
+ img = NULL;
+ hwmem_release(alloc);
+ return name;
+ }
+
+ img->img.height = height;
+ img->img.width = width;
+ img->img.fmt = fmt;
+ img->img.pitch = stride;
+ img->img.buf.hwmem_buf_name = name;
+ img->img.buf.type = COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET;
+ img->img.buf.offset = 0;
+ img->img.buf.len = size;
+
+ img->ref_count = 1;
+
+ *img_pp = img;
+
+ return 0;
+}
+
+static void free_comp_img_buf(struct compdev_img_internal *img,
+ struct device *dev)
+{
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (img != NULL && img->ref_count) {
+ img->ref_count--;
+ if (img->ref_count == 0) {
+ struct hwmem_alloc *alloc;
+ if (img->img.buf.hwmem_buf_name > 0) {
+ alloc = hwmem_resolve_by_name(
+ img->img.buf.hwmem_buf_name);
+ if (IS_ERR(alloc)) {
+ dev_err(dev, "%s: Error getting Alloc "
+ "from HWMEM\n", __func__);
+ return;
+ }
+ /* Double release needed */
+ hwmem_release(alloc);
+ hwmem_release(alloc);
+ }
+ kfree(img);
+ }
+ }
+}
+
+struct compdev_img_internal *compdev_buffer_cache_get_image(
+ struct buffer_cache_context *cache_ctx, enum compdev_fmt fmt,
+ u16 width, u16 height)
+{
+ int i;
+ struct compdev_img_internal *img = NULL;
+
+ dev_dbg(cache_ctx->dev, "%s\n", __func__);
+
+ /* First check for a cache hit */
+ if (cache_ctx->unused_counter > 0) {
+ u8 active_index = cache_ctx->index;
+ struct compdev_img_internal *temp =
+ cache_ctx->img[active_index];
+ if (temp != NULL && temp->img.fmt == fmt &&
+ temp->img.width == width &&
+ temp->img.height == height) {
+ img = temp;
+ cache_ctx->unused_counter = 0;
+ }
+ }
+ /* Cache miss */
+ if (img == NULL) {
+ /* Create new buffers and release the old ones */
+ for (i = 0; i < BUFFER_CACHE_DEPTH; i++) {
+ if (cache_ctx->img[i]) {
+ free_comp_img_buf(cache_ctx->img[i],
+ cache_ctx->dev);
+ cache_ctx->img[i] = NULL;
+ }
+ cache_ctx->index = 0;
+ if (alloc_comp_internal_img(fmt, width, height,
+ &cache_ctx->img[i]))
+ dev_err(cache_ctx->dev,
+ "%s: Allocation error\n",
+ __func__);
+ }
+ img = cache_ctx->img[0];
+ }
+
+ if (img != NULL) {
+ img->ref_count++;
+ cache_ctx->unused_counter = 0;
+ cache_ctx->index++;
+ if (cache_ctx->index >= BUFFER_CACHE_DEPTH)
+ cache_ctx->index = 0;
+ }
+
+ return img;
+}
+
+static void compdev_buffer_cache_mark_frame
+ (struct buffer_cache_context *cache_ctx)
+{
+ if (cache_ctx->unused_counter < 2)
+ cache_ctx->unused_counter++;
+ if (cache_ctx->unused_counter == 2) {
+ int i;
+ for (i = 0; i < BUFFER_CACHE_DEPTH; i++) {
+ if (cache_ctx->img[i]) {
+ free_comp_img_buf(cache_ctx->img[i],
+ cache_ctx->dev);
+ cache_ctx->img[i] = NULL;
+ }
+ }
+ }
+}
+
+static bool check_hw_format(enum compdev_fmt fmt)
+{
+ if (fmt == COMPDEV_FMT_RGB565 ||
+ fmt == COMPDEV_FMT_RGB888 ||
+ fmt == COMPDEV_FMT_RGBA8888 ||
+ fmt == COMPDEV_FMT_RGBX8888 ||
+ fmt == COMPDEV_FMT_YUV422)
+ return true;
+ else
+ return false;
+}
+
+static enum compdev_fmt find_compatible_fmt(enum compdev_fmt fmt, bool rotation)
+{
+ if (!rotation) {
+ switch (fmt) {
+ case COMPDEV_FMT_RGB565:
+ case COMPDEV_FMT_RGB888:
+ case COMPDEV_FMT_RGBA8888:
+ case COMPDEV_FMT_RGBX8888:
+ return fmt;
+ case COMPDEV_FMT_YUV422:
+ case COMPDEV_FMT_YCBCR42XMBN:
+ case COMPDEV_FMT_YUV420_SP:
+ case COMPDEV_FMT_YVU420_SP:
+ case COMPDEV_FMT_YUV420_P:
+ return COMPDEV_FMT_YUV422;
+ default:
+ return COMPDEV_FMT_RGBA8888;
+ }
+ } else {
+ switch (fmt) {
+ case COMPDEV_FMT_RGB565:
+ case COMPDEV_FMT_RGB888:
+ case COMPDEV_FMT_RGBA8888:
+ case COMPDEV_FMT_RGBX8888:
+ return fmt;
+ case COMPDEV_FMT_YUV422:
+ case COMPDEV_FMT_YCBCR42XMBN:
+ case COMPDEV_FMT_YUV420_SP:
+ case COMPDEV_FMT_YVU420_SP:
+ case COMPDEV_FMT_YUV420_P:
+ return COMPDEV_FMT_RGB888;
+ default:
+ return COMPDEV_FMT_RGBA8888;
+ }
+ }
+}
+
+static void compdev_callback_worker_function(struct work_struct *work)
+{
+ struct compdev_post_callback_work *cb_work =
+ (struct compdev_post_callback_work *)work;
+
+ if (cb_work->pb_cb != NULL)
+ cb_work->pb_cb(cb_work->cb_data, cb_work->img);
+}
+static void compdev_blt_worker_function(struct work_struct *work)
+{
+ struct compdev_blt_work *blt_work = (struct compdev_blt_work *)work;
+ struct compdev_img *src_img;
+ struct compdev_img *dst_img;
+ struct b2r2_blt_req req;
+ int req_id;
+
+ dev_dbg(blt_work->dev, "%s\n", __func__);
+
+ src_img = blt_work->src_img;
+ dst_img = &blt_work->dst_img->img;
+
+ memset(&req, 0, sizeof(req));
+ req.size = sizeof(req);
+
+ if (src_img->buf.type == COMPDEV_PTR_PHYSICAL) {
+ req.src_img.buf.type = B2R2_BLT_PTR_PHYSICAL;
+ req.src_img.buf.fd = src_img->buf.fd;
+ } else {
+ struct hwmem_alloc *alloc;
+
+ req.src_img.buf.type = B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET;
+ req.src_img.buf.hwmem_buf_name = src_img->buf.hwmem_buf_name;
+
+ alloc = hwmem_resolve_by_name(src_img->buf.hwmem_buf_name);
+ if (IS_ERR(alloc)) {
+ dev_warn(blt_work->dev,
+ "HWMEM resolve failed\n");
+ }
+ hwmem_set_access(alloc,
+ HWMEM_ACCESS_READ | HWMEM_ACCESS_IMPORT,
+ task_tgid_nr(current));
+ hwmem_release(alloc);
+ }
+ req.src_img.pitch = src_img->pitch;
+ req.src_img.buf.offset = src_img->buf.offset;
+ req.src_img.buf.len = src_img->buf.len;
+ req.src_img.fmt = compdev_to_blt_format(src_img->fmt);
+ req.src_img.width = src_img->width;
+ req.src_img.height = src_img->height;
+
+ req.src_rect.x = src_img->src_rect.x;
+ req.src_rect.y = src_img->src_rect.y;
+ req.src_rect.width = src_img->src_rect.width;
+ req.src_rect.height = src_img->src_rect.height;
+
+ if (dst_img->buf.type == COMPDEV_PTR_PHYSICAL) {
+ req.dst_img.buf.type = B2R2_BLT_PTR_PHYSICAL;
+ req.dst_img.buf.fd = dst_img->buf.fd;
+ } else {
+ req.dst_img.buf.type = B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET;
+ req.dst_img.buf.hwmem_buf_name = dst_img->buf.hwmem_buf_name;
+ }
+ req.dst_img.pitch = dst_img->pitch;
+ req.dst_img.buf.offset = dst_img->buf.offset;
+ req.dst_img.buf.len = dst_img->buf.len;
+ req.dst_img.fmt = compdev_to_blt_format(dst_img->fmt);
+ req.dst_img.width = dst_img->width;
+ req.dst_img.height = dst_img->height;
+
+ if (blt_work->mcde_rotation)
+ req.transform = B2R2_BLT_TRANSFORM_NONE;
+ else
+ req.transform = to_blt_transform(src_img->rotation);
+ req.dst_rect.x = 0;
+ req.dst_rect.y = 0;
+ req.dst_rect.width = src_img->dst_rect.width;
+ req.dst_rect.height = src_img->dst_rect.height;
+
+ req.global_alpha = 0xff;
+ req.flags = B2R2_BLT_FLAG_DITHER;
+
+ req_id = b2r2_blt_request(blt_work->blt_handle, &req);
+
+ if (b2r2_blt_synch(blt_work->blt_handle, req_id) < 0) {
+ dev_err(blt_work->dev,
+ "%s: Could not perform b2r2_blt_synch",
+ __func__);
+ }
+
+ dst_img->src_rect.x = 0;
+ dst_img->src_rect.x = 0;
+ dst_img->src_rect.width = dst_img->width;
+ dst_img->src_rect.height = dst_img->height;
+
+ dst_img->dst_rect.x = src_img->dst_rect.x;
+ dst_img->dst_rect.y = src_img->dst_rect.y;
+ dst_img->dst_rect.width = src_img->dst_rect.width;
+ dst_img->dst_rect.height = src_img->dst_rect.height;
+
+ dst_img->rotation = src_img->rotation;
+}
+
+static int compdev_post_buffer_locked(struct compdev *cd,
+ struct compdev_img *src_img)
+{
+ int ret = 0;
+ int i;
+ bool transform_needed = false;
+ struct compdev_img *resulting_img;
+ struct compdev_blt_work blt_work;
+ struct compdev_post_callback_work cb_work;
+ bool callback_work = false;
+ bool bypass_case = false;
+
+ dev_dbg(cd->dev, "%s\n", __func__);
+
+ /* Free potential temp buffers */
+ for (i = 0; i < cd->dss_ctx.temp_img_count; i++)
+ free_comp_img_buf(cd->dss_ctx.temp_img[i], cd->dev);
+ cd->dss_ctx.temp_img_count = 0;
+
+ /* Check for bypass images */
+ if (src_img->flags & COMPDEV_BYPASS_FLAG)
+ bypass_case = true;
+
+ /* Handle callback */
+ if (cd->pb_cb != NULL) {
+ callback_work = true;
+ INIT_WORK((struct work_struct *)&cb_work,
+ compdev_callback_worker_function);
+ cb_work.img = src_img;
+ cb_work.pb_cb = cd->pb_cb;
+ cb_work.cb_data = cd->cb_data;
+ cb_work.dev = cd->dev;
+ queue_work(cd->worker_thread, (struct work_struct *)&cb_work);
+ }
+
+ if (!bypass_case) {
+ /* Determine if transform is needed */
+ /* First check scaling */
+ if ((src_img->rotation == COMPDEV_ROT_0 ||
+ src_img->rotation == COMPDEV_ROT_180) &&
+ (src_img->src_rect.width != src_img->dst_rect.width ||
+ src_img->src_rect.height != src_img->dst_rect.height))
+ transform_needed = true;
+ else if ((src_img->rotation == COMPDEV_ROT_90_CCW ||
+ src_img->rotation == COMPDEV_ROT_270_CCW) &&
+ (src_img->src_rect.width != src_img->dst_rect.height ||
+ src_img->src_rect.height != src_img->dst_rect.width))
+ transform_needed = true;
+
+ if (!transform_needed && check_hw_format(src_img->fmt) == false)
+ transform_needed = true;
+
+ if (transform_needed) {
+ u16 width = 0;
+ u16 height = 0;
+ enum compdev_fmt fmt;
+
+ INIT_WORK((struct work_struct *)&blt_work,
+ compdev_blt_worker_function);
+
+ if (cd->dss_ctx.blt_handle == 0) {
+ dev_dbg(cd->dev, "%s: B2R2 opened\n", __func__);
+ cd->dss_ctx.blt_handle = b2r2_blt_open();
+ if (cd->dss_ctx.blt_handle < 0) {
+ dev_warn(cd->dev,
+ "%s(%d): Failed to "
+ "open b2r2 device\n",
+ __func__, __LINE__);
+ }
+ }
+ blt_work.blt_handle = cd->dss_ctx.blt_handle;
+ blt_work.src_img = src_img;
+ blt_work.mcde_rotation = cd->mcde_rotation;
+
+ width = src_img->dst_rect.width;
+ height = src_img->dst_rect.height;
+
+ fmt = find_compatible_fmt(src_img->fmt,
+ (!cd->mcde_rotation) &&
+ (src_img->rotation != COMPDEV_ROT_0));
+
+ blt_work.dst_img = compdev_buffer_cache_get_image
+ (&cd->dss_ctx.cache_ctx,
+ fmt, width, height);
+
+ blt_work.dst_img->img.flags = src_img->flags;
+ blt_work.dev = cd->dev;
+
+ queue_work(cd->worker_thread,
+ (struct work_struct *)&blt_work);
+ flush_work_sync((struct work_struct *)&blt_work);
+
+ resulting_img = &blt_work.dst_img->img;
+
+ cd->dss_ctx.temp_img[cd->dss_ctx.temp_img_count] =
+ blt_work.dst_img;
+ cd->dss_ctx.temp_img_count++;
+
+ } else {
+ resulting_img = src_img;
+ }
+
+ if (!cd->mcde_rotation)
+ resulting_img->rotation = COMPDEV_ROT_0;
+
+ cd->images[cd->image_count] = resulting_img;
+ cd->image_count++;
+
+ /* make sure that a potential callback has returned */
+ if (callback_work)
+ flush_work_sync((struct work_struct *)&cb_work);
+
+ if (cd->sync_count > 1) {
+ cd->sync_count--;
+ mutex_unlock(&cd->lock);
+ /* Wait for fence */
+ wait_for_completion(&cd->fence);
+ mutex_lock(&cd->lock);
+ } else {
+ struct compdev_img *img1 = NULL;
+ struct compdev_img *img2 = NULL;
+
+ if (cd->sync_count)
+ cd->sync_count--;
+
+ img1 = cd->images[0];
+ if (cd->image_count)
+ img2 = cd->images[1];
+
+ /* Do the refresh */
+ compdev_post_buffers_dss(&cd->dss_ctx, img1, img2);
+ compdev_buffer_cache_mark_frame
+ (&cd->dss_ctx.cache_ctx);
+
+ if (cd->s_info.img_count > 1) {
+ /* Releasing fence */
+ complete(&cd->fence);
+ }
+
+ cd->sync_count = 0;
+ cd->image_count = 0;
+ cd->images[0] = NULL;
+ cd->images[1] = NULL;
+ }
+ } else {
+ /* make sure that a potential callback has returned */
+ if (callback_work)
+ flush_work_sync((struct work_struct *)&cb_work);
+ }
+
+ return ret;
+}
+
+static int compdev_post_buffers_dss(struct dss_context *dss_ctx,
+ struct compdev_img *img1, struct compdev_img *img2)
+{
+ int ret = 0;
+ int i = 0;
+
+ struct compdev_img *fb_img = NULL;
+ struct compdev_img *ovly_img = NULL;
+
+ /* Unpin the previous frame */
+ release_prev_frame(dss_ctx);
+
+ /* Set channel rotation */
+ if (img1 != NULL &&
+ (dss_ctx->current_buffer_rotation != img1->rotation)) {
+ if (compdev_update_rotation(dss_ctx, img1->rotation) != 0)
+ dev_warn(dss_ctx->dev,
+ "Failed to update MCDE rotation "
+ "(img1->rotation = %d), %d\n",
+ img1->rotation, ret);
+ else
+ dss_ctx->current_buffer_rotation = img1->rotation;
+ }
+
+ if ((img1 != NULL) && (img1->flags & COMPDEV_OVERLAY_FLAG))
+ ovly_img = img1;
+ else if (img1 != NULL)
+ fb_img = img1;
+
+
+ if ((img2 != NULL) && (img2->flags & COMPDEV_OVERLAY_FLAG))
+ ovly_img = img2;
+ else if (img2 != NULL)
+ fb_img = img2;
+
+ /* Handle buffers */
+ if (fb_img != NULL) {
+ ret = compdev_setup_ovly(fb_img,
+ &dss_ctx->ovly_buffer[i], dss_ctx->ovly[0], 1, dss_ctx);
+ if (ret)
+ dev_warn(dss_ctx->dev,
+ "Failed to setup overlay[%d], %d\n", 0, ret);
+ i++;
+ } else {
+ disable_overlay(dss_ctx->ovly[0]);
+ }
+
+
+ if (ovly_img != NULL) {
+ ret = compdev_setup_ovly(ovly_img,
+ &dss_ctx->ovly_buffer[i], dss_ctx->ovly[1], 0, dss_ctx);
+ if (ret)
+ dev_warn(dss_ctx->dev,
+ "Failed to setup overlay[%d], %d\n", 1, ret);
+ } else {
+ disable_overlay(dss_ctx->ovly[1]);
+ }
+
+ /* Do the display update */
+ mcde_dss_update_overlay(dss_ctx->ovly[0], true);
+
+ return ret;
+}
+
+static int compdev_post_scene_info_locked(struct compdev *cd,
+ struct compdev_scene_info *s_info)
+{
+ int ret = 0;
+
+ dev_dbg(cd->dev, "%s\n", __func__);
+
+ cd->s_info = *s_info;
+ cd->sync_count = cd->s_info.img_count;
+
+ /* always complete the fence in case someone is hanging incorrectly. */
+ complete(&cd->fence);
+ init_completion(&cd->fence);
+
+ /* Handle callback */
+ if (cd->si_cb != NULL) {
+ mutex_unlock(&cd->lock);
+ cd->si_cb(cd->cb_data, s_info);
+ mutex_lock(&cd->lock);
+ }
+ return ret;
+}
+
+
+static int compdev_get_size_locked(struct dss_context *dss_ctx,
+ struct compdev_size *size)
+{
+ int ret = 0;
+ if ((dss_ctx->display_rotation) % 180) {
+ size->height = dss_ctx->phy_size.width;
+ size->width = dss_ctx->phy_size.height;
+ } else {
+ size->height = dss_ctx->phy_size.height;
+ size->width = dss_ctx->phy_size.width;
+ }
+
+ return ret;
+}
+
+static int compdev_get_listener_state_locked(struct compdev *cd,
+ enum compdev_listener_state *state)
+{
+ int ret = 0;
+
+ *state = COMPDEV_LISTENER_OFF;
+ if (cd->pb_cb != NULL)
+ *state = COMPDEV_LISTENER_ON;
+ return ret;
+}
+
+static long compdev_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct compdev *cd = (struct compdev *)file->private_data;
+ struct compdev_img img;
+ struct compdev_scene_info s_info;
+
+ mutex_lock(&cd->lock);
+
+ switch (cmd) {
+ case COMPDEV_GET_SIZE_IOC:
+ {
+ struct compdev_size tmp;
+ compdev_get_size_locked(&cd->dss_ctx, &tmp);
+ ret = copy_to_user((void __user *)arg, &tmp,
+ sizeof(tmp));
+ if (ret)
+ ret = -EFAULT;
+ }
+ break;
+ case COMPDEV_GET_LISTENER_STATE_IOC:
+ {
+ enum compdev_listener_state state;
+ compdev_get_listener_state_locked(cd, &state);
+ ret = copy_to_user((void __user *)arg, &state,
+ sizeof(state));
+ if (ret)
+ ret = -EFAULT;
+ }
+ break;
+ case COMPDEV_POST_BUFFER_IOC:
+ memset(&img, 0, sizeof(img));
+ /* Get the user data */
+ if (copy_from_user(&img, (void *)arg, sizeof(img))) {
+ dev_warn(cd->dev,
+ "%s: copy_from_user failed\n",
+ __func__);
+ mutex_unlock(&cd->lock);
+ return -EFAULT;
+ }
+ ret = compdev_post_buffer_locked(cd, &img);
+
+ break;
+ case COMPDEV_POST_SCENE_INFO_IOC:
+ memset(&s_info, 0, sizeof(s_info));
+ /* Get the user data */
+ if (copy_from_user(&s_info, (void *)arg, sizeof(s_info))) {
+ dev_warn(cd->dev,
+ "%s: copy_from_user failed\n",
+ __func__);
+ mutex_unlock(&cd->lock);
+ return -EFAULT;
+ }
+ ret = compdev_post_scene_info_locked(cd, &s_info);
+
+ break;
+
+ default:
+ ret = -ENOSYS;
+ }
+
+ mutex_unlock(&cd->lock);
+
+ return ret;
+}
+
+static const struct file_operations compdev_fops = {
+ .open = compdev_open,
+ .release = compdev_release,
+ .unlocked_ioctl = compdev_ioctl,
+};
+
+static void init_compdev(struct compdev *cd, const char *name)
+{
+ mutex_init(&cd->lock);
+ INIT_LIST_HEAD(&cd->list);
+ init_completion(&cd->fence);
+
+ cd->mdev.minor = MISC_DYNAMIC_MINOR;
+ cd->mdev.name = name;
+ cd->mdev.fops = &compdev_fops;
+ cd->dev = cd->mdev.this_device;
+}
+
+static void init_dss_context(struct dss_context *dss_ctx,
+ struct mcde_display_device *ddev, struct compdev *cd)
+{
+ dss_ctx->ddev = ddev;
+ dss_ctx->dev = cd->dev;
+ memset(&dss_ctx->cache_ctx, 0, sizeof(struct buffer_cache_context));
+ dss_ctx->cache_ctx.dev = dss_ctx->dev;
+}
+
+int compdev_create(struct mcde_display_device *ddev,
+ struct mcde_overlay *parent_ovly, bool mcde_rotation)
+{
+ int ret = 0;
+ int i;
+ struct compdev *cd;
+ struct mcde_video_mode vmode;
+ struct mcde_overlay_info info;
+
+ char name[10];
+
+ if (dev_counter == 0) {
+ for (i = 0; i < MAX_NBR_OF_COMPDEVS; i++)
+ compdevs[i] = NULL;
+ }
+
+	if (dev_counter >= MAX_NBR_OF_COMPDEVS)
+ return -ENOMEM;
+
+ cd = kzalloc(sizeof(struct compdev), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ compdevs[dev_counter] = cd;
+ cd->dev_index = dev_counter;
+
+ snprintf(name, sizeof(name), "%s%d", COMPDEV_DEFAULT_DEVICE_PREFIX,
+ dev_counter++);
+ init_compdev(cd, name);
+
+ init_dss_context(&cd->dss_ctx, ddev, cd);
+
+ mcde_dss_get_video_mode(ddev, &vmode);
+
+ cd->worker_thread = create_workqueue(name);
+ if (!cd->worker_thread) {
+ ret = -ENOMEM;
+ goto fail_workqueue;
+ }
+
+ cd->dss_ctx.ovly[0] = parent_ovly;
+ if (!cd->dss_ctx.ovly[0]) {
+ ret = -ENOMEM;
+ goto fail_create_ovly;
+ }
+
+ for (i = 1; i < NUM_COMPDEV_BUFS; i++) {
+ cd->dss_ctx.ovly[i] = mcde_dss_create_overlay(ddev, &info);
+ if (!cd->dss_ctx.ovly[i]) {
+ ret = -ENOMEM;
+ goto fail_create_ovly;
+ }
+ if (mcde_dss_enable_overlay(cd->dss_ctx.ovly[i]))
+ goto fail_create_ovly;
+ if (disable_overlay(cd->dss_ctx.ovly[i]))
+ goto fail_create_ovly;
+ }
+
+ mcde_dss_get_native_resolution(ddev, &cd->dss_ctx.phy_size.width,
+ &cd->dss_ctx.phy_size.height);
+ cd->dss_ctx.display_rotation = mcde_dss_get_rotation(ddev);
+ cd->dss_ctx.current_buffer_rotation = 0;
+
+ cd->mcde_rotation = mcde_rotation;
+
+ ret = misc_register(&cd->mdev);
+ if (ret)
+ goto fail_register_misc;
+ mutex_lock(&dev_list_lock);
+ list_add_tail(&cd->list, &dev_list);
+ mutex_unlock(&dev_list_lock);
+
+ goto out;
+
+fail_register_misc:
+fail_create_ovly:
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++) {
+ if (cd->dss_ctx.ovly[i])
+ mcde_dss_destroy_overlay(cd->dss_ctx.ovly[i]);
+ }
+fail_workqueue:
+ kfree(cd);
+out:
+ return ret;
+}
+
+
+int compdev_get(int dev_idx, struct compdev **cd_pp)
+{
+ struct compdev *cd;
+ cd = NULL;
+
+ if (dev_idx >= MAX_NBR_OF_COMPDEVS)
+ return -ENOMEM;
+
+ cd = compdevs[dev_idx];
+ if (cd != NULL) {
+ mutex_lock(&cd->lock);
+ cd->ref_count++;
+ mutex_unlock(&cd->lock);
+ *cd_pp = cd;
+ return 0;
+ } else {
+ return -ENOMEM;
+ }
+}
+EXPORT_SYMBOL(compdev_get);
+
+int compdev_put(struct compdev *cd)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&cd->lock);
+ cd->ref_count--;
+ if (cd->ref_count < 0)
+ dev_warn(cd->dev,
+ "%s: Incorrect ref count\n", __func__);
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_put);
+
+int compdev_get_size(struct compdev *cd, struct compdev_size *size)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&cd->lock);
+
+ ret = compdev_get_size_locked(&cd->dss_ctx, size);
+
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_get_size);
+
+int compdev_get_listener_state(struct compdev *cd,
+ enum compdev_listener_state *listener_state)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&cd->lock);
+
+ ret = compdev_get_listener_state_locked(cd, listener_state);
+
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_get_listener_state);
+
+
+int compdev_post_buffer(struct compdev *cd, struct compdev_img *img)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&cd->lock);
+
+ ret = compdev_post_buffer_locked(cd, img);
+
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_post_buffer);
+
+int compdev_post_scene_info(struct compdev *cd,
+ struct compdev_scene_info *s_info)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&cd->lock);
+
+ ret = compdev_post_scene_info_locked(cd, s_info);
+
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_post_scene_info);
+
+int compdev_register_listener_callbacks(struct compdev *cd, void *data,
+ post_buffer_callback pb_cb, post_scene_info_callback si_cb)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+ mutex_lock(&cd->lock);
+ cd->cb_data = data;
+ cd->pb_cb = pb_cb;
+ cd->si_cb = si_cb;
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_register_listener_callbacks);
+
+int compdev_deregister_callbacks(struct compdev *cd)
+{
+ int ret = 0;
+ if (cd == NULL)
+ return -ENOMEM;
+ mutex_lock(&cd->lock);
+ cd->cb_data = NULL;
+ cd->pb_cb = NULL;
+ cd->si_cb = NULL;
+ mutex_unlock(&cd->lock);
+ return ret;
+}
+EXPORT_SYMBOL(compdev_deregister_callbacks);
+
+void compdev_destroy(struct mcde_display_device *ddev)
+{
+ struct compdev *cd;
+ struct compdev *tmp;
+ int i;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(cd, tmp, &dev_list, list) {
+ if (cd->dss_ctx.ddev == ddev) {
+ list_del(&cd->list);
+ misc_deregister(&cd->mdev);
+ for (i = 1; i < NUM_COMPDEV_BUFS; i++)
+ mcde_dss_destroy_overlay(cd->dss_ctx.ovly[i]);
+ b2r2_blt_close(cd->dss_ctx.blt_handle);
+
+ release_prev_frame(&cd->dss_ctx);
+
+ /* Free potential temp buffers */
+ for (i = 0; i < cd->dss_ctx.temp_img_count; i++)
+ free_comp_img_buf(cd->dss_ctx.temp_img[i],
+ cd->dev);
+
+ for (i = 0; i < BUFFER_CACHE_DEPTH; i++) {
+ if (cd->dss_ctx.cache_ctx.img[i]) {
+ free_comp_img_buf
+ (cd->dss_ctx.cache_ctx.img[i],
+ cd->dev);
+ cd->dss_ctx.cache_ctx.img[i] = NULL;
+ }
+ }
+
+ destroy_workqueue(cd->worker_thread);
+ kfree(cd);
+ break;
+ }
+ }
+ dev_counter--;
+ mutex_unlock(&dev_list_lock);
+}
+
+static void compdev_destroy_all(void)
+{
+ struct compdev *cd;
+ struct compdev *tmp;
+ int i;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(cd, tmp, &dev_list, list) {
+ list_del(&cd->list);
+ misc_deregister(&cd->mdev);
+ for (i = 0; i < NUM_COMPDEV_BUFS; i++)
+ mcde_dss_destroy_overlay(cd->dss_ctx.ovly[i]);
+
+ release_prev_frame(&cd->dss_ctx);
+ /* Free potential temp buffers */
+ for (i = 0; i < cd->dss_ctx.temp_img_count; i++)
+ free_comp_img_buf(cd->dss_ctx.temp_img[i], cd->dev);
+
+ for (i = 0; i < BUFFER_CACHE_DEPTH; i++) {
+ if (cd->dss_ctx.cache_ctx.img[i]) {
+ free_comp_img_buf
+ (cd->dss_ctx.cache_ctx.img[i],
+ cd->dev);
+ cd->dss_ctx.cache_ctx.img[i] = NULL;
+ }
+ }
+
+ kfree(cd);
+ }
+ mutex_unlock(&dev_list_lock);
+
+ mutex_destroy(&dev_list_lock);
+}
+
+static int __init compdev_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ mutex_init(&dev_list_lock);
+
+ return 0;
+}
+module_init(compdev_init);
+
+static void __exit compdev_exit(void)
+{
+ compdev_destroy_all();
+ pr_info("%s\n", __func__);
+}
+module_exit(compdev_exit);
+
+MODULE_AUTHOR("Anders Bauer <anders.bauer@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Display overlay device driver");
+
diff --git a/drivers/misc/db8500-modem-trace.c b/drivers/misc/db8500-modem-trace.c
new file mode 100644
index 00000000000..b757b742121
--- /dev/null
+++ b/drivers/misc/db8500-modem-trace.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Authors: Michel JAOUEN <michel.jaouen@stericsson.com>
+ * Maxime COQUELIN <maxime.coquelin-nonst@stericsson.com>
+ * for ST-Ericsson
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/mman.h>
+#include <linux/db8500-modem-trace.h>
+
+#include <mach/hardware.h>
+
+#define DEVICE_NAME "db8500-modem-trace"
+
+/*
+ * Activation of this flag triggers the initialization of two buffers:
+ * 4 Kbytes with id 0xdeadbeef and 16 Kbytes with id 0xfadafada.
+ * We assume that the platform provides at least 20 Kbytes.
+ */
+
+struct trace {
+ u32 start;
+ u32 end;
+ u32 mdm_base;
+ u32 ape_base;
+ void __iomem *area;
+	/* this spinlock forbids concurrent access to the same trace buffer */
+ spinlock_t lock;
+ struct device *dev;
+ struct miscdevice misc_dev;
+};
+
+struct trace_modem {
+ u32 phys_addr;
+ u8 filler;
+};
+
+static struct trace *trace_priv;
+
+
+/* All these definitions are tied to the modem interface */
+#define MODEM_MARKER 0x88
+/* free marker is also written on filler */
+#define FREE_MARKER 0xa5
+#define FREE_MARKER_2 0xa5a5
+#define READ_MARKER 0x5a
+
+struct buffer_header {
+ u8 pattern;
+ u8 filler;
+ u16 head_size;
+};
+
+
+static int trace_read(unsigned long arg)
+{
+ struct modem_trace_req req;
+ struct buffer_header *pt;
+ char tmp_char;
+
+ if (copy_from_user(&req, (struct modem_trace_req *)arg,
+ sizeof(struct modem_trace_req)))
+ return -EFAULT;
+
+ /* compute Modem physical address to APE physical address range */
+ if (req.phys_addr < trace_priv->mdm_base) {
+		dev_err(trace_priv->dev, "MODEM ADDR incorrect\n");
+ return -EINVAL;
+ }
+ req.phys_addr += trace_priv->ape_base - trace_priv->mdm_base;
+
+ /* check request is in the range and aligned */
+ if ((req.phys_addr % 4 != 0)
+ || (req.phys_addr < trace_priv->start)
+ || (req.phys_addr + req.size) >= trace_priv->end) {
+ dev_err(trace_priv->dev, "req out of range %x %x\n",
+ req.phys_addr, req.size);
+ return -EINVAL;
+ }
+
+ /* perform access to memory area */
+ pt = (struct buffer_header *)((u32)trace_priv->area +
+ req.phys_addr - trace_priv->start);
+
+	/* Take the spinlock in case several requests target the same trace
+	 * buffer. */
+ spin_lock(&trace_priv->lock);
+ if (pt->pattern != MODEM_MARKER) {
+ /* pattern and size not matching */
+		dev_err(trace_priv->dev,
+			"req not matching filler %x/%x and/or pattern %x\n",
+			req.filler, pt->filler, pt->pattern);
+ spin_unlock(&trace_priv->lock);
+ return -EINVAL;
+ }
+ /* mark pattern as read and unlock spin */
+ pt->pattern = READ_MARKER;
+ spin_unlock(&trace_priv->lock);
+
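+	/*
+	 * copy_to_user() returns the number of bytes that could not be
+	 * copied, so req.size ends up holding the bytes actually delivered
+	 * to userspace.
+	 */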
+ req.size -= copy_to_user(req.buff, pt, req.size);
+
+ pt->pattern = FREE_MARKER;
+ pt->filler = FREE_MARKER;
+ tmp_char = MODEM_MARKER;
+
+ /* Update marker for trace tool */
+ if (copy_to_user(req.buff, &tmp_char, 1))
+ return -EFAULT;
+
+ /* Update effective written size */
+ if (copy_to_user((struct modem_trace_req *)arg, &req,
+ sizeof(struct modem_trace_req)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int trace_mmapdump(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long vma_start = vma->vm_start;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if ((vma->vm_end - vma->vm_start) <
+ (trace_priv->end - trace_priv->start))
+ return -EINVAL;
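+	/* Map the whole trace area read-only into the caller's address space. */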
+ if (remap_pfn_range(vma,
+ vma_start,
+ trace_priv->start >> PAGE_SHIFT,
+ trace_priv->end - trace_priv->start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
+
+static long trace_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+ void __user *argp = (void __user *)arg;
+ unsigned long size = trace_priv->end-trace_priv->start;
+
+ switch (cmd) {
+ case TM_GET_DUMPINFO:
+ ret = put_user(size, (unsigned long *)argp);
+ break;
+ case TM_TRACE_REQ:
+ ret = trace_read(arg);
+ break;
+
+ default:
+ ret = -EPERM;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations trace_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = trace_ioctl,
+ .mmap = trace_mmapdump
+};
+
+static int trace_probe(struct platform_device *pdev)
+{
+ int rv = 0;
+ struct db8500_trace_platform_data *pdata = pdev->dev.platform_data;
+	/* retrieve area descriptor from platform device resource */
+	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!mem || ((mem->start == 0) && (mem->end == 0))) {
+		rv = -EINVAL;
+		goto out;
+	}
+
+ if ((pdata->ape_base == 0) || (pdata->modem_base == 0)) {
+ rv = -EINVAL;
+ goto out;
+ }
+
+ trace_priv = kzalloc(sizeof(*trace_priv), GFP_ATOMIC);
+ if (!trace_priv) {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ trace_priv->dev = &pdev->dev;
+ trace_priv->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ trace_priv->misc_dev.name = DEVICE_NAME;
+ trace_priv->misc_dev.fops = &trace_fops;
+ trace_priv->area = (void __iomem *)ioremap_nocache(mem->start,
+ resource_size(mem));
+ if (!trace_priv->area) {
+ rv = -ENOMEM;
+ goto outfree;
+ }
+
+ trace_priv->start = mem->start;
+ trace_priv->end = mem->end;
+
+ trace_priv->mdm_base = pdata->modem_base;
+ trace_priv->ape_base = pdata->ape_base;
+
+	/* spinlock allowing SMP access when reading/writing the trace buffer header */
+ spin_lock_init(&trace_priv->lock);
+
+ rv = misc_register(&trace_priv->misc_dev);
+ if (rv) {
+ dev_err(&pdev->dev, "can't misc_register\n");
+ goto outunmap;
+ }
+
+ return rv;
+
+outunmap:
+ iounmap(trace_priv->area);
+outfree:
+ kfree(trace_priv);
+out:
+ return rv;
+
+}
+
+static int trace_remove(struct platform_device *pdev)
+{
+ int rv = 0;
+
+ if (trace_priv) {
+ rv = misc_deregister(&trace_priv->misc_dev);
+ iounmap(trace_priv->area);
+ kfree(trace_priv);
+ }
+
+ return rv;
+}
+
+static struct platform_driver trace_driver = {
+ .probe = trace_probe,
+ .remove = trace_remove,
+ .driver = {
+ .name = "db8500-modem-trace",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int trace_init(void)
+{
+ platform_driver_register(&trace_driver);
+ return 0;
+}
+static void trace_exit(void)
+{
+ platform_driver_unregister(&trace_driver);
+}
+module_init(trace_init);
+module_exit(trace_exit);
+
+MODULE_AUTHOR("ST-Ericsson");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/dbx500-mloader.c b/drivers/misc/dbx500-mloader.c
new file mode 100644
index 00000000000..408d1a1a29e
--- /dev/null
+++ b/drivers/misc/dbx500-mloader.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Ludovic Barre <ludovic.barre@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/mman.h>
+#include <linux/io.h>
+
+#include <mach/mloader-dbx500.h>
+#include <linux/mloader.h>
+#include <mach/hardware.h>
+
+#define DEVICE_NAME "dbx500_mloader_fw"
+
+struct mloader_priv {
+ struct platform_device *pdev;
+ struct dbx500_mloader_pdata *pdata;
+ struct miscdevice misc_dev;
+	u32 areas_size;
+ void __iomem *uid_base;
+ u8 size;
+};
+
+static struct mloader_priv *mloader_priv;
+
+static int mloader_fw_send(struct dbx500_ml_fw *fw_info)
+{
+ const struct firmware *fw;
+ unsigned long size;
+ unsigned long phys_start;
+ void *fw_data;
+ void *vaddr;
+ void __iomem *ioaddr;
+ int ret;
+
+ ret = request_firmware(&fw, fw_info->name, &mloader_priv->pdev->dev);
+ if (ret) {
+ dev_err(&mloader_priv->pdev->dev, "request firmware failed\n");
+ goto out;
+ }
+
+ if (fw->size > (fw_info->area->size - fw_info->offset)) {
+ dev_err(&mloader_priv->pdev->dev,
+ "fw:%s is too big for:%s\n",
+ fw_info->name, fw_info->area->name);
+ ret = -EINVAL;
+ goto err_fw;
+ }
+
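+	/*
+	 * ioremap() operates on whole pages: align the physical start down
+	 * to a page boundary and add the sub-page offset back to the mapped
+	 * address before copying the firmware image.
+	 */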
+ size = PAGE_ALIGN(fw->size);
+ phys_start = fw_info->area->start + fw_info->offset;
+ phys_start &= PAGE_MASK;
+ ioaddr = ioremap(phys_start, size);
+ if (!ioaddr) {
+ dev_err(&mloader_priv->pdev->dev,
+			"failed to remap memory region\n");
+ ret = -EINVAL;
+ goto err_fw;
+ }
+
+ vaddr = ioaddr + (fw_info->offset & ~PAGE_MASK);
+ fw_data = (void *)fw->data;
+ memcpy_toio(vaddr, fw_data, fw->size);
+ iounmap(ioaddr);
+
+err_fw:
+ release_firmware(fw);
+out:
+ return ret;
+}
+
+static int mloader_fw_upload(void)
+{
+ int i, ret;
+ struct dbx500_mloader_pdata *pdata = mloader_priv->pdata;
+
+ for (i = 0; i < pdata->nr_fws; i++) {
+ ret = mloader_fw_send(&pdata->fws[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_err(&mloader_priv->pdev->dev,
+ "Failed to upload %s firmware", pdata->fws[i].name);
+ return ret;
+}
+
+static int mloader_fw_mmapdump(struct file *file, struct vm_area_struct *vma)
+{
+ int i;
+ unsigned long dump_size = 0;
+ unsigned long vma_start = vma->vm_start;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++)
+ dump_size += mloader_priv->pdata->areas[i].size;
+
+ if ((vma->vm_end - vma->vm_start) < dump_size)
+ return -EINVAL;
+
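+	/* Map all dump areas back to back into one contiguous user mapping. */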
+ for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++) {
+ if (remap_pfn_range(vma,
+ vma_start,
+ mloader_priv->pdata->areas[i].start >> PAGE_SHIFT,
+ mloader_priv->pdata->areas[i].size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+ vma_start += mloader_priv->pdata->areas[i].size;
+ }
+ return 0;
+}
+
+static void mloader_fw_dumpinfo(struct dump_image *images)
+{
+ u32 offset = 0;
+ int i;
+
+ for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++) {
+ strncpy(images[i].name,
+ mloader_priv->pdata->areas[i].name, MAX_NAME);
+ images[i].name[MAX_NAME-1] = 0;
+ images[i].offset = offset;
+ images[i].size = mloader_priv->pdata->areas[i].size;
+ offset += mloader_priv->pdata->areas[i].size;
+ }
+}
+
+static long mloader_fw_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case ML_UPLOAD:
+ ret = mloader_fw_upload();
+ break;
+ case ML_GET_NBIMAGES:
+ ret = put_user(mloader_priv->pdata->nr_areas,
+ (unsigned long __user *)argp);
+ break;
+ case ML_GET_DUMPINFO: {
+ struct dump_image *dump_images;
+		dump_images = kzalloc(mloader_priv->pdata->nr_areas
+				* sizeof(struct dump_image), GFP_ATOMIC);
+		if (!dump_images) {
+			ret = -ENOMEM;
+			break;
+		}
+		mloader_fw_dumpinfo(dump_images);
+ ret = copy_to_user(argp, (void *) dump_images,
+ mloader_priv->pdata->nr_areas
+ * sizeof(struct dump_image)) ? -EFAULT : 0;
+ kfree(dump_images);
+ break;
+ }
+ case ML_GET_FUSEINFO: {
+ ret = copy_to_user(argp, (void *) mloader_priv->uid_base,
+ mloader_priv->size) ? -EFAULT : 0;
+ break;
+ }
+ default:
+ ret = -EPERM;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct file_operations modem_fw_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = mloader_fw_ioctl,
+ .mmap = mloader_fw_mmapdump,
+};
+
+static int __devinit mloader_fw_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i;
+ struct resource *res = NULL;
+
+ mloader_priv = kzalloc(sizeof(*mloader_priv), GFP_ATOMIC);
+ if (!mloader_priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mloader_priv->pdev = pdev;
+ mloader_priv->pdata = pdev->dev.platform_data;
+
+ mloader_priv->misc_dev.minor = MISC_DYNAMIC_MINOR;
+ mloader_priv->misc_dev.name = DEVICE_NAME;
+ mloader_priv->misc_dev.fops = &modem_fw_fops;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -EINVAL;
+		goto err_free_priv;
+	}
+	mloader_priv->size = resource_size(res);
+	mloader_priv->uid_base = ioremap(res->start, mloader_priv->size);
+
+ if (!mloader_priv->uid_base) {
+ ret = -ENOMEM;
+ goto err_free_priv;
+ }
+
+ ret = misc_register(&mloader_priv->misc_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't misc_register\n");
+ goto err_free_priv;
+ }
+
+	dev_info(&mloader_priv->pdev->dev, "mloader device registered\n");
+
+ for (i = 0 ; i < mloader_priv->pdata->nr_areas ; i++) {
+ dev_dbg(&mloader_priv->pdev->dev,
+ "Area:%d (name:%s start:%x size:%x)\n",
+ i, mloader_priv->pdata->areas[i].name,
+ mloader_priv->pdata->areas[i].start,
+ mloader_priv->pdata->areas[i].size);
+ }
+
+ for (i = 0 ; i < mloader_priv->pdata->nr_fws ; i++) {
+ dev_dbg(&mloader_priv->pdev->dev,
+ "Firmware:%d (name:%s offset:%x "
+ "area_name:%s area_start:%x area_size:%x)\n",
+ i, mloader_priv->pdata->fws[i].name,
+ mloader_priv->pdata->fws[i].offset,
+ mloader_priv->pdata->fws[i].area->name,
+ mloader_priv->pdata->fws[i].area->start,
+ mloader_priv->pdata->fws[i].area->size);
+ }
+
+ return ret;
+
+err_free_priv:
+ kfree(mloader_priv);
+out:
+ return ret;
+}
+
+static int __devexit mloader_fw_remove(struct platform_device *pdev)
+{
+ int err;
+
+	err = misc_deregister(&mloader_priv->misc_dev);
+	if (err < 0)
+		dev_err(&pdev->dev, "can't misc_deregister, %d\n", err);
+
+	iounmap(mloader_priv->uid_base);
+	kfree(mloader_priv);
+
+ return err;
+}
+
+static struct platform_driver mloader_fw_driver = {
+ .driver.name = DEVICE_NAME,
+ .driver.owner = THIS_MODULE,
+ .probe = mloader_fw_probe,
+ .remove = __devexit_p(mloader_fw_remove),
+};
+
+static int __init mloader_fw_init(void)
+{
+ return platform_driver_register(&mloader_fw_driver);
+}
+
+static void __exit mloader_fw_exit(void)
+{
+ platform_driver_unregister(&mloader_fw_driver);
+}
+
+module_init(mloader_fw_init);
+module_exit(mloader_fw_exit);
+MODULE_DESCRIPTION("ST-Ericsson modem loader firmware");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@stericsson.com>");
diff --git a/drivers/misc/dispdev/Makefile b/drivers/misc/dispdev/Makefile
new file mode 100644
index 00000000000..11dc7611d26
--- /dev/null
+++ b/drivers/misc/dispdev/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DISPDEV) += dispdev.o
diff --git a/drivers/misc/dispdev/dispdev.c b/drivers/misc/dispdev/dispdev.c
new file mode 100644
index 00000000000..5413a252d35
--- /dev/null
+++ b/drivers/misc/dispdev/dispdev.c
@@ -0,0 +1,659 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Display output device driver
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/ioctl.h>
+
+#include <linux/dispdev.h>
+#include <linux/hwmem.h>
+#include <video/mcde_dss.h>
+
+#define DENSITY_CHECK (16)
+#define MAX_BUFFERS 4
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_list_lock);
+
+enum buffer_state {
+ BUF_UNUSED = 0,
+ BUF_QUEUED,
+ BUF_ACTIVATED,
+/*TODO:waitfordone BUF_DEACTIVATED,*/
+ BUF_FREE,
+ BUF_DEQUEUED,
+};
+
+struct dispdev_buffer {
+ struct hwmem_alloc *alloc;
+ u32 size;
+ enum buffer_state state;
+ u32 paddr; /* if pinned */
+};
+
+struct dispdev {
+ bool open;
+ struct mutex lock;
+ struct miscdevice mdev;
+ struct list_head list;
+ struct mcde_display_device *ddev;
+ struct mcde_overlay *ovly;
+ struct mcde_overlay *parent_ovly;
+ struct dispdev_config config;
+ bool overlay;
+ struct dispdev_buffer buffers[MAX_BUFFERS];
+ wait_queue_head_t waitq_dq;
+ /*
+ * For the rotation use case
+ * buffers_need_update is used to ensure that a set_config that
+ * changes width or height is followed by a unregister_buffer.
+ */
+ bool buffers_need_update;
+ /*
+ * For the overlay startup use case.
+ * first_update is used to handle the first update after a set_config.
+ * In this case a queue_buffer will arrive after set_config and not a
+ * unregister_buffer as in the rotation use case.
+ */
+ bool first_update;
+ char name[sizeof(DISPDEV_DEFAULT_DEVICE_PREFIX) + 3];
+};
+
+static int find_buf(struct dispdev *dd, enum buffer_state state)
+{
+ int i;
+ for (i = 0; i < MAX_BUFFERS; i++)
+ if (dd->buffers[i].state == state)
+ return i;
+ return -1;
+}
+
+int dispdev_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct dispdev *dd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(dd, &dev_list, list)
+ if (dd->mdev.minor == iminor(inode))
+ break;
+
+ if (&dd->list == &dev_list) {
+ mutex_unlock(&dev_list_lock);
+ return -ENODEV;
+ }
+
+ if (dd->open) {
+ mutex_unlock(&dev_list_lock);
+ return -EBUSY;
+ }
+
+ dd->open = true;
+
+ mutex_unlock(&dev_list_lock);
+
+ ret = mcde_dss_enable_overlay(dd->ovly);
+ if (ret)
+ return ret;
+
+ file->private_data = dd;
+
+ return 0;
+}
+
+int dispdev_release(struct inode *inode, struct file *file)
+{
+ int i;
+ struct dispdev *dd = NULL;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry(dd, &dev_list, list)
+ if (dd->mdev.minor == iminor(inode))
+ break;
+ mutex_unlock(&dev_list_lock);
+
+ if (&dd->list == &dev_list)
+ return -ENODEV;
+
+ /* TODO: Make sure it waits for completion */
+ mcde_dss_disable_overlay(dd->ovly);
+ for (i = 0; i < MAX_BUFFERS; i++) {
+ if (dd->buffers[i].paddr)
+ hwmem_unpin(dd->buffers[i].alloc);
+ if (dd->buffers[i].alloc)
+ hwmem_release(dd->buffers[i].alloc);
+ dd->buffers[i].alloc = NULL;
+ dd->buffers[i].state = BUF_UNUSED;
+ dd->buffers[i].size = 0;
+ dd->buffers[i].paddr = 0;
+ }
+ dd->open = false;
+ wake_up(&dd->waitq_dq);
+ return 0;
+}
+
+static enum mcde_ovly_pix_fmt get_ovly_fmt(enum dispdev_fmt fmt)
+{
+ switch (fmt) {
+ default:
+ case DISPDEV_FMT_RGB565:
+ return MCDE_OVLYPIXFMT_RGB565;
+ case DISPDEV_FMT_RGB888:
+ return MCDE_OVLYPIXFMT_RGB888;
+ case DISPDEV_FMT_RGBA8888:
+ return MCDE_OVLYPIXFMT_RGBA8888;
+ case DISPDEV_FMT_RGBX8888:
+ return MCDE_OVLYPIXFMT_RGBX8888;
+ case DISPDEV_FMT_YUV422:
+ return MCDE_OVLYPIXFMT_YCbCr422;
+ }
+}
+
+static void get_ovly_info(struct dispdev_config *cfg,
+ struct mcde_video_mode *vmode,
+ struct mcde_overlay_info *info, bool overlay)
+{
+ info->paddr = 0;
+ info->stride = cfg->stride;
+ info->fmt = get_ovly_fmt(cfg->format);
+ info->src_x = 0;
+ info->src_y = 0;
+ info->dst_x = cfg->x;
+ info->dst_y = cfg->y;
+ info->dst_z = cfg->z;
+ info->w = cfg->width;
+ info->h = cfg->height;
+ info->dirty.x = 0;
+ info->dirty.y = 0;
+ info->dirty.w = vmode->xres;
+ info->dirty.h = vmode->yres;
+}
+
+static int dispdev_set_config(struct dispdev *dd, struct dispdev_config *cfg)
+{
+ int ret = 0;
+ if (memcmp(&dd->config, cfg, sizeof(struct dispdev_config)) == 0)
+ return 0;
+
+ /*
+	 * Only update MCDE if format, stride, width and height
+	 * are the same. Otherwise just store the new config and update
+	 * MCDE on the next queued buffer, because the currently active
+	 * buffer may have the wrong format, width, etc.
+ */
+ if (cfg->format == dd->config.format &&
+ cfg->stride == dd->config.stride &&
+ cfg->width == dd->config.width &&
+ cfg->height == dd->config.height) {
+
+ int buf_index;
+ if (!dd->buffers_need_update) {
+ buf_index = find_buf(dd, BUF_ACTIVATED);
+ if (buf_index >= 0) {
+ struct mcde_overlay_info info;
+ struct dispdev_buffer *buf;
+ struct mcde_video_mode vmode;
+
+ buf = &dd->buffers[buf_index];
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(cfg, &vmode, &info, dd->overlay);
+ info.paddr = buf->paddr;
+ ret = mcde_dss_apply_overlay(dd->ovly, &info);
+ if (!ret)
+ mcde_dss_update_overlay(dd->ovly,
+ false);
+ }
+ }
+ } else {
+ dd->buffers_need_update = true;
+ }
+
+ dd->config = *cfg;
+
+ return ret;
+}
+
+static int dispdev_register_buffer(struct dispdev *dd, s32 hwmem_name)
+{
+ int ret;
+ struct dispdev_buffer *buf;
+ enum hwmem_mem_type memtype;
+ enum hwmem_access access;
+
+ ret = find_buf(dd, BUF_UNUSED);
+ if (ret < 0)
+ return -ENOMEM;
+ buf = &dd->buffers[ret];
+ buf->alloc = hwmem_resolve_by_name(hwmem_name);
+ if (IS_ERR(buf->alloc)) {
+ ret = PTR_ERR(buf->alloc);
+ goto resolve_failed;
+ }
+
+ hwmem_get_info(buf->alloc, &buf->size, &memtype, &access);
+
+ if (!(access & HWMEM_ACCESS_READ) ||
+ memtype != HWMEM_MEM_CONTIGUOUS_SYS) {
+ ret = -EACCES;
+ goto invalid_mem;
+ }
+
+ buf->state = BUF_FREE;
+ goto out;
+invalid_mem:
+ hwmem_release(buf->alloc);
+resolve_failed:
+out:
+ return ret;
+}
+
+static int dispdev_unregister_buffer(struct dispdev *dd, u32 buf_idx)
+{
+	struct dispdev_buffer *buf;
+
+	if (buf_idx >= ARRAY_SIZE(dd->buffers))
+		return -EINVAL;
+
+	buf = &dd->buffers[buf_idx];
+
+ if (buf->state == BUF_UNUSED)
+ return -EINVAL;
+
+ if (dd->buffers_need_update)
+ dd->buffers_need_update = false;
+
+ if (buf->state == BUF_ACTIVATED) {
+ /* Disable the overlay */
+ struct mcde_overlay_info info;
+ struct mcde_video_mode vmode;
+ /* TODO Wait for frame done */
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, dd->overlay);
+ mcde_dss_apply_overlay(dd->ovly, &info);
+ mcde_dss_update_overlay(dd->ovly, false);
+ hwmem_unpin(dd->buffers[buf_idx].alloc);
+ }
+
+ hwmem_release(buf->alloc);
+ buf->state = BUF_UNUSED;
+ buf->alloc = NULL;
+ buf->size = 0;
+ buf->paddr = 0;
+ dd->first_update = false;
+
+ return 0;
+}
+
+
+/**
+ * @brief Check if the buffer is transparent or black (ARGB = X000)
+ * Note: Only for ARGB32.
+ * Worst case: an almost fully transparent buffer.
+ * Results: ~2200us @800MHz for a WVGA screen, with DENSITY_CHECK=8
+ *          ~520us @800MHz for a WVGA screen, with DENSITY_CHECK=16
+ *
+ * @param w width
+ * @param h height
+ * @param addr buffer address
+ *
+ * @return 1 if the buffer is transparent, else 0
+ */
+static int is_transparent(int w, int h, u32 *addr)
+{
+ int i, j;
+ u32 *c, *next_line;
+ u32 sum;
+
+ next_line = addr;
+ sum = 0;
+
+ /* TODO Optimize me */
+ for (j = 0; j < h; j += DENSITY_CHECK) {
+ c = next_line;
+ for (i = 0; i < w; i += DENSITY_CHECK) {
+ sum += ((*c) & 0x00FFFFFF);
+ c += DENSITY_CHECK;
+ }
+ if (sum)
+ return 0; /* Not "transparent" */
+ next_line += (w * DENSITY_CHECK);
+ }
+
+ return 1; /* "Transparent" */
+}
+
+static int dispdev_queue_buffer(struct dispdev *dd,
+ struct dispdev_buffer_info *buffer)
+{
+ int ret, i;
+ struct mcde_overlay_info info;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+ struct hwmem_region rgn = { .offset = 0, .count = 1, .start = 0 };
+ struct hwmem_alloc *alloc;
+ struct mcde_video_mode vmode;
+ u32 buf_idx = buffer->buf_idx;
+
+ if (buf_idx >= ARRAY_SIZE(dd->buffers) ||
+ dd->buffers[buf_idx].state != BUF_DEQUEUED)
+ return -EINVAL;
+
+ alloc = dd->buffers[buf_idx].alloc;
+ mcde_dss_get_video_mode(dd->ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, dd->overlay);
+ ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length);
+ if (ret) {
+ dev_warn(dd->mdev.this_device, "Pin failed, %d\n", ret);
+ return -EINVAL;
+ }
+
+ rgn.size = rgn.end = dd->buffers[buf_idx].size;
+ ret = hwmem_set_domain(alloc, HWMEM_ACCESS_READ,
+ HWMEM_DOMAIN_SYNC, &rgn);
+ if (ret)
+ dev_warn(dd->mdev.this_device, "Set domain failed, %d\n", ret);
+
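+	/* Retire the previously activated buffer and wake any waiting dequeuer. */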
+ i = find_buf(dd, BUF_ACTIVATED);
+ if (i >= 0) {
+ dd->buffers[i].state = BUF_FREE;
+ wake_up(&dd->waitq_dq);
+ }
+
+ if (!dd->first_update) {
+ dd->first_update = true;
+ dd->buffers_need_update = false;
+ }
+
+ dd->buffers[buf_idx].paddr = mem_chunk.paddr;
+
+ if (buffer->display_update && !dd->buffers_need_update &&
+ dd->config.width == buffer->buf_cfg.width &&
+ dd->config.height == buffer->buf_cfg.height &&
+ dd->config.format == buffer->buf_cfg.format &&
+ dd->config.stride == buffer->buf_cfg.stride) {
+ info.paddr = mem_chunk.paddr;
+ mcde_dss_apply_overlay(dd->ovly, &info);
+ mcde_dss_update_overlay(dd->ovly, false);
+ } else if (buffer->display_update) {
+ dd->buffers_need_update = true;
+ }
+
+ /* Disable the MCDE FB overlay */
+ if ((dd->parent_ovly->state != NULL) &&
+ (dd->ddev->check_transparency)) {
+ dd->ddev->check_transparency--;
+ mcde_dss_get_overlay_info(dd->parent_ovly, &info);
+ if (dd->ddev->check_transparency == 0) {
+ if (is_transparent(info.w, info.h, info.vaddr)) {
+ mcde_dss_disable_overlay(dd->parent_ovly);
+ printk(KERN_INFO "%s Disable overlay\n",
+ __func__);
+ }
+ }
+ }
+
+ dd->buffers[buf_idx].state = BUF_ACTIVATED;
+
+ return 0;
+}
+
+static int dispdev_dequeue_buffer(struct dispdev *dd)
+{
+ int i;
+
+ i = find_buf(dd, BUF_FREE);
+ if (i < 0) {
+ if (find_buf(dd, BUF_ACTIVATED) < 0)
+ return -EINVAL;
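+		/*
+		 * Drop the device lock while waiting so that a queue_buffer
+		 * call can release the active buffer and wake us up.
+		 */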
+ mutex_unlock(&dd->lock);
+ wait_event(dd->waitq_dq, (i = find_buf(dd, BUF_FREE)) >= 0);
+ mutex_lock(&dd->lock);
+ }
+ hwmem_unpin(dd->buffers[i].alloc);
+ dd->buffers[i].state = BUF_DEQUEUED;
+ dd->buffers[i].paddr = 0;
+
+ return i;
+}
+
+long dispdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct dispdev *dd = (struct dispdev *)file->private_data;
+
+ mutex_lock(&dd->lock);
+
+ switch (cmd) {
+ case DISPDEV_SET_CONFIG_IOC:
+ {
+ struct dispdev_config cfg;
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(cfg)))
+ ret = -EFAULT;
+ else
+ ret = dispdev_set_config(dd, &cfg);
+ }
+ break;
+ case DISPDEV_GET_CONFIG_IOC:
+ ret = copy_to_user((void __user *)arg, &dd->config,
+ sizeof(dd->config));
+ if (ret)
+ ret = -EFAULT;
+ break;
+ case DISPDEV_REGISTER_BUFFER_IOC:
+ ret = dispdev_register_buffer(dd, (s32)arg);
+ break;
+ case DISPDEV_UNREGISTER_BUFFER_IOC:
+ ret = dispdev_unregister_buffer(dd, (u32)arg);
+ break;
+ case DISPDEV_QUEUE_BUFFER_IOC:
+ {
+ struct dispdev_buffer_info buffer;
+ if (copy_from_user(&buffer, (void __user *)arg,
+ sizeof(buffer)))
+ ret = -EFAULT;
+ else
+ ret = dispdev_queue_buffer(dd, &buffer);
+ break;
+ }
+ case DISPDEV_DEQUEUE_BUFFER_IOC:
+ ret = dispdev_dequeue_buffer(dd);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ mutex_unlock(&dd->lock);
+
+ return ret;
+}
+
+static const struct file_operations dispdev_fops = {
+ .open = dispdev_open,
+ .release = dispdev_release,
+ .unlocked_ioctl = dispdev_ioctl,
+};
+
+static void init_dispdev(struct dispdev *dd, struct mcde_display_device *ddev,
+ const char *name, bool overlay)
+{
+ u16 w, h;
+ int rotation;
+
+ mutex_init(&dd->lock);
+ INIT_LIST_HEAD(&dd->list);
+ dd->ddev = ddev;
+ dd->overlay = overlay;
+ mcde_dss_get_native_resolution(ddev, &w, &h);
+ rotation = mcde_dss_get_rotation(ddev);
+
+ if ((rotation == MCDE_DISPLAY_ROT_90_CCW) ||
+ (rotation == MCDE_DISPLAY_ROT_90_CW)) {
+ dd->config.width = h;
+ dd->config.height = w;
+ } else {
+ dd->config.width = w;
+ dd->config.height = h;
+ }
+ dd->config.format = DISPDEV_FMT_RGB565;
+ dd->config.stride = sizeof(u16) * w;
+ dd->config.x = 0;
+ dd->config.y = 0;
+ dd->config.z = 0;
+ dd->buffers_need_update = false;
+ dd->first_update = false;
+ init_waitqueue_head(&dd->waitq_dq);
+ dd->mdev.minor = MISC_DYNAMIC_MINOR;
+ dd->mdev.name = name;
+ dd->mdev.fops = &dispdev_fops;
+ pr_info("%s: name=%s w=%d, h=%d, fmt=%d, stride=%d\n", __func__, name,
+ dd->config.width, dd->config.height, dd->config.format,
+ dd->config.stride);
+}
+
+int dispdev_create(struct mcde_display_device *ddev, bool overlay,
+ struct mcde_overlay *parent_ovly)
+{
+ int ret = 0;
+ struct dispdev *dd;
+ struct mcde_video_mode vmode;
+ struct mcde_overlay_info info = {0};
+
+ static int counter;
+
+ dd = kzalloc(sizeof(struct dispdev), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+ snprintf(dd->name, sizeof(dd->name), "%s%d",
+ DISPDEV_DEFAULT_DEVICE_PREFIX, counter++);
+ init_dispdev(dd, ddev, dd->name, overlay);
+
+ if (!overlay) {
+ ret = mcde_dss_enable_display(ddev);
+ if (ret)
+ goto fail_enable_display;
+ mcde_dss_get_video_mode(ddev, &vmode);
+ mcde_dss_try_video_mode(ddev, &vmode);
+ ret = mcde_dss_set_video_mode(ddev, &vmode);
+ if (ret)
+ goto fail_set_video_mode;
+ mcde_dss_set_pixel_format(ddev, info.fmt);
+ mcde_dss_apply_channel(ddev);
+ } else
+ mcde_dss_get_video_mode(ddev, &vmode);
+ get_ovly_info(&dd->config, &vmode, &info, overlay);
+
+ /* Save the MCDE FB overlay */
+ dd->parent_ovly = parent_ovly;
+
+ dd->ovly = mcde_dss_create_overlay(ddev, &info);
+ if (!dd->ovly) {
+ ret = -ENOMEM;
+ goto fail_create_ovly;
+ }
+
+ ret = misc_register(&dd->mdev);
+ if (ret)
+ goto fail_register_misc;
+ mutex_lock(&dev_list_lock);
+ list_add_tail(&dd->list, &dev_list);
+ mutex_unlock(&dev_list_lock);
+
+ goto out;
+
+fail_register_misc:
+ mcde_dss_destroy_overlay(dd->ovly);
+fail_create_ovly:
+ if (!overlay)
+ mcde_dss_disable_display(ddev);
+fail_set_video_mode:
+fail_enable_display:
+ kfree(dd);
+out:
+ return ret;
+}
+
+void dispdev_destroy(struct mcde_display_device *ddev)
+{
+ struct dispdev *dd;
+ struct dispdev *tmp;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(dd, tmp, &dev_list, list) {
+ if (dd->ddev == ddev) {
+ list_del(&dd->list);
+ misc_deregister(&dd->mdev);
+ mcde_dss_destroy_overlay(dd->ovly);
+ /*
+ * TODO: Uncomment when DSS has reference
+ * counting of enable/disable
+ */
+ /* mcde_dss_disable_display(dd->ddev); */
+ kfree(dd);
+ break;
+ }
+ }
+ mutex_unlock(&dev_list_lock);
+}
+
+static void dispdev_destroy_all(void)
+{
+ struct dispdev *dd;
+ struct dispdev *tmp;
+
+ mutex_lock(&dev_list_lock);
+ list_for_each_entry_safe(dd, tmp, &dev_list, list) {
+ list_del(&dd->list);
+ misc_deregister(&dd->mdev);
+ mcde_dss_destroy_overlay(dd->ovly);
+ /*
+ * TODO: Uncomment when DSS has reference
+ * counting of enable/disable
+ */
+ /* mcde_dss_disable_display(dd->ddev); */
+ kfree(dd);
+ }
+ mutex_unlock(&dev_list_lock);
+
+ mutex_destroy(&dev_list_lock);
+}
+
+static int __init dispdev_init(void)
+{
+ pr_info("%s\n", __func__);
+
+ mutex_init(&dev_list_lock);
+
+ return 0;
+}
+module_init(dispdev_init);
+
+static void __exit dispdev_exit(void)
+{
+ dispdev_destroy_all();
+ pr_info("%s\n", __func__);
+}
+module_exit(dispdev_exit);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Display output device driver");
+
diff --git a/drivers/misc/hwmem/Makefile b/drivers/misc/hwmem/Makefile
new file mode 100644
index 00000000000..c307616a181
--- /dev/null
+++ b/drivers/misc/hwmem/Makefile
@@ -0,0 +1,3 @@
+hwmem-objs := hwmem-main.o hwmem-ioctl.o cache_handler.o contig_alloc.o
+
+obj-$(CONFIG_HWMEM) += hwmem.o
diff --git a/drivers/misc/hwmem/cache_handler.c b/drivers/misc/hwmem/cache_handler.c
new file mode 100644
index 00000000000..e0ab4ee6cf8
--- /dev/null
+++ b/drivers/misc/hwmem/cache_handler.c
@@ -0,0 +1,510 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Cache handler
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/hwmem.h>
+
+#include <asm/pgtable.h>
+
+#include <mach/dcache.h>
+
+#include "cache_handler.h"
+
+#define U32_MAX (~(u32)0)
+
+enum hwmem_alloc_flags cachi_get_cache_settings(
+ enum hwmem_alloc_flags requested_cache_settings);
+void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings,
+ pgprot_t *pgprot);
+
+static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access,
+ struct hwmem_region *region);
+static void sync_buf_post_cpu(struct cach_buf *buf,
+ enum hwmem_access next_access, struct hwmem_region *next_region);
+
+static void invalidate_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+static void clean_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+static void flush_cpu_cache(struct cach_buf *buf,
+ struct cach_range *range_2b_used);
+
+static void null_range(struct cach_range *range);
+static void expand_range(struct cach_range *range,
+ struct cach_range *range_2_add);
+/*
+ * Expands range to one of enclosing_range's two edges. The function will
+ * choose which of enclosing_range's edges to expand range to in such a
+ * way that the size of range is minimized. range must be located inside
+ * enclosing_range.
+ */
+static void expand_range_2_edge(struct cach_range *range,
+ struct cach_range *enclosing_range);
+static void shrink_range(struct cach_range *range,
+ struct cach_range *range_2_remove);
+static bool is_non_empty_range(struct cach_range *range);
+static void intersect_range(struct cach_range *range_1,
+ struct cach_range *range_2, struct cach_range *intersection);
+/* Align_up restrictions apply here too */
+static void align_range_up(struct cach_range *range, u32 alignment);
+static u32 range_length(struct cach_range *range);
+static void region_2_range(struct hwmem_region *region, u32 buffer_size,
+ struct cach_range *range);
+
+static void *offset_2_vaddr(struct cach_buf *buf, u32 offset);
+static u32 offset_2_paddr(struct cach_buf *buf, u32 offset);
+
+/* Saturates, might return unaligned values when that happens */
+static u32 align_up(u32 value, u32 alignment);
+static u32 align_down(u32 value, u32 alignment);
+
+/*
+ * Exported functions
+ */
+
+void cach_init_buf(struct cach_buf *buf, enum hwmem_alloc_flags cache_settings,
+ u32 size)
+{
+ buf->vstart = NULL;
+ buf->pstart = 0;
+ buf->size = size;
+
+ buf->cache_settings = cachi_get_cache_settings(cache_settings);
+}
+
+void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr)
+{
+ bool tmp;
+
+ buf->vstart = vaddr;
+ buf->pstart = paddr;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+ /*
+ * Keep whatever is in the cache. This way we avoid an
+ * unnecessary synch if CPU is the first user.
+ */
+ buf->range_in_cpu_cache.start = 0;
+ buf->range_in_cpu_cache.end = buf->size;
+ align_range_up(&buf->range_in_cpu_cache,
+ get_dcache_granularity());
+ buf->range_dirty_in_cpu_cache.start = 0;
+ buf->range_dirty_in_cpu_cache.end = buf->size;
+ align_range_up(&buf->range_dirty_in_cpu_cache,
+ get_dcache_granularity());
+ } else {
+ flush_cpu_dcache(buf->vstart, buf->pstart, buf->size, false,
+ &tmp);
+ drain_cpu_write_buf();
+
+ null_range(&buf->range_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ }
+ null_range(&buf->range_invalid_in_cpu_cache);
+}
+
+void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot)
+{
+ cachi_set_pgprot_cache_options(buf->cache_settings, pgprot);
+}
+
+void cach_set_domain(struct cach_buf *buf, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region)
+{
+ struct hwmem_region *__region;
+ struct hwmem_region full_region;
+
+ if (region != NULL) {
+ __region = region;
+ } else {
+ full_region.offset = 0;
+ full_region.count = 1;
+ full_region.start = 0;
+ full_region.end = buf->size;
+ full_region.size = buf->size;
+
+ __region = &full_region;
+ }
+
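+	/*
+	 * SYNC domain: the CPU is done with the buffer, so clean/flush caches
+	 * for the next user. CPU domain: prepare for CPU access, performing
+	 * any deferred invalidates.
+	 */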
+ switch (domain) {
+ case HWMEM_DOMAIN_SYNC:
+ sync_buf_post_cpu(buf, access, __region);
+
+ break;
+
+ case HWMEM_DOMAIN_CPU:
+ sync_buf_pre_cpu(buf, access, __region);
+
+ break;
+ }
+}
+
+/*
+ * Local functions
+ */
+
+enum hwmem_alloc_flags __attribute__((weak)) cachi_get_cache_settings(
+ enum hwmem_alloc_flags requested_cache_settings)
+{
+ static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED |
+ HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT |
+ HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE |
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY;
+ /* We don't know the cache setting so we assume worst case. */
+ static const u32 CACHE_SETTING = HWMEM_ALLOC_HINT_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_CACHED | HWMEM_ALLOC_HINT_CACHE_WB |
+ HWMEM_ALLOC_HINT_CACHE_AOW |
+ HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE;
+
+ if (requested_cache_settings & CACHE_ON_FLAGS_MASK)
+ return CACHE_SETTING;
+ else if (requested_cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE ||
+ (requested_cache_settings & HWMEM_ALLOC_HINT_UNCACHED &&
+ !(requested_cache_settings &
+ HWMEM_ALLOC_HINT_NO_WRITE_COMBINE)))
+ return HWMEM_ALLOC_HINT_WRITE_COMBINE;
+ else if (requested_cache_settings &
+ (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE |
+ HWMEM_ALLOC_HINT_UNCACHED))
+ return 0;
+ else
+ /* Nothing specified, use cached */
+ return CACHE_SETTING;
+}
+
+void __attribute__((weak)) cachi_set_pgprot_cache_options(
+ enum hwmem_alloc_flags cache_settings, pgprot_t *pgprot)
+{
+ if (cache_settings & HWMEM_ALLOC_HINT_CACHED)
+ *pgprot = *pgprot; /* To silence compiler and checkpatch */
+ else if (cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE)
+ *pgprot = pgprot_writecombine(*pgprot);
+ else
+ *pgprot = pgprot_noncached(*pgprot);
+}
+
+bool __attribute__((weak)) speculative_data_prefetch(void)
+{
+ /* We don't know so we go with the safe alternative */
+ return true;
+}
+
+static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access,
+ struct hwmem_region *region)
+{
+ bool write = access & HWMEM_ACCESS_WRITE;
+ bool read = access & HWMEM_ACCESS_READ;
+
+ if (!write && !read)
+ return;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
+ struct cach_range region_range;
+
+ region_2_range(region, buf->size, &region_range);
+
+ if (read || (write && buf->cache_settings &
+ HWMEM_ALLOC_HINT_CACHE_WB))
+			/* Perform deferred invalidates */
+ invalidate_cpu_cache(buf, &region_range);
+ if (read || (write && buf->cache_settings &
+ HWMEM_ALLOC_HINT_CACHE_AOW))
+ expand_range(&buf->range_in_cpu_cache, &region_range);
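+		/*
+		 * With a write-back cache, CPU writes stay dirty in the
+		 * cache; track the written range so it can be cleaned before
+		 * other users read the buffer.
+		 */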
+ if (write && buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_WB) {
+ struct cach_range dirty_range_addition;
+
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)
+ dirty_range_addition = region_range;
+ else
+ intersect_range(&buf->range_in_cpu_cache,
+ &region_range, &dirty_range_addition);
+
+ expand_range(&buf->range_dirty_in_cpu_cache,
+ &dirty_range_addition);
+ }
+ }
+ if (buf->cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) {
+ if (write)
+ buf->in_cpu_write_buf = true;
+ }
+}
+
+static void sync_buf_post_cpu(struct cach_buf *buf,
+ enum hwmem_access next_access, struct hwmem_region *next_region)
+{
+ bool write = next_access & HWMEM_ACCESS_WRITE;
+ bool read = next_access & HWMEM_ACCESS_READ;
+ struct cach_range region_range;
+
+ if (!write && !read)
+ return;
+
+ region_2_range(next_region, buf->size, &region_range);
+
+ if (write) {
+ if (speculative_data_prefetch()) {
+ /* Defer invalidate */
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_in_cpu_cache,
+ &region_range, &intersection);
+
+ expand_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+
+ clean_cpu_cache(buf, &region_range);
+ } else {
+ flush_cpu_cache(buf, &region_range);
+ }
+ }
+ if (read)
+ clean_cpu_cache(buf, &region_range);
+
+ if (buf->in_cpu_write_buf) {
+ drain_cpu_write_buf();
+
+ buf->in_cpu_write_buf = false;
+ }
+}
+
+static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_invalid_in_cpu_cache, range,
+ &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool flushed_everything;
+
+ expand_range_2_edge(&intersection,
+ &buf->range_invalid_in_cpu_cache);
+
+ /*
+ * Cache handler never uses invalidate to discard data in the
+ * cache so we can use flush instead which is considerably
+ * faster for large buffers.
+ */
+ flush_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &flushed_everything);
+
+ if (flushed_everything) {
+ null_range(&buf->range_invalid_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ } else {
+ /*
+ * No need to shrink range_in_cpu_cache as invalidate
+ * is only used when we can't keep track of what's in
+ * the CPU cache.
+ */
+ shrink_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+ }
+ }
+}
+
+static void clean_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_dirty_in_cpu_cache, range, &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool cleaned_everything;
+
+ expand_range_2_edge(&intersection,
+ &buf->range_dirty_in_cpu_cache);
+
+ clean_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &cleaned_everything);
+
+ if (cleaned_everything)
+ null_range(&buf->range_dirty_in_cpu_cache);
+ else
+ shrink_range(&buf->range_dirty_in_cpu_cache,
+ &intersection);
+ }
+}
+
+static void flush_cpu_cache(struct cach_buf *buf, struct cach_range *range)
+{
+ struct cach_range intersection;
+
+ intersect_range(&buf->range_in_cpu_cache, range, &intersection);
+ if (is_non_empty_range(&intersection)) {
+ bool flushed_everything;
+
+ expand_range_2_edge(&intersection, &buf->range_in_cpu_cache);
+
+ flush_cpu_dcache(
+ offset_2_vaddr(buf, intersection.start),
+ offset_2_paddr(buf, intersection.start),
+ range_length(&intersection),
+ buf->cache_settings &
+ HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
+ &flushed_everything);
+
+ if (flushed_everything) {
+ if (!speculative_data_prefetch())
+ null_range(&buf->range_in_cpu_cache);
+ null_range(&buf->range_dirty_in_cpu_cache);
+ null_range(&buf->range_invalid_in_cpu_cache);
+ } else {
+ if (!speculative_data_prefetch())
+ shrink_range(&buf->range_in_cpu_cache,
+ &intersection);
+ shrink_range(&buf->range_dirty_in_cpu_cache,
+ &intersection);
+ shrink_range(&buf->range_invalid_in_cpu_cache,
+ &intersection);
+ }
+ }
+}
+
+static void null_range(struct cach_range *range)
+{
+ range->start = U32_MAX;
+ range->end = 0;
+}
+
+static void expand_range(struct cach_range *range,
+ struct cach_range *range_2_add)
+{
+ range->start = min(range->start, range_2_add->start);
+ range->end = max(range->end, range_2_add->end);
+}
+
+/*
+ * Expands range to one of enclosing_range's two edges. The function will
+ * choose which of enclosing_range's edges to expand range to in such a
+ * way that the size of range is minimized. range must be located inside
+ * enclosing_range.
+ */
+static void expand_range_2_edge(struct cach_range *range,
+ struct cach_range *enclosing_range)
+{
+ u32 space_on_low_side = range->start - enclosing_range->start;
+ u32 space_on_high_side = enclosing_range->end - range->end;
+
+ if (space_on_low_side < space_on_high_side)
+ range->start = enclosing_range->start;
+ else
+ range->end = enclosing_range->end;
+}
+
+static void shrink_range(struct cach_range *range,
+ struct cach_range *range_2_remove)
+{
+ if (range_2_remove->start > range->start)
+ range->end = min(range->end, range_2_remove->start);
+ else
+ range->start = max(range->start, range_2_remove->end);
+
+ if (range->start >= range->end)
+ null_range(range);
+}
+
+static bool is_non_empty_range(struct cach_range *range)
+{
+ return range->end > range->start;
+}
+
+static void intersect_range(struct cach_range *range_1,
+ struct cach_range *range_2, struct cach_range *intersection)
+{
+ intersection->start = max(range_1->start, range_2->start);
+ intersection->end = min(range_1->end, range_2->end);
+
+ if (intersection->start >= intersection->end)
+ null_range(intersection);
+}
+
+/* Align_up restrictions apply here too */
+static void align_range_up(struct cach_range *range, u32 alignment)
+{
+ if (!is_non_empty_range(range))
+ return;
+
+ range->start = align_down(range->start, alignment);
+ range->end = align_up(range->end, alignment);
+}
+
+static u32 range_length(struct cach_range *range)
+{
+ if (is_non_empty_range(range))
+ return range->end - range->start;
+ else
+ return 0;
+}
+
+static void region_2_range(struct hwmem_region *region, u32 buffer_size,
+ struct cach_range *range)
+{
+ /*
+	 * We don't care about invalid regions; instead we limit the region's
+	 * range to the buffer's range. This should work well enough; worst
+	 * case we sync the entire buffer when we get an invalid region, which
+	 * is acceptable.
+ */
+ range->start = region->offset + region->start;
+ range->end = min(region->offset + (region->count * region->size) -
+ (region->size - region->end), buffer_size);
+ if (range->start >= range->end) {
+ null_range(range);
+ return;
+ }
+
+ align_range_up(range, get_dcache_granularity());
+}
+
+static void *offset_2_vaddr(struct cach_buf *buf, u32 offset)
+{
+ return (void *)((u32)buf->vstart + offset);
+}
+
+static u32 offset_2_paddr(struct cach_buf *buf, u32 offset)
+{
+ return buf->pstart + offset;
+}
+
+/* Saturates, might return unaligned values when that happens */
+static u32 align_up(u32 value, u32 alignment)
+{
+ u32 remainder = value % alignment;
+ u32 value_2_add;
+
+ if (remainder == 0)
+ return value;
+
+ value_2_add = alignment - remainder;
+
+ if (value_2_add > U32_MAX - value) /* Will overflow */
+ return U32_MAX;
+
+ return value + value_2_add;
+}
+
+static u32 align_down(u32 value, u32 alignment)
+{
+ u32 remainder = value % alignment;
+ if (remainder == 0)
+ return value;
+
+ return value - remainder;
+}
diff --git a/drivers/misc/hwmem/cache_handler.h b/drivers/misc/hwmem/cache_handler.h
new file mode 100644
index 00000000000..792105196fa
--- /dev/null
+++ b/drivers/misc/hwmem/cache_handler.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Cache handler
+ *
+ * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/*
+ * Cache handler can not handle simultaneous execution! The caller has to
+ * ensure such a situation does not occur.
+ */
+
+#ifndef _CACHE_HANDLER_H_
+#define _CACHE_HANDLER_H_
+
+#include <linux/types.h>
+#include <linux/hwmem.h>
+
+/*
+ * To avoid duplicating all datatypes we reuse the hwmem datatypes. If someone
+ * wants to use the cache handler without hwmem, we will have to define our own
+ * datatypes.
+ */
+
+struct cach_range {
+ u32 start; /* Inclusive */
+ u32 end; /* Exclusive */
+};
+
+/*
+ * Internal, do not touch!
+ */
+struct cach_buf {
+ void *vstart;
+ u32 pstart;
+ u32 size;
+
+ /* Remaining hints are active */
+ enum hwmem_alloc_flags cache_settings;
+
+ bool in_cpu_write_buf;
+ struct cach_range range_in_cpu_cache;
+ struct cach_range range_dirty_in_cpu_cache;
+ struct cach_range range_invalid_in_cpu_cache;
+};
+
+void cach_init_buf(struct cach_buf *buf,
+ enum hwmem_alloc_flags cache_settings, u32 size);
+
+void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr);
+
+void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot);
+
+void cach_set_domain(struct cach_buf *buf, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region);
+
+#endif /* _CACHE_HANDLER_H_ */
diff --git a/drivers/misc/hwmem/contig_alloc.c b/drivers/misc/hwmem/contig_alloc.c
new file mode 100644
index 00000000000..31533ed5988
--- /dev/null
+++ b/drivers/misc/hwmem/contig_alloc.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Contiguous memory allocator
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>,
+ * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <asm/sizes.h>
+
+#define MAX_INSTANCE_NAME_LENGTH 31
+
+struct alloc {
+ struct list_head list;
+
+ bool in_use;
+ phys_addr_t paddr;
+ size_t size;
+};
+
+struct instance {
+ struct list_head list;
+
+ char name[MAX_INSTANCE_NAME_LENGTH + 1];
+
+ phys_addr_t region_paddr;
+ void *region_kaddr;
+ size_t region_size;
+
+ struct list_head alloc_list;
+
+#ifdef CONFIG_DEBUG_FS
+ struct inode *debugfs_inode;
+ int cona_status_free;
+ int cona_status_used;
+ int cona_status_max_cont;
+ int cona_status_max_check;
+ int cona_status_biggest_free;
+ int cona_status_printed;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+};
+
+static LIST_HEAD(instance_list);
+
+static DEFINE_MUTEX(lock);
+
+void *cona_create(const char *name, phys_addr_t region_paddr,
+ size_t region_size);
+void *cona_alloc(void *instance, size_t size);
+void cona_free(void *instance, void *alloc);
+phys_addr_t cona_get_alloc_paddr(void *alloc);
+void *cona_get_alloc_kaddr(void *instance, void *alloc);
+size_t cona_get_alloc_size(void *alloc);
+
+static int init_alloc_list(struct instance *instance);
+static void clean_alloc_list(struct instance *instance);
+static struct alloc *find_free_alloc_bestfit(struct instance *instance,
+ size_t size);
+static struct alloc *split_allocation(struct alloc *alloc,
+ size_t new_alloc_size);
+static phys_addr_t get_alloc_offset(struct instance *instance,
+ struct alloc *alloc);
+
+void *cona_create(const char *name, phys_addr_t region_paddr,
+ size_t region_size)
+{
+ int ret;
+ struct instance *instance;
+ struct vm_struct *vm_area;
+
+ if (region_size == 0)
+ return ERR_PTR(-EINVAL);
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (instance == NULL)
+ return ERR_PTR(-ENOMEM);
+
+	/* Copy the name, truncating it if it is too long */
+	strlcpy(instance->name, name, sizeof(instance->name));
+ instance->region_paddr = region_paddr;
+ instance->region_size = region_size;
+
+ vm_area = get_vm_area(region_size, VM_IOREMAP);
+ if (vm_area == NULL) {
+		printk(KERN_WARNING "CONA: Failed to allocate %u bytes of"
+			" kernel virtual memory\n", region_size);
+ ret = -ENOMSG;
+ goto vmem_alloc_failed;
+ }
+ instance->region_kaddr = vm_area->addr;
+
+ INIT_LIST_HEAD(&instance->alloc_list);
+ ret = init_alloc_list(instance);
+ if (ret < 0)
+ goto init_alloc_list_failed;
+
+ mutex_lock(&lock);
+ list_add_tail(&instance->list, &instance_list);
+ mutex_unlock(&lock);
+
+ return instance;
+
+init_alloc_list_failed:
+ vm_area = remove_vm_area(instance->region_kaddr);
+ if (vm_area == NULL)
+ printk(KERN_ERR "CONA: Failed to free kernel virtual memory,"
+ " resource leak!\n");
+
+ kfree(vm_area);
+vmem_alloc_failed:
+ kfree(instance);
+
+ return ERR_PTR(ret);
+}
+
+void *cona_alloc(void *instance, size_t size)
+{
+ struct instance *instance_l = (struct instance *)instance;
+ struct alloc *alloc;
+
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&lock);
+
+ alloc = find_free_alloc_bestfit(instance_l, size);
+ if (IS_ERR(alloc))
+ goto out;
+ if (size < alloc->size) {
+ alloc = split_allocation(alloc, size);
+ if (IS_ERR(alloc))
+ goto out;
+ } else {
+ alloc->in_use = true;
+ }
+#ifdef CONFIG_DEBUG_FS
+ instance_l->cona_status_max_cont += alloc->size;
+ instance_l->cona_status_max_check =
+ max(instance_l->cona_status_max_check,
+ instance_l->cona_status_max_cont);
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+
+void cona_free(void *instance, void *alloc)
+{
+ struct instance *instance_l = (struct instance *)instance;
+ struct alloc *alloc_l = (struct alloc *)alloc;
+ struct alloc *other;
+
+ mutex_lock(&lock);
+
+ alloc_l->in_use = false;
+
+#ifdef CONFIG_DEBUG_FS
+ instance_l->cona_status_max_cont -= alloc_l->size;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+ other = list_entry(alloc_l->list.prev, struct alloc, list);
+ if ((alloc_l->list.prev != &instance_l->alloc_list) &&
+ !other->in_use) {
+ other->size += alloc_l->size;
+ list_del(&alloc_l->list);
+ kfree(alloc_l);
+ alloc_l = other;
+ }
+ other = list_entry(alloc_l->list.next, struct alloc, list);
+ if ((alloc_l->list.next != &instance_l->alloc_list) &&
+ !other->in_use) {
+ alloc_l->size += other->size;
+ list_del(&other->list);
+ kfree(other);
+ }
+
+ mutex_unlock(&lock);
+}
+
+phys_addr_t cona_get_alloc_paddr(void *alloc)
+{
+ return ((struct alloc *)alloc)->paddr;
+}
+
+void *cona_get_alloc_kaddr(void *instance, void *alloc)
+{
+ struct instance *instance_l = (struct instance *)instance;
+
+ return instance_l->region_kaddr + get_alloc_offset(instance_l,
+ (struct alloc *)alloc);
+}
+
+size_t cona_get_alloc_size(void *alloc)
+{
+ return ((struct alloc *)alloc)->size;
+}
+
+static int init_alloc_list(struct instance *instance)
+{
+ /*
+ * Hack to not get any allocs that cross a 64MiB boundary as B2R2 can't
+ * handle that.
+ */
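+	/*
+	 * This is done by permanently reserving the last page before every
+	 * 64 MiB line as an in_use guard so that free chunks are never merged
+	 * or handed out across it. For example (assuming 4 KiB pages), a
+	 * region covering 0x03000000-0x05000000 gets the page at 0x03FFF000
+	 * marked in_use, keeping the free chunks on either side of the
+	 * 0x04000000 line apart.
+	 */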
+ int ret;
+ u32 curr_pos = instance->region_paddr;
+ u32 region_end = instance->region_paddr + instance->region_size;
+ u32 next_64mib_boundary = (curr_pos + SZ_64M) & ~(SZ_64M - 1);
+ struct alloc *alloc;
+
+ if (PAGE_SIZE >= SZ_64M) {
+ printk(KERN_WARNING "CONA: PAGE_SIZE >= 64MiB\n");
+ return -ENOMSG;
+ }
+
+ while (next_64mib_boundary < region_end) {
+ if (next_64mib_boundary - curr_pos > PAGE_SIZE) {
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = next_64mib_boundary - curr_pos -
+ PAGE_SIZE;
+ alloc->in_use = false;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+ curr_pos = alloc->paddr + alloc->size;
+ }
+
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = PAGE_SIZE;
+ alloc->in_use = true;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+ curr_pos = alloc->paddr + alloc->size;
+
+#ifdef CONFIG_DEBUG_FS
+ instance->cona_status_max_cont += alloc->size;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+ next_64mib_boundary += SZ_64M;
+ }
+
+ alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ alloc->paddr = curr_pos;
+ alloc->size = region_end - curr_pos;
+ alloc->in_use = false;
+ list_add_tail(&alloc->list, &instance->alloc_list);
+
+ return 0;
+
+error:
+ clean_alloc_list(instance);
+
+ return ret;
+}
+
+static void clean_alloc_list(struct instance *instance)
+{
+ while (list_empty(&instance->alloc_list) == 0) {
+ struct alloc *i = list_first_entry(&instance->alloc_list,
+ struct alloc, list);
+
+ list_del(&i->list);
+
+ kfree(i);
+ }
+}
+
+static struct alloc *find_free_alloc_bestfit(struct instance *instance,
+ size_t size)
+{
+ size_t best_diff = ~(size_t)0;
+ struct alloc *alloc = NULL, *i;
+
+ list_for_each_entry(i, &instance->alloc_list, list) {
+ size_t diff = i->size - size;
+ if (i->in_use || i->size < size)
+ continue;
+ if (diff < best_diff) {
+ alloc = i;
+ best_diff = diff;
+ }
+ }
+
+ return alloc != NULL ? alloc : ERR_PTR(-ENOMEM);
+}
+
+static struct alloc *split_allocation(struct alloc *alloc,
+ size_t new_alloc_size)
+{
+ struct alloc *new_alloc;
+
+ new_alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL);
+ if (new_alloc == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ new_alloc->in_use = true;
+ new_alloc->paddr = alloc->paddr;
+ new_alloc->size = new_alloc_size;
+ alloc->size -= new_alloc_size;
+ alloc->paddr += new_alloc_size;
+
+ list_add_tail(&new_alloc->list, &alloc->list);
+
+ return new_alloc;
+}
+
+static phys_addr_t get_alloc_offset(struct instance *instance,
+ struct alloc *alloc)
+{
+ return alloc->paddr - instance->region_paddr;
+}
+
+/* Debug */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int print_alloc(struct instance *instance, struct alloc *alloc,
+ char **buf, size_t buf_size);
+static int print_alloc_status(struct instance *instance, char **buf,
+ size_t buf_size);
+static struct instance *get_instance_from_file(struct file *file);
+static int debugfs_allocs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_allocs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_allocs_read,
+};
+
+static int print_alloc(struct instance *instance, struct alloc *alloc,
+ char **buf, size_t buf_size)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ if (i == 1) {
+ if (alloc->in_use)
+ instance->cona_status_used += alloc->size;
+ else
+ instance->cona_status_free += alloc->size;
+ }
+
+ if (!alloc->in_use) {
+ instance->cona_status_biggest_free =
+ max((size_t)alloc->size,
+ (size_t)instance->cona_status_biggest_free);
+ }
+
+ ret = snprintf(*buf, buf_size_l, "paddr: %10x\tsize: %10u\t"
+ "in use: %1u\t used: %10u (%dMB)"
+ " \t free: %10u (%dMB)\n",
+ alloc->paddr,
+ alloc->size,
+ alloc->in_use,
+ instance->cona_status_used,
+ instance->cona_status_used/1024/1024,
+ instance->cona_status_free,
+ instance->cona_status_free/1024/1024);
+
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static int print_alloc_status(struct instance *instance, char **buf,
+ size_t buf_size)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ ret = snprintf(*buf, buf_size_l, "Overall peak usage:\t%10u "
+ "(%dMB)\nCurrent max usage:\t%10u (%dMB)\n"
+ "Current biggest free:\t%10d (%dMB)\n",
+ instance->cona_status_max_check,
+ instance->cona_status_max_check/1024/1024,
+ instance->cona_status_max_cont,
+ instance->cona_status_max_cont/1024/1024,
+ instance->cona_status_biggest_free,
+ instance->cona_status_biggest_free/1024/1024);
+
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static struct instance *get_instance_from_file(struct file *file)
+{
+ struct instance *curr_instance;
+
+ list_for_each_entry(curr_instance, &instance_list, list) {
+ if (file->f_dentry->d_inode == curr_instance->debugfs_inode)
+ return curr_instance;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static int debugfs_allocs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+	/*
+	 * We assume the supplied buffer and PAGE_SIZE are large enough to hold
+	 * information about at least one alloc; if not, no data will be
+	 * returned.
+	 */
+
+ int ret;
+ struct instance *instance;
+ struct alloc *curr_alloc;
+ char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *local_buf_pos = local_buf;
+ size_t available_space = min((size_t)PAGE_SIZE, count);
+	/* private_data starts as NULL in open; we treat that as 0. */
+ void **curr_pos = &file->private_data;
+ size_t bytes_read;
+ bool readout_aborted = false;
+
+ if (local_buf == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&lock);
+ instance = get_instance_from_file(file);
+ if (IS_ERR(instance)) {
+ ret = PTR_ERR(instance);
+ goto out;
+ }
+
+ list_for_each_entry(curr_alloc, &instance->alloc_list, list) {
+ phys_addr_t alloc_offset = get_alloc_offset(instance,
+ curr_alloc);
+ if (alloc_offset < (phys_addr_t)*curr_pos)
+ continue;
+
+ ret = print_alloc(instance, curr_alloc, &local_buf_pos,
+ available_space - (size_t)(local_buf_pos -
+ local_buf));
+
+ if (ret == -EINVAL) { /* No more room */
+ readout_aborted = true;
+ break;
+ } else if (ret < 0) {
+ goto out;
+ }
+		/*
+		 * There could be an overflow issue here in the unlikely case
+		 * where the region is placed at the end of the address range
+		 * and the last alloc is 1 byte large. Since this is debug code
+		 * and that case most likely will never happen, fixing it is
+		 * deferred until it actually occurs.
+		 */
+ *curr_pos = (void *)(alloc_offset + 1);
+
+ /* Make sure to also print status if there were any prints */
+ instance->cona_status_printed = false;
+ }
+
+ if (!readout_aborted && !instance->cona_status_printed) {
+ ret = print_alloc_status(instance, &local_buf_pos,
+ available_space -
+ (size_t)(local_buf_pos - local_buf));
+
+ if (ret == -EINVAL) /* No more room */
+ readout_aborted = true;
+ else if (ret < 0)
+ goto out;
+ else
+ instance->cona_status_printed = true;
+ }
+
+ if (!readout_aborted) {
+ instance->cona_status_free = 0;
+ instance->cona_status_used = 0;
+ instance->cona_status_biggest_free = 0;
+ }
+
+ bytes_read = (size_t)(local_buf_pos - local_buf);
+
+	/* copy_to_user() returns the number of bytes left uncopied, never < 0 */
+	if (copy_to_user(buf, local_buf, bytes_read)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+ ret = bytes_read;
+
+out:
+ kfree(local_buf);
+ mutex_unlock(&lock);
+
+ return ret;
+}
+
+static int __init init_debugfs(void)
+{
+ struct instance *curr_instance;
+ struct dentry *debugfs_root_dir = debugfs_create_dir("cona", NULL);
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(curr_instance, &instance_list, list) {
+ struct dentry *file_dentry;
+ char tmp_str[MAX_INSTANCE_NAME_LENGTH + 7 + 1];
+ tmp_str[0] = '\0';
+ strcat(tmp_str, curr_instance->name);
+ strcat(tmp_str, "_allocs");
+ file_dentry = debugfs_create_file(tmp_str, 0444,
+ debugfs_root_dir, 0, &debugfs_allocs_fops);
+ if (file_dentry != NULL)
+ curr_instance->debugfs_inode = file_dentry->d_inode;
+ }
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+/*
+ * Must be executed after all instances have been created, hence the
+ * late_initcall.
+ */
+late_initcall(init_debugfs);
+
+#endif /* #ifdef CONFIG_DEBUG_FS */
diff --git a/drivers/misc/hwmem/hwmem-ioctl.c b/drivers/misc/hwmem/hwmem-ioctl.c
new file mode 100644
index 00000000000..e9e50de78bd
--- /dev/null
+++ b/drivers/misc/hwmem/hwmem-ioctl.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/mm_types.h>
+#include <linux/hwmem.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+
+static int hwmem_open(struct inode *inode, struct file *file);
+static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma);
+static int hwmem_release_fop(struct inode *inode, struct file *file);
+static long hwmem_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+static unsigned long hwmem_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+static const struct file_operations hwmem_fops = {
+ .open = hwmem_open,
+ .mmap = hwmem_ioctl_mmap,
+ .unlocked_ioctl = hwmem_ioctl,
+ .release = hwmem_release_fop,
+ .get_unmapped_area = hwmem_get_unmapped_area,
+};
+
+static struct miscdevice hwmem_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hwmem",
+ .fops = &hwmem_fops,
+};
+
+struct hwmem_file {
+ struct mutex lock;
+ struct idr idr; /* id -> struct hwmem_alloc*, ref counted */
+ struct hwmem_alloc *fd_alloc; /* Ref counted */
+};
+
+static s32 create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc)
+{
+ int id, ret;
+
+ while (true) {
+ if (idr_pre_get(&hwfile->idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
+
+ ret = idr_get_new_above(&hwfile->idr, alloc, 1, &id);
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ return -ENOMEM;
+ }
+
+ /*
+ * IDR always returns the lowest free id so there is no wrapping issue
+ * because of this.
+ */
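+	/*
+	 * The id is handed to user space shifted up by PAGE_SHIFT so that it
+	 * can double as an mmap() offset (see hwmem_ioctl_mmap() and
+	 * hwmem_get_unmapped_area()). For example, with 4 KiB pages the first
+	 * id is 1 and user space sees 0x1000; resolve_id() recovers the idr
+	 * slot again with id >> PAGE_SHIFT.
+	 */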
+ if (id >= (s32)1 << (31 - PAGE_SHIFT)) {
+ dev_err(hwmem_device.this_device, "Out of IDs!\n");
+ idr_remove(&hwfile->idr, id);
+ return -ENOMSG;
+ }
+
+ return (s32)id << PAGE_SHIFT;
+}
+
+static void remove_id(struct hwmem_file *hwfile, s32 id)
+{
+ idr_remove(&hwfile->idr, id >> PAGE_SHIFT);
+}
+
+static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = id ? idr_find(&hwfile->idr, id >> PAGE_SHIFT) :
+ hwfile->fd_alloc;
+ if (alloc == NULL)
+ alloc = ERR_PTR(-EINVAL);
+
+ return alloc;
+}
+
+static s32 alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
+{
+ s32 ret = 0;
+ struct hwmem_alloc *alloc;
+
+ alloc = hwmem_alloc(req->size, req->flags, req->default_access,
+ req->mem_type);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ ret = create_id(hwfile, alloc);
+ if (ret < 0)
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int alloc_fd(struct hwmem_file *hwfile, struct hwmem_alloc_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ if (hwfile->fd_alloc)
+ return -EINVAL;
+
+ alloc = hwmem_alloc(req->size, req->flags, req->default_access,
+ req->mem_type);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwfile->fd_alloc = alloc;
+
+ return 0;
+}
+
+static int release(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ if (id == 0)
+ return -EINVAL;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ remove_id(hwfile, id);
+ hwmem_release(alloc);
+
+ return 0;
+}
+
+static int set_cpu_domain(struct hwmem_file *hwfile,
+ struct hwmem_set_domain_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_CPU,
+ (struct hwmem_region *)&req->region);
+}
+
+static int set_sync_domain(struct hwmem_file *hwfile,
+ struct hwmem_set_domain_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_SYNC,
+ (struct hwmem_region *)&req->region);
+}
+
+static int pin(struct hwmem_file *hwfile, struct hwmem_pin_request *req)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_mem_type mem_type;
+ struct hwmem_mem_chunk mem_chunk;
+ size_t mem_chunk_length = 1;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_get_info(alloc, NULL, &mem_type, NULL);
+ if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS)
+ return -EINVAL;
+
+ ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length);
+ if (ret < 0)
+ return ret;
+
+ req->phys_addr = mem_chunk.paddr;
+
+ return 0;
+}
+
+static int unpin(struct hwmem_file *hwfile, s32 id)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_unpin(alloc);
+
+ return 0;
+}
+
+static int set_access(struct hwmem_file *hwfile,
+ struct hwmem_set_access_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ return hwmem_set_access(alloc, req->access, req->pid);
+}
+
+static int get_info(struct hwmem_file *hwfile,
+ struct hwmem_get_info_request *req)
+{
+ struct hwmem_alloc *alloc;
+
+ alloc = resolve_id(hwfile, req->id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ hwmem_get_info(alloc, &req->size, &req->mem_type, &req->access);
+
+ return 0;
+}
+
+static s32 export(struct hwmem_file *hwfile, s32 id)
+{
+ s32 ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ alloc = resolve_id(hwfile, id);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+	/*
+	 * The user could be about to send the buffer to a driver, but there
+	 * is a chance the current thread group doesn't have import rights if
+	 * it gained access to the buffer via an inter-process fd transfer
+	 * (fork, Android binder). In that case the driver will not be able to
+	 * resolve the buffer name. To avoid this we give the current thread
+	 * group import rights. This does not breach security, since the
+	 * process already has access to the buffer (otherwise it would not be
+	 * able to get here).
+	 */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+
+ ret = hwmem_set_access(alloc, (access | HWMEM_ACCESS_IMPORT),
+ task_tgid_nr(current));
+ if (ret < 0)
+ return ret;
+
+ return hwmem_get_name(alloc);
+}
+
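+/*
+ * A sketch of the intended export/import handshake (how the name travels
+ * between the processes is up to the clients): process A calls
+ * HWMEM_EXPORT_IOC on its buffer id and gets a global name back, hands that
+ * name to process B over some IPC mechanism, and B turns it into an id of
+ * its own with HWMEM_IMPORT_IOC (or HWMEM_IMPORT_FD_IOC), provided
+ * HWMEM_ACCESS_IMPORT is set for it.
+ */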
+static s32 import(struct hwmem_file *hwfile, s32 name)
+{
+ s32 ret = 0;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ alloc = hwmem_resolve_by_name(name);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /* Check access permissions for process */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+ if (!(access & HWMEM_ACCESS_IMPORT)) {
+ ret = -EPERM;
+ goto error;
+ }
+
+ ret = create_id(hwfile, alloc);
+ if (ret < 0)
+ goto error;
+
+ return ret;
+
+error:
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int import_fd(struct hwmem_file *hwfile, s32 name)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+ enum hwmem_access access;
+
+ if (hwfile->fd_alloc)
+ return -EINVAL;
+
+ alloc = hwmem_resolve_by_name(name);
+ if (IS_ERR(alloc))
+ return PTR_ERR(alloc);
+
+ /* Check access permissions for process */
+ hwmem_get_info(alloc, NULL, NULL, &access);
+ if (!(access & HWMEM_ACCESS_IMPORT)) {
+ ret = -EPERM;
+ goto error;
+ }
+
+ hwfile->fd_alloc = alloc;
+
+ return 0;
+
+error:
+ hwmem_release(alloc);
+
+ return ret;
+}
+
+static int hwmem_open(struct inode *inode, struct file *file)
+{
+ struct hwmem_file *hwfile;
+
+ hwfile = kzalloc(sizeof(struct hwmem_file), GFP_KERNEL);
+ if (hwfile == NULL)
+ return -ENOMEM;
+
+ idr_init(&hwfile->idr);
+ mutex_init(&hwfile->lock);
+ file->private_data = hwfile;
+
+ return 0;
+}
+
+static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int ret;
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+ struct hwmem_alloc *alloc;
+
+ mutex_lock(&hwfile->lock);
+
+ alloc = resolve_id(hwfile, (s32)vma->vm_pgoff << PAGE_SHIFT);
+ if (IS_ERR(alloc)) {
+ ret = PTR_ERR(alloc);
+ goto out;
+ }
+
+ ret = hwmem_mmap(alloc, vma);
+
+out:
+ mutex_unlock(&hwfile->lock);
+
+ return ret;
+}
+
+static int hwmem_release_idr_for_each_wrapper(int id, void *ptr, void *data)
+{
+ hwmem_release((struct hwmem_alloc *)ptr);
+
+ return 0;
+}
+
+static int hwmem_release_fop(struct inode *inode, struct file *file)
+{
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+
+ idr_for_each(&hwfile->idr, hwmem_release_idr_for_each_wrapper, NULL);
+ idr_remove_all(&hwfile->idr);
+ idr_destroy(&hwfile->idr);
+
+ if (hwfile->fd_alloc)
+ hwmem_release(hwfile->fd_alloc);
+
+ mutex_destroy(&hwfile->lock);
+
+ kfree(hwfile);
+
+ return 0;
+}
+
+static long hwmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOSYS;
+ struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data;
+
+ mutex_lock(&hwfile->lock);
+
+ switch (cmd) {
+ case HWMEM_ALLOC_IOC:
+ {
+ struct hwmem_alloc_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_alloc_request)))
+ ret = -EFAULT;
+ else
+ ret = alloc(hwfile, &req);
+ }
+ break;
+ case HWMEM_ALLOC_FD_IOC:
+ {
+ struct hwmem_alloc_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_alloc_request)))
+ ret = -EFAULT;
+ else
+ ret = alloc_fd(hwfile, &req);
+ }
+ break;
+ case HWMEM_RELEASE_IOC:
+ ret = release(hwfile, (s32)arg);
+ break;
+ case HWMEM_SET_CPU_DOMAIN_IOC:
+ {
+ struct hwmem_set_domain_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_domain_request)))
+ ret = -EFAULT;
+ else
+ ret = set_cpu_domain(hwfile, &req);
+ }
+ break;
+ case HWMEM_SET_SYNC_DOMAIN_IOC:
+ {
+ struct hwmem_set_domain_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_domain_request)))
+ ret = -EFAULT;
+ else
+ ret = set_sync_domain(hwfile, &req);
+ }
+ break;
+ case HWMEM_PIN_IOC:
+ {
+ struct hwmem_pin_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_pin_request)))
+ ret = -EFAULT;
+ else
+ ret = pin(hwfile, &req);
+ if (ret == 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_pin_request)))
+ ret = -EFAULT;
+ }
+ break;
+ case HWMEM_UNPIN_IOC:
+ ret = unpin(hwfile, (s32)arg);
+ break;
+ case HWMEM_SET_ACCESS_IOC:
+ {
+ struct hwmem_set_access_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_set_access_request)))
+ ret = -EFAULT;
+ else
+ ret = set_access(hwfile, &req);
+ }
+ break;
+ case HWMEM_GET_INFO_IOC:
+ {
+ struct hwmem_get_info_request req;
+ if (copy_from_user(&req, (void __user *)arg,
+ sizeof(struct hwmem_get_info_request)))
+ ret = -EFAULT;
+ else
+ ret = get_info(hwfile, &req);
+ if (ret == 0 && copy_to_user((void __user *)arg, &req,
+ sizeof(struct hwmem_get_info_request)))
+ ret = -EFAULT;
+ }
+ break;
+ case HWMEM_EXPORT_IOC:
+ ret = export(hwfile, (s32)arg);
+ break;
+ case HWMEM_IMPORT_IOC:
+ ret = import(hwfile, (s32)arg);
+ break;
+ case HWMEM_IMPORT_FD_IOC:
+ ret = import_fd(hwfile, (s32)arg);
+ break;
+ }
+
+ mutex_unlock(&hwfile->lock);
+
+ return ret;
+}
+
+static unsigned long hwmem_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+	/*
+	 * pgoff is not a real file offset here; it contains a buffer id
+	 * (right-shifted PAGE_SHIFT bits). To avoid confusing
+	 * get_unmapped_area we don't pass on file or pgoff.
+	 */
+ return current->mm->get_unmapped_area(NULL, addr, len, 0, flags);
+}
+
+int __init hwmem_ioctl_init(void)
+{
+ if (PAGE_SHIFT < 1 || PAGE_SHIFT > 30 || sizeof(size_t) != 4 ||
+ sizeof(int) > 4 || sizeof(enum hwmem_alloc_flags) != 4 ||
+ sizeof(enum hwmem_access) != 4 ||
+ sizeof(enum hwmem_mem_type) != 4) {
+ dev_err(hwmem_device.this_device, "PAGE_SHIFT < 1 || PAGE_SHIFT"
+ " > 30 || sizeof(size_t) != 4 || sizeof(int) > 4 ||"
+ " sizeof(enum hwmem_alloc_flags) != 4 || sizeof(enum"
+ " hwmem_access) != 4 || sizeof(enum hwmem_mem_type)"
+ " != 4\n");
+ return -ENOMSG;
+ }
+ if (PAGE_SHIFT > 15)
+ dev_warn(hwmem_device.this_device, "Due to the page size only"
+			" %u ids per file instance are available\n",
+ ((u32)1 << (31 - PAGE_SHIFT)) - 1);
+
+ return misc_register(&hwmem_device);
+}
+
+void __exit hwmem_ioctl_exit(void)
+{
+ misc_deregister(&hwmem_device);
+}
diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c
new file mode 100644
index 00000000000..b91d99bc2be
--- /dev/null
+++ b/drivers/misc/hwmem/hwmem-main.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Hardware memory driver, hwmem
+ *
+ * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>,
+ * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pid.h>
+#include <linux/list.h>
+#include <linux/hwmem.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/kallsyms.h>
+#include <linux/vmalloc.h>
+#include "cache_handler.h"
+
+#define S32_MAX 2147483647
+
+struct hwmem_alloc_threadg_info {
+ struct list_head list;
+
+ struct pid *threadg_pid; /* Ref counted */
+
+ enum hwmem_access access;
+};
+
+struct hwmem_alloc {
+ struct list_head list;
+
+ atomic_t ref_cnt;
+
+ enum hwmem_alloc_flags flags;
+ struct hwmem_mem_type_struct *mem_type;
+
+ void *allocator_hndl;
+ phys_addr_t paddr;
+ void *kaddr;
+ size_t size;
+ s32 name;
+
+ /* Access control */
+ enum hwmem_access default_access;
+ struct list_head threadg_info_list;
+
+ /* Cache handling */
+ struct cach_buf cach_buf;
+
+#ifdef CONFIG_DEBUG_FS
+ /* Debug */
+ void *creator;
+ pid_t creator_tgid;
+#endif /* #ifdef CONFIG_DEBUG_FS */
+};
+
+static struct platform_device *hwdev;
+
+static LIST_HEAD(alloc_list);
+static DEFINE_IDR(global_idr);
+static DEFINE_MUTEX(lock);
+
+static void vm_open(struct vm_area_struct *vma);
+static void vm_close(struct vm_area_struct *vma);
+static struct vm_operations_struct vm_ops = {
+ .open = vm_open,
+ .close = vm_close,
+};
+
+static void kunmap_alloc(struct hwmem_alloc *alloc);
+
+/* Helpers */
+
+static void destroy_alloc_threadg_info(
+ struct hwmem_alloc_threadg_info *info)
+{
+ if (info->threadg_pid)
+ put_pid(info->threadg_pid);
+
+ kfree(info);
+}
+
+static void clean_alloc_threadg_info_list(struct hwmem_alloc *alloc)
+{
+ struct hwmem_alloc_threadg_info *info;
+ struct hwmem_alloc_threadg_info *tmp;
+
+ list_for_each_entry_safe(info, tmp, &(alloc->threadg_info_list),
+ list) {
+ list_del(&info->list);
+ destroy_alloc_threadg_info(info);
+ }
+}
+
+static enum hwmem_access get_access(struct hwmem_alloc *alloc)
+{
+ struct hwmem_alloc_threadg_info *info;
+ struct pid *my_pid;
+ bool found = false;
+
+ my_pid = find_get_pid(task_tgid_nr(current));
+ if (!my_pid)
+ return 0;
+
+ list_for_each_entry(info, &(alloc->threadg_info_list), list) {
+ if (info->threadg_pid == my_pid) {
+ found = true;
+ break;
+ }
+ }
+
+ put_pid(my_pid);
+
+ if (found)
+ return info->access;
+ else
+ return alloc->default_access;
+}
+
+static void clear_alloc_mem(struct hwmem_alloc *alloc)
+{
+ cach_set_domain(&alloc->cach_buf, HWMEM_ACCESS_WRITE,
+ HWMEM_DOMAIN_CPU, NULL);
+
+ memset(alloc->kaddr, 0, alloc->size);
+}
+
+static void destroy_alloc(struct hwmem_alloc *alloc)
+{
+ list_del(&alloc->list);
+
+ if (alloc->name != 0) {
+ idr_remove(&global_idr, alloc->name);
+ alloc->name = 0;
+ }
+
+ clean_alloc_threadg_info_list(alloc);
+
+ kunmap_alloc(alloc);
+
+ if (!IS_ERR_OR_NULL(alloc->allocator_hndl))
+ alloc->mem_type->allocator_api.free(
+ alloc->mem_type->allocator_instance,
+ alloc->allocator_hndl);
+
+ kfree(alloc);
+}
+
+static int kmap_alloc(struct hwmem_alloc *alloc)
+{
+ int ret;
+ pgprot_t pgprot;
+ void *alloc_kaddr;
+
+ alloc_kaddr = alloc->mem_type->allocator_api.get_alloc_kaddr(
+ alloc->mem_type->allocator_instance, alloc->allocator_hndl);
+ if (IS_ERR(alloc_kaddr))
+ return PTR_ERR(alloc_kaddr);
+
+ pgprot = PAGE_KERNEL;
+ cach_set_pgprot_cache_options(&alloc->cach_buf, &pgprot);
+
+ ret = ioremap_page_range((unsigned long)alloc_kaddr,
+ (unsigned long)alloc_kaddr + alloc->size, alloc->paddr, pgprot);
+ if (ret < 0) {
+ dev_warn(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr,
+ alloc->paddr + alloc->size);
+ return ret;
+ }
+
+ alloc->kaddr = alloc_kaddr;
+
+ return 0;
+}
+
+static void kunmap_alloc(struct hwmem_alloc *alloc)
+{
+ if (alloc->kaddr == NULL)
+ return;
+
+ unmap_kernel_range((unsigned long)alloc->kaddr, alloc->size);
+
+ alloc->kaddr = NULL;
+}
+
+static struct hwmem_mem_type_struct *resolve_mem_type(
+ enum hwmem_mem_type mem_type)
+{
+ unsigned int i;
+ for (i = 0; i < hwmem_num_mem_types; i++) {
+ if (hwmem_mem_types[i].id == mem_type)
+ return &hwmem_mem_types[i];
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+/* HWMEM API */
+
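+/*
+ * Rough in-kernel usage sketch (error handling omitted; the variable names
+ * are only illustrative):
+ *
+ *	alloc = hwmem_alloc(size, flags, def_access, HWMEM_MEM_CONTIGUOUS_SYS);
+ *	hwmem_pin(alloc, &chunk, &nr_chunks);	- physical address for hardware
+ *	kaddr = hwmem_kmap(alloc);		- CPU-side mapping
+ *	hwmem_set_domain(alloc, access, domain, &region);
+ *	hwmem_unpin(alloc);
+ *	hwmem_release(alloc);
+ */
+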
+struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags,
+ enum hwmem_access def_access, enum hwmem_mem_type mem_type)
+{
+ int ret;
+ struct hwmem_alloc *alloc;
+
+ if (hwdev == NULL) {
+ printk(KERN_ERR "HWMEM: Badly configured\n");
+ return ERR_PTR(-ENOMSG);
+ }
+
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&lock);
+
+ size = PAGE_ALIGN(size);
+
+ alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL);
+ if (alloc == NULL) {
+ ret = -ENOMEM;
+ goto alloc_alloc_failed;
+ }
+
+ INIT_LIST_HEAD(&alloc->list);
+ atomic_inc(&alloc->ref_cnt);
+ alloc->flags = flags;
+ alloc->default_access = def_access;
+ INIT_LIST_HEAD(&alloc->threadg_info_list);
+#ifdef CONFIG_DEBUG_FS
+ alloc->creator = __builtin_return_address(0);
+ alloc->creator_tgid = task_tgid_nr(current);
+#endif
+ alloc->mem_type = resolve_mem_type(mem_type);
+ if (IS_ERR(alloc->mem_type)) {
+ ret = PTR_ERR(alloc->mem_type);
+ goto resolve_mem_type_failed;
+ }
+
+ alloc->allocator_hndl = alloc->mem_type->allocator_api.alloc(
+ alloc->mem_type->allocator_instance, size);
+ if (IS_ERR(alloc->allocator_hndl)) {
+ ret = PTR_ERR(alloc->allocator_hndl);
+ goto allocator_failed;
+ }
+
+ alloc->paddr = alloc->mem_type->allocator_api.get_alloc_paddr(
+ alloc->allocator_hndl);
+ alloc->size = alloc->mem_type->allocator_api.get_alloc_size(
+ alloc->allocator_hndl);
+
+ cach_init_buf(&alloc->cach_buf, alloc->flags, alloc->size);
+ ret = kmap_alloc(alloc);
+ if (ret < 0)
+ goto kmap_alloc_failed;
+ cach_set_buf_addrs(&alloc->cach_buf, alloc->kaddr, alloc->paddr);
+
+ list_add_tail(&alloc->list, &alloc_list);
+
+ clear_alloc_mem(alloc);
+
+ goto out;
+
+kmap_alloc_failed:
+allocator_failed:
+resolve_mem_type_failed:
+ destroy_alloc(alloc);
+alloc_alloc_failed:
+ alloc = ERR_PTR(ret);
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+EXPORT_SYMBOL(hwmem_alloc);
+
+void hwmem_release(struct hwmem_alloc *alloc)
+{
+ mutex_lock(&lock);
+
+ if (atomic_dec_and_test(&alloc->ref_cnt))
+ destroy_alloc(alloc);
+
+ mutex_unlock(&lock);
+}
+EXPORT_SYMBOL(hwmem_release);
+
+int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access,
+ enum hwmem_domain domain, struct hwmem_region *region)
+{
+ mutex_lock(&lock);
+
+ cach_set_domain(&alloc->cach_buf, access, domain, region);
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hwmem_set_domain);
+
+int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks,
+ u32 *mem_chunks_length)
+{
+ if (*mem_chunks_length < 1) {
+ *mem_chunks_length = 1;
+ return -ENOSPC;
+ }
+
+ mutex_lock(&lock);
+
+ mem_chunks[0].paddr = alloc->paddr;
+ mem_chunks[0].size = alloc->size;
+ *mem_chunks_length = 1;
+
+ mutex_unlock(&lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hwmem_pin);
+
+void hwmem_unpin(struct hwmem_alloc *alloc)
+{
+}
+EXPORT_SYMBOL(hwmem_unpin);
+
+static void vm_open(struct vm_area_struct *vma)
+{
+ atomic_inc(&((struct hwmem_alloc *)vma->vm_private_data)->ref_cnt);
+}
+
+static void vm_close(struct vm_area_struct *vma)
+{
+ hwmem_release((struct hwmem_alloc *)vma->vm_private_data);
+}
+
+int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma)
+{
+ int ret = 0;
+ unsigned long vma_size = vma->vm_end - vma->vm_start;
+ enum hwmem_access access;
+ mutex_lock(&lock);
+
+ access = get_access(alloc);
+
+ /* Check permissions */
+ if ((!(access & HWMEM_ACCESS_WRITE) &&
+ (vma->vm_flags & VM_WRITE)) ||
+ (!(access & HWMEM_ACCESS_READ) &&
+ (vma->vm_flags & VM_READ))) {
+ ret = -EPERM;
+ goto illegal_access;
+ }
+
+ if (vma_size > alloc->size) {
+ ret = -EINVAL;
+ goto illegal_size;
+ }
+
+	/*
+	 * We don't want Linux to do anything (merging etc.) with our VMAs as
+	 * the offset is not necessarily valid.
+	 */
+ vma->vm_flags |= VM_SPECIAL;
+ cach_set_pgprot_cache_options(&alloc->cach_buf, &vma->vm_page_prot);
+ vma->vm_private_data = (void *)alloc;
+ atomic_inc(&alloc->ref_cnt);
+ vma->vm_ops = &vm_ops;
+
+ ret = remap_pfn_range(vma, vma->vm_start, alloc->paddr >> PAGE_SHIFT,
+ min(vma_size, (unsigned long)alloc->size), vma->vm_page_prot);
+ if (ret < 0)
+ goto map_failed;
+
+ goto out;
+
+map_failed:
+ atomic_dec(&alloc->ref_cnt);
+illegal_size:
+illegal_access:
+
+out:
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_mmap);
+
+void *hwmem_kmap(struct hwmem_alloc *alloc)
+{
+ void *ret;
+
+ mutex_lock(&lock);
+
+ ret = alloc->kaddr;
+
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_kmap);
+
+void hwmem_kunmap(struct hwmem_alloc *alloc)
+{
+}
+EXPORT_SYMBOL(hwmem_kunmap);
+
+int hwmem_set_access(struct hwmem_alloc *alloc,
+ enum hwmem_access access, pid_t pid_nr)
+{
+ int ret;
+ struct hwmem_alloc_threadg_info *info;
+ struct pid *pid;
+ bool found = false;
+
+ pid = find_get_pid(pid_nr);
+ if (!pid) {
+ ret = -EINVAL;
+ goto error_get_pid;
+ }
+
+ list_for_each_entry(info, &(alloc->threadg_info_list), list) {
+ if (info->threadg_pid == pid) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto error_alloc_info;
+ }
+
+ info->threadg_pid = pid;
+ info->access = access;
+
+ list_add_tail(&(info->list), &(alloc->threadg_info_list));
+ } else {
+ info->access = access;
+ }
+
+ return 0;
+
+error_alloc_info:
+ put_pid(pid);
+error_get_pid:
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_set_access);
+
+void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size,
+ enum hwmem_mem_type *mem_type, enum hwmem_access *access)
+{
+ mutex_lock(&lock);
+
+ if (size != NULL)
+ *size = alloc->size;
+ if (mem_type != NULL)
+ *mem_type = alloc->mem_type->id;
+ if (access != NULL)
+ *access = get_access(alloc);
+
+ mutex_unlock(&lock);
+}
+EXPORT_SYMBOL(hwmem_get_info);
+
+s32 hwmem_get_name(struct hwmem_alloc *alloc)
+{
+ int ret = 0, name;
+
+ mutex_lock(&lock);
+
+ if (alloc->name != 0) {
+ ret = alloc->name;
+ goto out;
+ }
+
+ while (true) {
+ if (idr_pre_get(&global_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ goto pre_get_id_failed;
+ }
+
+ ret = idr_get_new_above(&global_idr, alloc, 1, &name);
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ goto get_id_failed;
+ }
+
+ if (name > S32_MAX) {
+ ret = -ENOMSG;
+ goto overflow;
+ }
+
+ alloc->name = name;
+
+ ret = name;
+ goto out;
+
+overflow:
+ idr_remove(&global_idr, name);
+get_id_failed:
+pre_get_id_failed:
+
+out:
+ mutex_unlock(&lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hwmem_get_name);
+
+struct hwmem_alloc *hwmem_resolve_by_name(s32 name)
+{
+ struct hwmem_alloc *alloc;
+
+ mutex_lock(&lock);
+
+ alloc = idr_find(&global_idr, name);
+ if (alloc == NULL) {
+ alloc = ERR_PTR(-EINVAL);
+ goto find_failed;
+ }
+ atomic_inc(&alloc->ref_cnt);
+
+ goto out;
+
+find_failed:
+
+out:
+ mutex_unlock(&lock);
+
+ return alloc;
+}
+EXPORT_SYMBOL(hwmem_resolve_by_name);
+
+/* Debug */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debugfs_allocs_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos);
+
+static const struct file_operations debugfs_allocs_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_allocs_read,
+};
+
+static int print_alloc(struct hwmem_alloc *alloc, char **buf, size_t buf_size)
+{
+ int ret;
+ char creator[KSYM_SYMBOL_LEN];
+ int i;
+
+ if (sprint_symbol(creator, (unsigned long)alloc->creator) < 0)
+ creator[0] = '\0';
+
+ for (i = 0; i < 2; i++) {
+ size_t buf_size_l;
+ if (i == 0)
+ buf_size_l = 0;
+ else
+ buf_size_l = buf_size;
+
+ ret = snprintf(*buf, buf_size_l,
+ "%#x\n"
+ "\tSize: %u\n"
+ "\tMemory type: %u\n"
+ "\tName: %#x\n"
+ "\tReference count: %i\n"
+ "\tAllocation flags: %#x\n"
+ "\t$ settings: %#x\n"
+ "\tDefault access: %#x\n"
+ "\tPhysical address: %#x\n"
+ "\tKernel virtual address: %#x\n"
+ "\tCreator: %s\n"
+ "\tCreator thread group id: %u\n",
+ (unsigned int)alloc, alloc->size, alloc->mem_type->id,
+ alloc->name, atomic_read(&alloc->ref_cnt),
+ alloc->flags, alloc->cach_buf.cache_settings,
+ alloc->default_access, alloc->paddr,
+ (unsigned int)alloc->kaddr, creator,
+ alloc->creator_tgid);
+ if (ret < 0)
+ return -ENOMSG;
+ else if (ret + 1 > buf_size)
+ return -EINVAL;
+ }
+
+ *buf += ret;
+
+ return 0;
+}
+
+static int debugfs_allocs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+	/*
+	 * We assume the supplied buffer and PAGE_SIZE are large enough to hold
+	 * information about at least one alloc; if not, no data will be
+	 * returned.
+	 */
+
+ int ret;
+ size_t i = 0;
+ struct hwmem_alloc *curr_alloc;
+ char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *local_buf_pos = local_buf;
+ size_t available_space = min((size_t)PAGE_SIZE, count);
+	/* private_data starts as NULL in open; we treat that as 0. */
+ void **curr_pos = &file->private_data;
+ size_t bytes_read;
+
+ if (local_buf == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&lock);
+
+ list_for_each_entry(curr_alloc, &alloc_list, list) {
+ if (i++ < (size_t)*curr_pos)
+ continue;
+
+ ret = print_alloc(curr_alloc, &local_buf_pos, available_space -
+ (size_t)(local_buf_pos - local_buf));
+ if (ret == -EINVAL) /* No more room */
+ break;
+ else if (ret < 0)
+ goto out;
+
+ *curr_pos = (void *)i;
+ }
+
+ bytes_read = (size_t)(local_buf_pos - local_buf);
+
+	/* copy_to_user() returns the number of bytes left uncopied, never < 0 */
+	if (copy_to_user(buf, local_buf, bytes_read)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+ ret = bytes_read;
+
+out:
+ kfree(local_buf);
+
+ mutex_unlock(&lock);
+
+ return ret;
+}
+
+static void init_debugfs(void)
+{
+	/* Hwmem is never unloaded so dropping the dentries is OK. */
+ struct dentry *debugfs_root_dir = debugfs_create_dir("hwmem", NULL);
+ (void)debugfs_create_file("allocs", 0444, debugfs_root_dir, 0,
+ &debugfs_allocs_fops);
+}
+
+#endif /* #ifdef CONFIG_DEBUG_FS */
+
+/* Module */
+
+extern int hwmem_ioctl_init(void);
+
+static int __devinit hwmem_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (hwdev) {
+ dev_err(&pdev->dev, "Probed multiple times\n");
+ return -EINVAL;
+ }
+
+ hwdev = pdev;
+
+ /*
+ * No need to flush the caches here. If we can keep track of the cache
+ * content then none of our memory will be in the caches, if we can't
+ * keep track of the cache content we always assume all our memory is
+ * in the caches.
+ */
+
+ ret = hwmem_ioctl_init();
+ if (ret < 0)
+ dev_warn(&pdev->dev, "Failed to start hwmem-ioctl, continuing"
+ " anyway\n");
+
+#ifdef CONFIG_DEBUG_FS
+ init_debugfs();
+#endif
+
+ dev_info(&pdev->dev, "Probed OK\n");
+
+ return 0;
+}
+
+static struct platform_driver hwmem_driver = {
+ .probe = hwmem_probe,
+ .driver = {
+ .name = "hwmem",
+ },
+};
+
+static int __init hwmem_init(void)
+{
+ return platform_driver_register(&hwmem_driver);
+}
+subsys_initcall(hwmem_init);
+
+MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hardware memory driver");
+
diff --git a/drivers/misc/mbox.c b/drivers/misc/mbox.c
new file mode 100644
index 00000000000..d884496fa4c
--- /dev/null
+++ b/drivers/misc/mbox.c
@@ -0,0 +1,867 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Stefan Nilsson <stefan.xk.nilsson@stericsson.com> for ST-Ericsson.
+ * Author: Martin Persson <martin.persson@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+/*
+ * Mailbox nomenclature:
+ *
+ * APE MODEM
+ * mbox pairX
+ * ..........................
+ * . .
+ * . peer .
+ * . send ---- .
+ * . --> | | .
+ * . | | .
+ * . ---- .
+ * . .
+ * . local .
+ * . rec ---- .
+ * . | | <-- .
+ * . | | .
+ * . ---- .
+ * .........................
+ */
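+
+/*
+ * Data flow in short (a summary of the code below, not extra hardware
+ * documentation): mbox_send() buffers the 32-bit word in a ring buffer and
+ * arms the peer FIFO's "free space" IRQ; mbox_irq() then drains the ring
+ * buffer into MBOX_FIFO_DATA/MBOX_FIFO_ADD. Incoming words raise the local
+ * FIFO's "occupied" IRQ; mbox_irq() reads MBOX_FIFO_DATA, latches
+ * MBOX_FIFO_REMOVE and hands the word to the callback given to mbox_setup().
+ */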
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/hrtimer.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/mfd/db5500-prcmu.h>
+#include <mach/mbox-db5500.h>
+#include <mach/reboot_reasons.h>
+
+#define MBOX_NAME "mbox"
+
+#define MBOX_FIFO_DATA 0x000
+#define MBOX_FIFO_ADD 0x004
+#define MBOX_FIFO_REMOVE 0x008
+#define MBOX_FIFO_THRES_FREE 0x00C
+#define MBOX_FIFO_THRES_OCCUP 0x010
+#define MBOX_FIFO_STATUS 0x014
+
+#define MBOX_DISABLE_IRQ 0x4
+#define MBOX_ENABLE_IRQ 0x0
+#define MBOX_LATCH 1
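+
+/*
+ * MBOX_FIFO_STATUS layout as decoded by mbox_irq() and mbox_show(): bits 2:0
+ * hold the number of occupied slots and bits 6:4 the number of free slots;
+ * bits 3 and 7 are printed as "(ot)" and look like the matching
+ * over-threshold flags.
+ */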
+
+struct mbox_device_info {
+ struct mbox *mbox;
+ struct workqueue_struct *mbox_modem_rel_wq;
+ struct work_struct mbox_modem_rel;
+ struct completion mod_req_ack_work;
+ atomic_t ape_state;
+ atomic_t mod_req;
+ atomic_t mod_reset;
+};
+
+struct hrtimer ape_timer;
+struct hrtimer modem_timer;
+static DEFINE_MUTEX(modem_state_mutex);
+/* Global list of all mailboxes */
+static struct list_head mboxs = LIST_HEAD_INIT(mboxs);
+static struct mbox_device_info *mb;
+
+static enum hrtimer_restart mbox_ape_callback(struct hrtimer *hrtimer)
+{
+ queue_work(mb->mbox_modem_rel_wq, &mb->mbox_modem_rel);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart mbox_mod_callback(struct hrtimer *hrtimer)
+{
+ atomic_set(&mb->ape_state, 0);
+ return HRTIMER_NORESTART;
+}
+
+static void mbox_modem_rel_work(struct work_struct *work)
+{
+ mutex_lock(&modem_state_mutex);
+ prcmu_modem_rel();
+ atomic_set(&mb->mod_req, 0);
+ mutex_unlock(&modem_state_mutex);
+}
+
+static void mbox_modem_req(void)
+{
+ mutex_lock(&modem_state_mutex);
+ if (!db5500_prcmu_is_modem_requested()) {
+ prcmu_modem_req();
+ /* TODO: optimize this timeout */
+ if (!wait_for_completion_timeout(&mb->mod_req_ack_work,
+ msecs_to_jiffies(2000)))
+			printk(KERN_ERR "mbox: modem_req_ack timed out (2 s)\n");
+ }
+ atomic_set(&mb->mod_req, 1);
+ mutex_unlock(&modem_state_mutex);
+}
+
+static struct mbox *get_mbox_with_id(u8 id)
+{
+ u8 i;
+ struct list_head *pos = &mboxs;
+ for (i = 0; i <= id; i++)
+ pos = pos->next;
+
+ return (struct mbox *) list_entry(pos, struct mbox, list);
+}
+
+int mbox_send(struct mbox *mbox, u32 mbox_msg, bool block)
+{
+ int res = 0;
+ unsigned long flag;
+
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev,
+ "mbox_send called after modem reset\n");
+ return -EINVAL;
+ }
+ dev_dbg(&(mbox->pdev->dev),
+ "About to buffer 0x%X to mailbox 0x%X."
+ " ri = %d, wi = %d\n",
+ mbox_msg, (u32)mbox, mbox->read_index,
+ mbox->write_index);
+
+ /* Request for modem */
+ if (!db5500_prcmu_is_modem_requested())
+ mbox_modem_req();
+
+ spin_lock_irqsave(&mbox->lock, flag);
+ /* Check if write buffer is full */
+ while (((mbox->write_index + 1) % MBOX_BUF_SIZE) == mbox->read_index) {
+ if (!block) {
+ dev_dbg(&(mbox->pdev->dev),
+ "Buffer full in non-blocking call! "
+ "Returning -ENOMEM!\n");
+ res = -ENOMEM;
+ goto exit;
+ }
+ spin_unlock_irqrestore(&mbox->lock, flag);
+ dev_dbg(&(mbox->pdev->dev),
+ "Buffer full in blocking call! Sleeping...\n");
+ mbox->client_blocked = 1;
+ wait_for_completion(&mbox->buffer_available);
+ dev_dbg(&(mbox->pdev->dev),
+ "Blocking send was woken up! Trying again...\n");
+ spin_lock_irqsave(&mbox->lock, flag);
+ }
+
+ mbox->buffer[mbox->write_index] = mbox_msg;
+ mbox->write_index = (mbox->write_index + 1) % MBOX_BUF_SIZE;
+
+ /*
+ * Indicate that we want an IRQ as soon as there is a slot
+ * in the FIFO
+ */
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev,
+ "modem is in reset state, cannot proceed\n");
+ res = -EINVAL;
+ goto exit;
+ }
+ writel(MBOX_ENABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
+
+exit:
+ spin_unlock_irqrestore(&mbox->lock, flag);
+ return res;
+}
+EXPORT_SYMBOL(mbox_send);
+
+#if defined(CONFIG_DEBUG_FS)
+/*
+ * Expected input: <value> <nbr sends>
+ * Example: "echo 0xdeadbeef 4 > mbox-node" sends 0xdeadbeef 4 times
+ */
+static ssize_t mbox_write_fifo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long mbox_mess;
+ unsigned long nbr_sends;
+ unsigned long i;
+ char int_buf[16];
+ char *token;
+ char *val;
+
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mbox *mbox = platform_get_drvdata(pdev);
+
+ strncpy((char *) &int_buf, buf, sizeof(int_buf));
+ token = (char *) &int_buf;
+
+ /* Parse message */
+ val = strsep(&token, " ");
+ if ((val == NULL) || (strict_strtoul(val, 16, &mbox_mess) != 0))
+ mbox_mess = 0xDEADBEEF;
+
+ val = strsep(&token, " ");
+ if ((val == NULL) || (strict_strtoul(val, 10, &nbr_sends) != 0))
+ nbr_sends = 1;
+
+ dev_dbg(dev, "Will write 0x%lX %ld times using data struct at 0x%X\n",
+ mbox_mess, nbr_sends, (u32) mbox);
+
+ for (i = 0; i < nbr_sends; i++)
+ mbox_send(mbox, mbox_mess, true);
+
+ return count;
+}
+
+static ssize_t mbox_read_fifo(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int mbox_value;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mbox *mbox = platform_get_drvdata(pdev);
+
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev, "modem crashed, returning\n");
+ return 0;
+ }
+ if ((readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7) <= 0)
+ return sprintf(buf, "Mailbox is empty\n");
+
+ mbox_value = readl(mbox->virtbase_local + MBOX_FIFO_DATA);
+ writel(MBOX_LATCH, (mbox->virtbase_local + MBOX_FIFO_REMOVE));
+
+ return sprintf(buf, "0x%X\n", mbox_value);
+}
+
+static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
+
+static int mbox_show(struct seq_file *s, void *data)
+{
+ struct list_head *pos;
+ u8 mbox_index = 0;
+
+ list_for_each(pos, &mboxs) {
+ struct mbox *m =
+ (struct mbox *) list_entry(pos, struct mbox, list);
+ if (m == NULL) {
+ seq_printf(s,
+ "Unable to retrieve mailbox %d\n",
+ mbox_index);
+ continue;
+ }
+
+ spin_lock(&m->lock);
+ if ((m->virtbase_peer == NULL) || (m->virtbase_local == NULL)) {
+ seq_printf(s, "MAILBOX %d not setup or corrupt\n",
+ mbox_index);
+ spin_unlock(&m->lock);
+ continue;
+ }
+
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&m->pdev->dev, "modem crashed, returning\n");
+ spin_unlock(&m->lock);
+ return 0;
+ }
+ seq_printf(s,
+ "===========================\n"
+ " MAILBOX %d\n"
+ " PEER MAILBOX DUMP\n"
+ "---------------------------\n"
+ "FIFO: 0x%X (%d)\n"
+ "Free Threshold: 0x%.2X (%d)\n"
+ "Occupied Threshold: 0x%.2X (%d)\n"
+ "Status: 0x%.2X (%d)\n"
+ " Free spaces (ot): %d (%d)\n"
+ " Occup spaces (ot): %d (%d)\n"
+ "===========================\n"
+ " LOCAL MAILBOX DUMP\n"
+ "---------------------------\n"
+ "FIFO: 0x%.X (%d)\n"
+ "Free Threshold: 0x%.2X (%d)\n"
+ "Occupied Threshold: 0x%.2X (%d)\n"
+ "Status: 0x%.2X (%d)\n"
+ " Free spaces (ot): %d (%d)\n"
+ " Occup spaces (ot): %d (%d)\n"
+ "===========================\n"
+ "write_index: %d\n"
+ "read_index : %d\n"
+ "===========================\n"
+ "\n",
+ mbox_index,
+ readl(m->virtbase_peer + MBOX_FIFO_DATA),
+ readl(m->virtbase_peer + MBOX_FIFO_DATA),
+ readl(m->virtbase_peer + MBOX_FIFO_THRES_FREE),
+ readl(m->virtbase_peer + MBOX_FIFO_THRES_FREE),
+ readl(m->virtbase_peer + MBOX_FIFO_THRES_OCCUP),
+ readl(m->virtbase_peer + MBOX_FIFO_THRES_OCCUP),
+ readl(m->virtbase_peer + MBOX_FIFO_STATUS),
+ readl(m->virtbase_peer + MBOX_FIFO_STATUS),
+ (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 4) & 0x7,
+ (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 7) & 0x1,
+ (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 0) & 0x7,
+ (readl(m->virtbase_peer + MBOX_FIFO_STATUS) >> 3) & 0x1,
+ readl(m->virtbase_local + MBOX_FIFO_DATA),
+ readl(m->virtbase_local + MBOX_FIFO_DATA),
+ readl(m->virtbase_local + MBOX_FIFO_THRES_FREE),
+ readl(m->virtbase_local + MBOX_FIFO_THRES_FREE),
+ readl(m->virtbase_local + MBOX_FIFO_THRES_OCCUP),
+ readl(m->virtbase_local + MBOX_FIFO_THRES_OCCUP),
+ readl(m->virtbase_local + MBOX_FIFO_STATUS),
+ readl(m->virtbase_local + MBOX_FIFO_STATUS),
+ (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 4) & 0x7,
+ (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 7) & 0x1,
+ (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 0) & 0x7,
+ (readl(m->virtbase_local + MBOX_FIFO_STATUS) >> 3) & 0x1,
+ m->write_index, m->read_index);
+ mbox_index++;
+ spin_unlock(&m->lock);
+ }
+
+ return 0;
+}
+
+static int mbox_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mbox_show, NULL);
+}
+
+static const struct file_operations mbox_operations = {
+ .owner = THIS_MODULE,
+ .open = mbox_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static irqreturn_t mbox_irq(int irq, void *arg)
+{
+ u32 mbox_value;
+ int nbr_occup;
+ int nbr_free;
+ struct mbox *mbox = (struct mbox *) arg;
+
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev, "modem in reset state\n");
+ return IRQ_HANDLED;
+ }
+ spin_lock(&mbox->lock);
+
+ dev_dbg(&(mbox->pdev->dev),
+ "mbox IRQ [%d] received. ri = %d, wi = %d\n",
+ irq, mbox->read_index, mbox->write_index);
+
+ /*
+ * Check if we have any outgoing messages, and if there is space for
+ * them in the FIFO.
+ */
+ if (mbox->read_index != mbox->write_index) {
+ /*
+ * Check by reading FREE for LOCAL since that indicates
+ * OCCUP for PEER
+ */
+ nbr_free = (readl(mbox->virtbase_local + MBOX_FIFO_STATUS)
+ >> 4) & 0x7;
+ dev_dbg(&(mbox->pdev->dev),
+ "Status indicates %d empty spaces in the FIFO!\n",
+ nbr_free);
+
+ while ((nbr_free > 0) &&
+ (mbox->read_index != mbox->write_index)) {
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev,
+ "modem in reset state\n");
+ goto exit;
+ }
+ /* Write the message and latch it into the FIFO */
+ writel(mbox->buffer[mbox->read_index],
+ (mbox->virtbase_peer + MBOX_FIFO_DATA));
+ writel(MBOX_LATCH,
+ (mbox->virtbase_peer + MBOX_FIFO_ADD));
+ dev_dbg(&(mbox->pdev->dev),
+ "Wrote message 0x%X to addr 0x%X\n",
+ mbox->buffer[mbox->read_index],
+ (u32) (mbox->virtbase_peer + MBOX_FIFO_DATA));
+
+ nbr_free--;
+ mbox->read_index =
+ (mbox->read_index + 1) % MBOX_BUF_SIZE;
+ }
+
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev, "modem in reset state\n");
+ goto exit;
+ }
+ /*
+		 * Check if we still want IRQs when there is free
+ * space to send
+ */
+ if (mbox->read_index != mbox->write_index) {
+ dev_dbg(&(mbox->pdev->dev),
+ "Still have messages to send, but FIFO full. "
+ "Request IRQ again!\n");
+ writel(MBOX_ENABLE_IRQ,
+ mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
+ } else {
+ dev_dbg(&(mbox->pdev->dev),
+ "No more messages to send. "
+ "Do not request IRQ again!\n");
+ writel(MBOX_DISABLE_IRQ,
+ mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
+ }
+
+ /*
+ * Check if we can signal any blocked clients that it is OK to
+ * start buffering again
+ */
+ if (mbox->client_blocked &&
+ (((mbox->write_index + 1) % MBOX_BUF_SIZE)
+ != mbox->read_index)) {
+ dev_dbg(&(mbox->pdev->dev),
+ "Waking up blocked client\n");
+ complete(&mbox->buffer_available);
+ mbox->client_blocked = 0;
+ }
+ }
+
+ /* Start timer and on timer expiry call modem_rel */
+ hrtimer_start(&ape_timer, ktime_set(0, 10*NSEC_PER_MSEC),
+ HRTIMER_MODE_REL);
+
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev, "modem in reset state\n");
+ goto exit;
+ }
+ /* Check if we have any incoming messages */
+ nbr_occup = readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7;
+ if (nbr_occup == 0)
+ goto exit;
+
+redo:
+ if (mbox->cb == NULL) {
+ dev_dbg(&(mbox->pdev->dev), "No receive callback registered, "
+ "leaving %d incoming messages in fifo!\n", nbr_occup);
+ goto exit;
+ }
+ atomic_set(&mb->ape_state, 1);
+
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev, "modem in reset state\n");
+ goto exit;
+ }
+ /* Read and acknowledge the message */
+ mbox_value = readl(mbox->virtbase_local + MBOX_FIFO_DATA);
+ writel(MBOX_LATCH, (mbox->virtbase_local + MBOX_FIFO_REMOVE));
+
+ /* Notify consumer of new mailbox message */
+ dev_dbg(&(mbox->pdev->dev), "Calling callback for message 0x%X!\n",
+ mbox_value);
+ mbox->cb(mbox_value, mbox->client_data);
+
+ nbr_occup = readl(mbox->virtbase_local + MBOX_FIFO_STATUS) & 0x7;
+
+ if (nbr_occup > 0)
+ goto redo;
+
+ /* Start a timer and timer expiry will be the criteria for sleep */
+ hrtimer_start(&modem_timer, ktime_set(0, 100*MSEC_PER_SEC),
+ HRTIMER_MODE_REL);
+exit:
+ dev_dbg(&(mbox->pdev->dev), "Exit mbox IRQ. ri = %d, wi = %d\n",
+ mbox->read_index, mbox->write_index);
+ spin_unlock(&mbox->lock);
+
+ return IRQ_HANDLED;
+}
+
+static void mbox_shutdown(struct mbox *mbox)
+{
+ if (!mbox->allocated)
+ return;
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove(mbox->dentry);
+ device_remove_file(&mbox->pdev->dev, &dev_attr_fifo);
+#endif
+ /* TODO: Need to check if we can write after modem reset */
+ if (!atomic_read(&mb->mod_reset)) {
+ writel(MBOX_DISABLE_IRQ, mbox->virtbase_local +
+ MBOX_FIFO_THRES_OCCUP);
+ writel(MBOX_DISABLE_IRQ, mbox->virtbase_peer +
+ MBOX_FIFO_THRES_FREE);
+ }
+ free_irq(mbox->irq, (void *)mbox);
+ mbox->client_blocked = 0;
+ iounmap(mbox->virtbase_local);
+ iounmap(mbox->virtbase_peer);
+ mbox->cb = NULL;
+ mbox->client_data = NULL;
+ mbox->allocated = false;
+}
+
+/**
+ * mbox_state_reset() - Reset the mailbox state machine
+ *
+ * This function is called on receiving the modem reset interrupt. It resets
+ * the whole mailbox state machine, disables the IRQ, cancels the timers,
+ * shuts down the mailboxes and re-enables the IRQ.
+ */
+void mbox_state_reset(void)
+{
+ struct mbox *mbox = mb->mbox;
+
+ /* Common for all mailbox */
+ atomic_set(&mb->mod_reset, 1);
+
+ /* Disable IRQ */
+ disable_irq_nosync(IRQ_DB5500_PRCMU_AC_WAKE_ACK);
+
+ /* Cancel sleep_req timers */
+ hrtimer_cancel(&modem_timer);
+ hrtimer_cancel(&ape_timer);
+
+ /* specific to each mailbox */
+ list_for_each_entry(mbox, &mboxs, list) {
+ mbox_shutdown(mbox);
+ }
+
+ /* Reset mailbox state machine */
+ atomic_set(&mb->mod_req, 0);
+ atomic_set(&mb->ape_state, 0);
+
+ /* Enable irq */
+ enable_irq(IRQ_DB5500_PRCMU_AC_WAKE_ACK);
+}
+
+
+/* Setup is executed once for each mbox pair */
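+/*
+ * Typical client usage (a sketch; "my_rx_cb" and "priv" are placeholders):
+ *
+ *	mbox = mbox_setup(0, my_rx_cb, priv);
+ *	if (mbox != NULL)
+ *		mbox_send(mbox, 0xdeadbeef, true);
+ */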
+struct mbox *mbox_setup(u8 mbox_id, mbox_recv_cb_t *mbox_cb, void *priv)
+{
+ struct resource *resource;
+ int res;
+ struct mbox *mbox;
+
+	/*
+	 * Set the mod_reset flag to '0'. Clients calling this API should make
+	 * sure that the modem is rebooted after MSR; the mailbox has no means
+	 * of knowing the modem's boot status.
+	 */
+ atomic_set(&mb->mod_reset, 0);
+
+ mbox = get_mbox_with_id(mbox_id);
+	if (mbox == NULL) {
+		/* mbox is NULL here, so it cannot be dereferenced for dev_err() */
+		pr_err("mbox: Incorrect mailbox id: %d!\n", mbox_id);
+		goto exit;
+	}
+
+ /*
+ * Check if mailbox has been allocated to someone else,
+ * otherwise allocate it
+ */
+ if (mbox->allocated) {
+ dev_err(&(mbox->pdev->dev), "Mailbox number %d is busy!\n",
+ mbox_id);
+ mbox = NULL;
+ goto exit;
+ }
+ mbox->allocated = true;
+
+ dev_dbg(&(mbox->pdev->dev), "Initiating mailbox number %d: 0x%X...\n",
+ mbox_id, (u32)mbox);
+
+ mbox->client_data = priv;
+ mbox->cb = mbox_cb;
+
+ /* Get addr for peer mailbox and ioremap it */
+ resource = platform_get_resource_byname(mbox->pdev,
+ IORESOURCE_MEM,
+ "mbox_peer");
+ if (resource == NULL) {
+ dev_err(&(mbox->pdev->dev),
+ "Unable to retrieve mbox peer resource\n");
+ mbox = NULL;
+ goto free_mbox;
+ }
+ dev_dbg(&(mbox->pdev->dev),
+ "Resource name: %s start: 0x%X, end: 0x%X\n",
+ resource->name, resource->start, resource->end);
+ mbox->virtbase_peer = ioremap(resource->start, resource_size(resource));
+ if (!mbox->virtbase_peer) {
+ dev_err(&(mbox->pdev->dev), "Unable to ioremap peer mbox\n");
+ mbox = NULL;
+ goto free_mbox;
+ }
+ dev_dbg(&(mbox->pdev->dev),
+ "ioremapped peer physical: (0x%X-0x%X) to virtual: 0x%X\n",
+ resource->start, resource->end, (u32) mbox->virtbase_peer);
+
+ /* Get addr for local mailbox and ioremap it */
+ resource = platform_get_resource_byname(mbox->pdev,
+ IORESOURCE_MEM,
+ "mbox_local");
+ if (resource == NULL) {
+ dev_err(&(mbox->pdev->dev),
+ "Unable to retrieve mbox local resource\n");
+ goto free_map;
+ }
+ dev_dbg(&(mbox->pdev->dev),
+ "Resource name: %s start: 0x%X, end: 0x%X\n",
+ resource->name, resource->start, resource->end);
+ mbox->virtbase_local = ioremap(resource->start, resource_size(resource));
+ if (!mbox->virtbase_local) {
+ dev_err(&(mbox->pdev->dev), "Unable to ioremap local mbox\n");
+ goto free_map;
+ }
+ dev_dbg(&(mbox->pdev->dev),
+ "ioremapped local physical: (0x%X-0x%X) to virtual: 0x%X\n",
+ resource->start, resource->end, (u32) mbox->virtbase_local);
+
+ init_completion(&mbox->buffer_available);
+ mbox->client_blocked = 0;
+
+ /* Get IRQ for mailbox and allocate it */
+ mbox->irq = platform_get_irq_byname(mbox->pdev, "mbox_irq");
+ if (mbox->irq < 0) {
+ dev_err(&(mbox->pdev->dev),
+ "Unable to retrieve mbox irq resource\n");
+ goto free_map1;
+ }
+
+ dev_dbg(&(mbox->pdev->dev), "Allocating irq %d...\n", mbox->irq);
+ res = request_threaded_irq(mbox->irq, NULL, mbox_irq,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ mbox->name, (void *) mbox);
+ if (res < 0) {
+ dev_err(&(mbox->pdev->dev),
+ "Unable to allocate mbox irq %d\n", mbox->irq);
+ goto free_map1;
+ }
+
+ /* check if modem has reset */
+ if (atomic_read(&mb->mod_reset)) {
+ dev_err(&mbox->pdev->dev,
+ "modem is in reset state, cannot proceed\n");
+ goto free_irq;
+ }
+ /* Set up mailbox to not launch IRQ on free space in mailbox */
+ writel(MBOX_DISABLE_IRQ, mbox->virtbase_peer + MBOX_FIFO_THRES_FREE);
+
+ /*
+ * Set up mailbox to launch IRQ on new message if we have
+ * a callback set. If not, do not raise IRQ, but keep message
+ * in FIFO for manual retrieval
+ */
+ if (mbox_cb != NULL)
+ writel(MBOX_ENABLE_IRQ,
+ mbox->virtbase_local + MBOX_FIFO_THRES_OCCUP);
+ else
+ writel(MBOX_DISABLE_IRQ,
+ mbox->virtbase_local + MBOX_FIFO_THRES_OCCUP);
+
+#if defined(CONFIG_DEBUG_FS)
+ res = device_create_file(&(mbox->pdev->dev), &dev_attr_fifo);
+ if (res != 0)
+ dev_warn(&(mbox->pdev->dev),
+ "Unable to create mbox sysfs entry");
+
+ mbox->dentry = debugfs_create_file("mbox", S_IFREG | S_IRUGO, NULL,
+ NULL, &mbox_operations);
+#endif
+ dev_info(&(mbox->pdev->dev),
+ "Mailbox driver with index %d initiated!\n", mbox_id);
+
+ return mbox;
+free_irq:
+ free_irq(mbox->irq, (void *)mbox);
+free_map1:
+ iounmap(mbox->virtbase_local);
+free_map:
+ iounmap(mbox->virtbase_peer);
+free_mbox:
+ mbox->client_data = NULL;
+ mbox->cb = NULL;
+ mbox->allocated = false;
+ mbox = NULL;
+exit:
+ return mbox;
+}
+EXPORT_SYMBOL(mbox_setup);
+
+static irqreturn_t mbox_prcmu_mod_req_ack_handler(int irq, void *data)
+{
+ complete(&mb->mod_req_ack_work);
+ return IRQ_HANDLED;
+}
+
+int __init mbox_probe(struct platform_device *pdev)
+{
+ struct mbox *mbox;
+ int res = 0;
+ dev_dbg(&(pdev->dev), "Probing mailbox (pdev = 0x%X)...\n", (u32) pdev);
+
+ mbox = kzalloc(sizeof(struct mbox), GFP_KERNEL);
+ if (mbox == NULL) {
+ dev_err(&pdev->dev,
+ "Could not allocate memory for struct mbox\n");
+ return -ENOMEM;
+ }
+
+ mbox->pdev = pdev;
+ mbox->write_index = 0;
+ mbox->read_index = 0;
+
+ INIT_LIST_HEAD(&(mbox->list));
+ list_add_tail(&(mbox->list), &mboxs);
+
+ sprintf(mbox->name, "%s", MBOX_NAME);
+ spin_lock_init(&mbox->lock);
+
+ platform_set_drvdata(pdev, mbox);
+ mb->mbox = mbox;
+ dev_info(&(pdev->dev), "Mailbox driver loaded\n");
+
+ return res;
+}
+
+static int __exit mbox_remove(struct platform_device *pdev)
+{
+ struct mbox *mbox = platform_get_drvdata(pdev);
+
+ hrtimer_cancel(&ape_timer);
+ hrtimer_cancel(&modem_timer);
+ mbox_shutdown(mbox);
+ list_del(&mbox->list);
+ kfree(mbox);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int mbox_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mbox *mbox = platform_get_drvdata(pdev);
+
+ /*
+ * Nothing to be done for now, once APE-Modem power management is
+ * in place communication will have to be stopped.
+ */
+
+ list_for_each_entry(mbox, &mboxs, list) {
+ if (mbox->client_blocked)
+ return -EBUSY;
+ }
+ dev_dbg(dev, "APE_STATE = %d\n", atomic_read(&mb->ape_state));
+ dev_dbg(dev, "MODEM_STATE = %d\n", db5500_prcmu_is_modem_requested());
+ if (atomic_read(&mb->ape_state) || db5500_prcmu_is_modem_requested() ||
+ atomic_read(&mb->mod_req))
+ return -EBUSY;
+ return 0;
+}
+
+int mbox_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mbox *mbox = platform_get_drvdata(pdev);
+
+ /*
+ * Nothing to be done for now, once APE-Modem power management is
+ * in place communication will have to be resumed.
+ */
+
+ return 0;
+}
+
+static const struct dev_pm_ops mbox_dev_pm_ops = {
+ .suspend_noirq = mbox_suspend,
+ .resume_noirq = mbox_resume,
+};
+#endif
+
+static struct platform_driver mbox_driver = {
+ .remove = __exit_p(mbox_remove),
+ .driver = {
+ .name = MBOX_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &mbox_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init mbox_init(void)
+{
+ struct mbox_device_info *mb_di;
+ int err;
+
+ mb_di = kzalloc(sizeof(struct mbox_device_info), GFP_KERNEL);
+ if (mb_di == NULL) {
+ printk(KERN_ERR
+ "mbox: Could not allocate memory for struct mbox_device_info\n");
+ return -ENOMEM;
+ }
+
+ mb_di->mbox_modem_rel_wq = create_singlethread_workqueue(
+ "mbox_modem_rel");
+ if (!mb_di->mbox_modem_rel_wq) {
+ printk(KERN_ERR "mbox: failed to create work queue\n");
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ INIT_WORK(&mb_di->mbox_modem_rel, mbox_modem_rel_work);
+
+ hrtimer_init(&ape_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ape_timer.function = mbox_ape_callback;
+ hrtimer_init(&modem_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ modem_timer.function = mbox_mod_callback;
+
+ atomic_set(&mb_di->ape_state, 0);
+ atomic_set(&mb_di->mod_req, 0);
+ atomic_set(&mb_di->mod_reset, 0);
+
+ err = request_irq(IRQ_DB5500_PRCMU_AC_WAKE_ACK,
+ mbox_prcmu_mod_req_ack_handler,
+ IRQF_NO_SUSPEND, "mod_req_ack", NULL);
+ if (err < 0) {
+ printk(KERN_ERR
+ "mbox: failed to allocate IRQ_DB5500_PRCMU_AC_WAKE_ACK\n");
+ goto free_wq;
+ }
+
+ init_completion(&mb_di->mod_req_ack_work);
+ mb = mb_di;
+ return platform_driver_probe(&mbox_driver, mbox_probe);
+free_wq:
+ destroy_workqueue(mb_di->mbox_modem_rel_wq);
+free_mem:
+ kfree(mb_di);
+ return err;
+}
+
+module_init(mbox_init);
+
+void __exit mbox_exit(void)
+{
+ free_irq(IRQ_DB5500_PRCMU_AC_WAKE_ACK, NULL);
+ destroy_workqueue(mb->mbox_modem_rel_wq);
+ platform_driver_unregister(&mbox_driver);
+ kfree(mb);
+}
+
+module_exit(mbox_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MBOX driver");
diff --git a/drivers/misc/mbox_channels-db5500.c b/drivers/misc/mbox_channels-db5500.c
new file mode 100644
index 00000000000..919be308ed4
--- /dev/null
+++ b/drivers/misc/mbox_channels-db5500.c
@@ -0,0 +1,1273 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Mailbox Logical Driver
+ *
+ * Author: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com> for ST-Ericsson.
+ *         Bibek Basu <bibek.basu@stericsson.com>
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <asm/mach-types.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <mach/mbox-db5500.h>
+#include <mach/mbox_channels-db5500.h>
+#include <linux/io.h>
+
+/* Defines start sequence number for given mailbox channel */
+#define CHANNEL_START_SEQUENCE_NUMBER 0x80
+
+/* Defines number of channels per mailbox unit */
+#define CHANNELS_PER_MBOX_UNIT 256
+
+/*
+ * This macro builds mbox channel PDU header with following format:
+ * ---------------------------------------------------------------------------
+ * | | | | |
+ * | Sequence nmbr | Type | Length | Destination logical channel number |
+ * | | | | |
+ * ---------------------------------------------------------------------------
+ * 31 24 20 16 0
+ *
+ */
+#define BUILD_HEADER(chan, len, type, seq_no) \
+ ((chan) | (((len) & 0xf) << 16) | \
+ (((type) & 0xf) << 20) | ((seq_no) << 24))
+
+/* Returns type from mbox message header */
+#define GET_TYPE(mbox_msg) (((mbox_msg) >> 20) & 0xf)
+
+/* Returns channel number from mbox message header */
+#define GET_CHANNEL(mbox_msg) ((mbox_msg) & 0xffff)
+
+/* Returns length of payload from mbox message header */
+#define GET_LENGTH(mbox_msg) (((mbox_msg) >> 16) & 0xf)
+
+/* Returns sequence number from mbox message header */
+#define GET_SEQ_NUMBER(mbox_msg) ((mbox_msg) >> 24)
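+
+/*
+ * Worked example (illustrative only): BUILD_HEADER(0x900, 3, MBOX_CAST, 0x80)
+ * yields 0x80330900 - channel 0x0900 in bits 15:0, length 3 in bits 19:16,
+ * type MBOX_CAST (enum value 3, defined below) in bits 23:20 and sequence
+ * number 0x80 in bits 31:24. The GET_* macros above recover the same fields
+ * from a received header word.
+ */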
+
+enum mbox_msg {
+ MBOX_CLOSE,
+ MBOX_OPEN,
+ MBOX_SEND,
+ MBOX_CAST,
+ MBOX_ACK,
+ MBOX_NAK,
+};
+
+enum mbox_dir {
+ MBOX_TX,
+ MBOX_RX,
+};
+
+struct mbox_channel_mapping {
+ u16 chan_base;
+ u8 mbox_id;
+ enum mbox_dir direction;
+};
+
+/* This table maps mbox logical channel to mbox id and direction */
+static struct mbox_channel_mapping channel_mappings[] = {
+ {0x500, 2, MBOX_RX}, /* channel 5 maps to mbox 0.1, dsp->app (unsec) */
+ {0x900, 2, MBOX_TX}, /* channel 9 maps to mbox 0.0, app->dsp (unsec) */
+};
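+
+/*
+ * For example, with the mapping above, data received on RX channel 0x505
+ * (mailbox unit 2) is answered on the paired TX channel 0x905; see
+ * get_tx_channel() below.
+ */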
+
+/* This table specifies mailbox ids which mbox channels module will use */
+static u8 mbox_ids[] = {
+ 2, /* app <-> dsp (unsec) */
+};
+
+/**
+ * struct mbox_unit_status - current status of mbox unit
+ * @mbox_id : holds mbox unit identification number
+ * @mbox : holds mbox pointer after mbox_register() call
+ * @tx_chans : holds list of open tx mbox channels
+ * @tx_lock: lock for tx channel
+ * @rx_chans : holds list of open rx mbox channels
+ * @rx_lock: lock for rx channel
+ */
+struct mbox_unit_status {
+ u8 mbox_id;
+ struct mbox *mbox;
+ struct list_head tx_chans;
+ spinlock_t tx_lock;
+ struct list_head rx_chans;
+ spinlock_t rx_lock;
+};
+
+static struct {
+ struct platform_device *pdev;
+ struct mbox_unit_status mbox_unit[ARRAY_SIZE(mbox_ids)];
+} channels;
+
+/* This structure describes pending element for mbox tx channel */
+struct pending_elem {
+ struct list_head list;
+ u32 *data;
+ u8 length;
+};
+
+struct rx_pending_elem {
+ u32 buffer[MAILBOX_NR_OF_DATAWORDS];
+ u8 length;
+ void *priv;
+};
+
+static struct rx_pending_elem rx_pending[NUM_DSP_BUFFER];
+
+/* This structure holds list of pending elements for mbox tx channel */
+struct tx_channel {
+ struct list_head pending;
+};
+
+/* Specific status for mbox rx channel */
+struct rx_channel {
+ struct list_head pending;
+ spinlock_t lock;
+ u32 buffer[MAILBOX_NR_OF_DATAWORDS];
+ u8 index;
+ u8 length;
+};
+
+/**
+ * struct channel_status - status of mbox channel - common for tx and rx
+ * @rcv_counter : number of received messages waiting to be processed
+ * @list : holds list of channels registered
+ * @channel : holds channel number
+ * @state : holds state of channel
+ * @cb : holds callback function for rx channel
+ * @priv : holds client's private data passed back in the callback
+ * @seq_number : holds current PDU sequence number
+ * @with_ack : holds if ack is needed
+ * @rx : holds rx channel state
+ * @tx : holds tx channel state
+ * @receive_wq : holds pointer to receive workqueue_struct
+ * @cast_wq : holds pointer to cast workqueue_struct
+ * @open_msg : holds work_struct for open msg
+ * @receive_msg : holds work_struct for receive msg
+ * @cast_msg : holds work_struct for cast msg
+ * @lock : holds lock for channel
+ */
+struct channel_status {
+ atomic_t rcv_counter;
+ struct list_head list;
+ u16 channel;
+ int state;
+ mbox_channel_cb_t *cb;
+ void *priv;
+ u8 seq_number;
+ bool with_ack;
+ struct rx_channel rx;
+ struct tx_channel tx;
+ struct workqueue_struct *receive_wq;
+ struct workqueue_struct *cast_wq;
+ struct work_struct open_msg;
+ struct work_struct receive_msg;
+ struct work_struct cast_msg;
+ struct mutex lock;
+};
+
+/* Checks if provided channel number is valid */
+static bool check_channel(u16 channel, enum mbox_dir direction)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) {
+ if ((channel >= channel_mappings[i].chan_base) &&
+ (channel < channel_mappings[i].chan_base +
+ CHANNELS_PER_MBOX_UNIT)) {
+ /* Check if direction of given channel is correct*/
+ if (channel_mappings[i].direction == direction)
+ return true;
+ else
+ break;
+ }
+ }
+ return false;
+}
+
+/* get the tx channel corresponding to the given rx channel */
+static u16 get_tx_channel(u16 channel)
+{
+ int i;
+ int relative_chan = 0;
+ int mbox_id = 0xFF;
+ u16 tx_channel = 0xFF;
+
+ for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) {
+ if ((channel >= channel_mappings[i].chan_base) &&
+ (channel < channel_mappings[i].chan_base +
+ CHANNELS_PER_MBOX_UNIT)) {
+ /* Check if direction of given channel is correct*/
+ relative_chan = channel - channel_mappings[i].chan_base;
+ mbox_id = channel_mappings[i].mbox_id;
+
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) {
+ if ((mbox_id == channel_mappings[i].mbox_id) &&
+ (channel_mappings[i].direction == MBOX_TX))
+ tx_channel = channel_mappings[i].chan_base +
+ relative_chan;
+ }
+ return tx_channel;
+}
+
+/* Returns mbox unit id for given mbox channel */
+static int get_mbox_id(u16 channel)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(channel_mappings); i++) {
+ if ((channel >= channel_mappings[i].chan_base) &&
+ (channel < channel_mappings[i].chan_base +
+ CHANNELS_PER_MBOX_UNIT)) {
+ return channel_mappings[i].mbox_id;
+ }
+ }
+ /* There is no mbox unit registered for given channel */
+ return -EINVAL;
+}
+
+/* Returns mbox structure saved after mbox_register() call */
+static struct mbox *get_mbox(u16 channel)
+{
+ int i;
+ int mbox_id = get_mbox_id(channel);
+
+ if (mbox_id < 0) {
+ dev_err(&channels.pdev->dev, "couldn't get mbox id\n");
+ return NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(channels.mbox_unit); i++) {
+ if (channels.mbox_unit[i].mbox_id == mbox_id)
+ return channels.mbox_unit[i].mbox;
+ }
+ return NULL;
+}
+
+/* Returns pointer to rx mbox channels list for given mbox unit */
+static struct list_head *get_rx_list(u8 mbox_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mbox_ids); i++) {
+ if (channels.mbox_unit[i].mbox_id == mbox_id)
+ return &channels.mbox_unit[i].rx_chans;
+ }
+ return NULL;
+}
+
+/* Returns pointer to tx mbox channels list for given mbox unit */
+static struct list_head *get_tx_list(u8 mbox_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mbox_ids); i++) {
+ if (channels.mbox_unit[i].mbox_id == mbox_id)
+ return &channels.mbox_unit[i].tx_chans;
+ }
+ return NULL;
+}
+
+static int send_pdu(struct channel_status *chan_status, int command,
+ u16 channel)
+{
+ struct mbox *mbox;
+ u32 header = 0;
+ int ret = 0;
+ /* SEND PDU is not supported */
+ if (command == MBOX_SEND) {
+ dev_err(&channels.pdev->dev, "SEND command not implemented\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+ mbox = get_mbox(chan_status->channel);
+ if (mbox == NULL) {
+ dev_err(&channels.pdev->dev, "couldn't get mailbox\n");
+ ret = -ENOSYS;
+ goto exit;
+ }
+ /* For CAST type send all pending messages */
+ if (command == MBOX_CAST) {
+ struct list_head *pos, *n;
+
+ /* Send all pending messages from TX channel */
+ list_for_each_safe(pos, n, &chan_status->tx.pending) {
+ struct pending_elem *pending =
+ list_entry(pos, struct pending_elem, list);
+ int i;
+
+ header = BUILD_HEADER(channel,
+ pending->length,
+ command,
+ chan_status->seq_number);
+
+ ret = mbox_send(mbox, header, true);
+ if (ret < 0) {
+ dev_err(&channels.pdev->dev,
+ "failed to send header, err=%d\n", ret);
+ goto exit;
+ }
+
+ for (i = 0; i < pending->length; i++) {
+ ret = mbox_send(mbox, pending->data[i], true);
+ if (ret < 0) {
+ dev_err(&channels.pdev->dev,
+ "failed to send data, err=%d\n", ret);
+ goto exit;
+ }
+ }
+
+ /* Call client's callback that data is already sent */
+ if (chan_status->cb)
+ chan_status->cb(pending->data, pending->length,
+ chan_status->priv);
+ else
+ dev_err(&channels.pdev->dev,
+ "%s no callback provided:header 0x%x\n",
+ __func__, header);
+
+ /* Increment sequence number */
+ chan_status->seq_number++;
+
+ /* Remove and free element from the list */
+ list_del(&pending->list);
+ kfree(pending);
+ }
+ } else {
+ header = BUILD_HEADER(channel, 0,
+ command, chan_status->seq_number);
+
+ ret = mbox_send(mbox, header, true);
+ if (ret < 0)
+ dev_err(&channels.pdev->dev, "failed to send header\n");
+ /* Increment sequence number */
+ chan_status->seq_number++;
+ }
+
+exit:
+ return ret;
+}
+
+static void mbox_handle_receive_msg(struct work_struct *work)
+{
+ struct channel_status *rx_chan = container_of(work,
+ struct channel_status,
+ receive_msg);
+
+ if (!atomic_read(&rx_chan->rcv_counter))
+ return;
+rcv_msg:
+ /* Call client's callback and reset state */
+ if (rx_chan->cb) {
+ static int rx_pending_count;
+ rx_chan->cb(rx_pending[rx_pending_count].buffer,
+ rx_pending[rx_pending_count].length,
+ rx_pending[rx_pending_count].priv);
+ rx_pending_count++;
+ if (rx_pending_count == NUM_DSP_BUFFER)
+ rx_pending_count = 0;
+ } else {
+ dev_err(&channels.pdev->dev,
+ "%s no callback provided\n", __func__);
+ }
+ if (atomic_dec_return(&rx_chan->rcv_counter) > 0)
+ goto rcv_msg;
+
+}
+
+static void mbox_handle_open_msg(struct work_struct *work)
+{
+ struct channel_status *tx_chan = container_of(work,
+ struct channel_status,
+ open_msg);
+ /* Change channel state to OPEN */
+ tx_chan->state = MBOX_OPEN;
+ /* If pending list not empty, start sending data */
+ mutex_lock(&tx_chan->lock);
+ if (!list_empty(&tx_chan->tx.pending))
+ send_pdu(tx_chan, MBOX_CAST, tx_chan->channel);
+ mutex_unlock(&tx_chan->lock);
+}
+
+static void mbox_handle_cast_msg(struct work_struct *work)
+{
+ struct channel_status *rx_chan = container_of(work,
+ struct channel_status,
+ cast_msg);
+ /* Check if channel is opened */
+ if (rx_chan->state == MBOX_CLOSE) {
+ /* Peer sent message to closed channel */
+ dev_err(&channels.pdev->dev,
+ "channel in wrong state\n");
+ }
+}
+
+static bool handle_receive_msg(u32 mbox_msg, struct channel_status *rx_chan)
+{
+ int i;
+ static int rx_pending_count;
+
+ if (rx_chan) {
+ /* Store received data in RX channel buffer */
+ rx_chan->rx.buffer[rx_chan->rx.index++] = mbox_msg;
+
+ /* Check if it's last data of PDU */
+ if (rx_chan->rx.index == rx_chan->rx.length) {
+ for (i = 0; i < MAILBOX_NR_OF_DATAWORDS; i++) {
+ rx_pending[rx_pending_count].buffer[i] =
+ rx_chan->rx.buffer[i];
+ }
+
+ rx_pending[rx_pending_count].length =
+ rx_chan->rx.length;
+ rx_pending[rx_pending_count].priv = rx_chan->priv;
+ rx_chan->rx.index = 0;
+ rx_chan->rx.length = 0;
+ rx_chan->state = MBOX_OPEN;
+ rx_chan->seq_number++;
+ rx_pending_count++;
+ if (rx_pending_count == NUM_DSP_BUFFER)
+ rx_pending_count = 0;
+ atomic_inc(&rx_chan->rcv_counter);
+ queue_work(rx_chan->receive_wq,
+ &rx_chan->receive_msg);
+ }
+ dev_dbg(&channels.pdev->dev, "%s OK\n", __func__);
+
+ return true;
+ }
+ return false;
+}
+
+static void handle_open_msg(u16 channel, u8 mbox_id)
+{
+ struct list_head *tx_list, *pos;
+ struct channel_status *tmp;
+ struct channel_status *tx_chan = NULL;
+ struct mbox_unit_status *mbox_unit;
+ channel = get_tx_channel(channel);
+ dev_dbg(&channels.pdev->dev, "%s mbox_id %d\tchannel %x\n",
+ __func__, mbox_id, channel);
+ /* Get TX channel for given mbox unit */
+ tx_list = get_tx_list(mbox_id);
+ if (tx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid %d\n",
+ mbox_id);
+ return;
+ }
+ mbox_unit = container_of(tx_list, struct mbox_unit_status, tx_chans);
+ /* Search for channel in tx list */
+ spin_lock(&mbox_unit->tx_lock);
+ list_for_each(pos, tx_list) {
+ tmp = list_entry(pos, struct channel_status, list);
+ dev_dbg(&channels.pdev->dev, "tmp->channel=%d\n",
+ tmp->channel);
+ if (tmp->channel == channel)
+ tx_chan = tmp;
+ }
+ spin_unlock(&mbox_unit->tx_lock);
+ if (tx_chan) {
+ schedule_work(&tx_chan->open_msg);
+ } else {
+ /* No tx channel found on the list, allocate new element */
+ tx_chan = kzalloc(sizeof(*tx_chan), GFP_ATOMIC);
+ if (tx_chan == NULL) {
+ dev_err(&channels.pdev->dev,
+ "failed to allocate memory\n");
+ return;
+ }
+
+ /* Fill initial data and add this element to tx list */
+ tx_chan->channel = get_tx_channel(channel);
+ tx_chan->state = MBOX_OPEN;
+ tx_chan->seq_number = CHANNEL_START_SEQUENCE_NUMBER;
+ INIT_LIST_HEAD(&tx_chan->tx.pending);
+ INIT_WORK(&tx_chan->open_msg, mbox_handle_open_msg);
+ INIT_WORK(&tx_chan->cast_msg, mbox_handle_cast_msg);
+ INIT_WORK(&tx_chan->receive_msg, mbox_handle_receive_msg);
+ mutex_init(&tx_chan->lock);
+ spin_lock(&mbox_unit->tx_lock);
+ list_add_tail(&tx_chan->list, tx_list);
+ spin_unlock(&mbox_unit->tx_lock);
+ }
+}
+
+static void handle_cast_msg(u16 channel, struct channel_status *rx_chan,
+ u32 mbox_msg, bool send)
+{
+ dev_dbg(&channels.pdev->dev, " %s\n", __func__);
+ if (rx_chan) {
+ rx_chan->rx.buffer[0] = mbox_msg;
+ rx_chan->with_ack = send;
+ rx_chan->rx.length = GET_LENGTH(rx_chan->rx.buffer[0]);
+ if (rx_chan->rx.length <= MAILBOX_NR_OF_DATAWORDS &&
+ rx_chan->rx.length > 0) {
+ rx_chan->rx.index = 0;
+ rx_chan->state = MBOX_CAST;
+ }
+ queue_work(rx_chan->cast_wq,
+ &rx_chan->cast_msg);
+ } else {
+ /* Channel not found, peer sent wrong message */
+ dev_err(&channels.pdev->dev, "channel %d doesn't exist\n",
+ channel);
+ }
+}
+
+/*
+ * This callback is called whenever mbox unit receives data.
+ * priv parameter holds mbox unit id.
+ */
+static void mbox_cb(u32 mbox_msg, void *priv)
+{
+ u8 mbox_id = *(u8 *)priv;
+ struct list_head *rx_list;
+ u8 type = GET_TYPE(mbox_msg);
+ u16 channel = GET_CHANNEL(mbox_msg);
+ struct mbox_unit_status *mbox_unit;
+ struct list_head *pos;
+ struct channel_status *tmp;
+ struct channel_status *rx_chan = NULL;
+ bool is_payload = false;
+
+ dev_dbg(&channels.pdev->dev, "%s type %d\t, mbox_msg %x\n",
+ __func__, type, mbox_msg);
+
+ /* Get RX channels list for given mbox unit */
+ rx_list = get_rx_list(mbox_id);
+ if (rx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid %d\n",
+ mbox_id);
+ return;
+ }
+
+ mbox_unit = container_of(rx_list, struct mbox_unit_status, rx_chans);
+ /* Search for channel in rx list */
+ spin_lock(&mbox_unit->rx_lock);
+ list_for_each(pos, rx_list) {
+ tmp = list_entry(pos, struct channel_status, list);
+ if (tmp->state == MBOX_SEND ||
+ tmp->state == MBOX_CAST) {
+ /* Received message is payload */
+ is_payload = true;
+ rx_chan = tmp;
+ } else
+ if (tmp->channel == channel)
+ rx_chan = tmp;
+ }
+ spin_unlock(&mbox_unit->rx_lock);
+ /* if callback is present for that RX channel */
+ if (rx_chan && rx_chan->cb) {
+ /* If received message is payload this
+ * function will take care of it
+ */
+ if (is_payload && handle_receive_msg(mbox_msg, rx_chan))
+ return;
+ } else
+ dev_err(&channels.pdev->dev, "callback not present:msg 0x%x "
+ "rx_chan 0x%x\n", mbox_msg, (u32)rx_chan);
+
+ /* Received message is header as no RX channel is in SEND/CAST state */
+ switch (type) {
+ case MBOX_CLOSE:
+ /* Not implemented */
+ break;
+ case MBOX_OPEN:
+ handle_open_msg(channel, mbox_id);
+ break;
+ case MBOX_SEND:
+ /* if callback is present for that RX channel */
+ if (rx_chan && rx_chan->cb)
+ handle_cast_msg(channel, rx_chan, mbox_msg, true);
+ break;
+ case MBOX_CAST:
+ /* if callback is present for that RX channel */
+ if (rx_chan && rx_chan->cb)
+ handle_cast_msg(channel, rx_chan, mbox_msg, false);
+ break;
+ case MBOX_ACK:
+ case MBOX_NAK:
+ /* Not implemented */
+ break;
+ }
+}
+
+/**
+ * mbox_channel_register() - Registers for a channel
+ * @channel: Channel number.
+ * @cb: Callback function of type mbox_channel_cb_t
+ * @priv: Pointer to private data
+ *
+ * This routine is used to register for a logical channel.
+ * It first does a sanity check on the requested channel availability
+ * and parameters, then it prepares an internal entry for the channel
+ * and sends an OPEN request for that channel.
+ */
+int mbox_channel_register(u16 channel, mbox_channel_cb_t *cb, void *priv)
+{
+ struct channel_status *rx_chan;
+ struct list_head *pos, *rx_list;
+ int res = 0;
+ struct mbox_unit_status *mbox_unit;
+
+ dev_dbg(&channels.pdev->dev, " %s channel = %d\n", __func__, channel);
+ /* Check for callback fcn */
+ if (cb == NULL) {
+ dev_err(&channels.pdev->dev,
+ "channel callback missing:channel %d\n", channel);
+ res = -EINVAL;
+ goto exit;
+ }
+
+ /* Check if provided channel number is valid */
+ if (!check_channel(channel, MBOX_RX)) {
+ dev_err(&channels.pdev->dev, "wrong mbox channel number %d\n",
+ channel);
+ res = -EINVAL;
+ goto exit;
+ }
+
+ rx_list = get_rx_list(get_mbox_id(channel));
+ if (rx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid\n");
+ res = -EINVAL;
+ goto exit;
+ }
+
+ mbox_unit = container_of(rx_list, struct mbox_unit_status, rx_chans);
+
+ /* Check if channel is already registered */
+ spin_lock(&mbox_unit->rx_lock);
+ list_for_each(pos, rx_list) {
+ rx_chan = list_entry(pos, struct channel_status, list);
+
+ if (rx_chan->channel == channel) {
+ dev_dbg(&channels.pdev->dev,
+ "channel already registered\n");
+ rx_chan->cb = cb;
+ rx_chan->priv = priv;
+ spin_unlock(&mbox_unit->rx_lock);
+ goto exit;
+ }
+ }
+ spin_unlock(&mbox_unit->rx_lock);
+
+ rx_chan = kzalloc(sizeof(*rx_chan), GFP_KERNEL);
+ if (rx_chan == NULL) {
+ dev_err(&channels.pdev->dev,
+ "couldn't allocate channel status\n");
+ res = -ENOMEM;
+ goto exit;
+ }
+
+ atomic_set(&rx_chan->rcv_counter, 0);
+ /* Fill out newly allocated element and add it to rx list */
+ rx_chan->channel = channel;
+ rx_chan->cb = cb;
+ rx_chan->priv = priv;
+ rx_chan->seq_number = CHANNEL_START_SEQUENCE_NUMBER;
+ mutex_init(&rx_chan->lock);
+ INIT_LIST_HEAD(&rx_chan->rx.pending);
+ rx_chan->cast_wq = create_singlethread_workqueue("mbox_cast_msg");
+ if (!rx_chan->cast_wq) {
+ dev_err(&channels.pdev->dev, "failed to create work queue\n");
+ res = -ENOMEM;
+ goto error_cast_wq;
+ }
+ rx_chan->receive_wq = create_singlethread_workqueue("mbox_receive_msg");
+ if (!rx_chan->receive_wq) {
+ dev_err(&channels.pdev->dev, "failed to create work queue\n");
+ res = -ENOMEM;
+ goto error_recv_wq;
+ }
+ INIT_WORK(&rx_chan->open_msg, mbox_handle_open_msg);
+ INIT_WORK(&rx_chan->cast_msg, mbox_handle_cast_msg);
+ INIT_WORK(&rx_chan->receive_msg, mbox_handle_receive_msg);
+ spin_lock(&mbox_unit->rx_lock);
+ list_add_tail(&rx_chan->list, rx_list);
+ spin_unlock(&mbox_unit->rx_lock);
+
+ mutex_lock(&rx_chan->lock);
+ res = send_pdu(rx_chan, MBOX_OPEN, get_tx_channel(rx_chan->channel));
+ if (res) {
+ dev_err(&channels.pdev->dev, "failed to send OPEN command\n");
+ spin_lock(&mbox_unit->rx_lock);
+ list_del(&rx_chan->list);
+ spin_unlock(&mbox_unit->rx_lock);
+ mutex_unlock(&rx_chan->lock);
+ goto error_send_pdu;
+ } else {
+ rx_chan->seq_number++;
+ rx_chan->state = MBOX_OPEN;
+ mutex_unlock(&rx_chan->lock);
+ return res;
+ }
+error_send_pdu:
+ destroy_workqueue(rx_chan->receive_wq);
+error_recv_wq:
+ destroy_workqueue(rx_chan->cast_wq);
+error_cast_wq:
+ kfree(rx_chan);
+exit:
+ return res;
+}
+EXPORT_SYMBOL(mbox_channel_register);
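+
+/*
+ * Minimal client sketch (illustrative only, not taken from an existing
+ * client): provide a callback of type mbox_channel_cb_t and register it
+ * for an RX channel, e.g. 0x500:
+ *
+ *   static void my_rx_cb(u32 *data, u32 len, void *priv)
+ *   {
+ *           pr_info("mbox rx: %u words, first word 0x%x\n", len, data[0]);
+ *   }
+ *   ...
+ *   err = mbox_channel_register(0x500, my_rx_cb, my_priv);
+ */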
+
+/**
+ * mbox_channel_deregister() - Deregisters a channel
+ * @channel: Channel number.
+ *
+ * This routine is used to deregister a logical channel.
+ * It first does a sanity check on the requested channel availability
+ * and parameters, then it deletes the channel.
+ */
+int mbox_channel_deregister(u16 channel)
+{
+ struct channel_status *rx_chan = NULL;
+ struct list_head *pos, *rx_list;
+ int res = 0;
+ struct mbox_unit_status *mbox_unit;
+
+ dev_dbg(&channels.pdev->dev, " %s channel = %d\n", __func__, channel);
+ /* Check if provided channel number is valid */
+ if (!check_channel(channel, MBOX_RX)) {
+ dev_err(&channels.pdev->dev, "wrong mbox channel number %d\n",
+ channel);
+ res = -EINVAL;
+ goto exit;
+ }
+
+ rx_list = get_rx_list(get_mbox_id(channel));
+ if (rx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid\n");
+ res = -EINVAL;
+ goto exit;
+ }
+
+ mbox_unit = container_of(rx_list, struct mbox_unit_status, rx_chans);
+
+ /* Find the registered channel */
+ spin_lock(&mbox_unit->rx_lock);
+ list_for_each(pos, rx_list) {
+ rx_chan = list_entry(pos, struct channel_status, list);
+
+ if (rx_chan->channel == channel) {
+ dev_dbg(&channels.pdev->dev,
+ "channel found\n");
+ rx_chan->cb = NULL;
+ break;
+ }
+ rx_chan = NULL;
+ }
+ if (rx_chan == NULL) {
+ spin_unlock(&mbox_unit->rx_lock);
+ dev_err(&channels.pdev->dev, "channel %d not registered\n",
+ channel);
+ res = -EINVAL;
+ goto exit;
+ }
+ list_del(&rx_chan->list);
+ spin_unlock(&mbox_unit->rx_lock);
+ destroy_workqueue(rx_chan->cast_wq);
+ destroy_workqueue(rx_chan->receive_wq);
+ kfree(rx_chan);
+
+exit:
+ return res;
+}
+EXPORT_SYMBOL(mbox_channel_deregister);
+
+/**
+ * mbox_channel_send() - Send messages
+ * @msg: Pointer to mbox_channel_msg data structure.
+ *
+ * This routine is used to send messages over the registered logical
+ * TX channel. It first does a sanity check on the message parameters.
+ * If the channel is not found, it creates an entry for it and queues
+ * the message. If the channel is found, it puts the message on the
+ * pending list and, if the channel is OPEN, pushes the pending
+ * messages to the mailbox in FIFO order.
+ */
+int mbox_channel_send(struct mbox_channel_msg *msg)
+{
+ struct list_head *pos, *tx_list;
+ struct channel_status *tmp = NULL;
+ struct channel_status *tx_chan = NULL;
+ struct pending_elem *pending;
+ struct mbox_unit_status *mbox_unit;
+ int res = 0;
+
+ if (msg->length > MAILBOX_NR_OF_DATAWORDS || msg->length == 0) {
+ dev_err(&channels.pdev->dev, "data length incorrect\n");
+ res = -EINVAL;
+ goto exit;
+ }
+
+ if (!check_channel(msg->channel, MBOX_TX)) {
+ dev_err(&channels.pdev->dev, "wrong channel number %d\n",
+ msg->channel);
+ res = -EINVAL;
+ goto exit;
+ }
+
+ tx_list = get_tx_list(get_mbox_id(msg->channel));
+ if (tx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid\n");
+ res = -EINVAL;
+ goto exit;
+ }
+
+ mbox_unit = container_of(tx_list, struct mbox_unit_status, tx_chans);
+
+ spin_lock(&mbox_unit->tx_lock);
+ dev_dbg(&channels.pdev->dev, "send:tx_list=%x\tmbox_unit=%x\n",
+ (u32)tx_list, (u32)mbox_unit);
+ list_for_each(pos, tx_list) {
+ tmp = list_entry(pos, struct channel_status, list);
+ if (tmp->channel == msg->channel)
+ tx_chan = tmp;
+ }
+ spin_unlock(&mbox_unit->tx_lock);
+ /* Allocate pending element and add it to the list */
+ pending = kzalloc(sizeof(*pending), GFP_KERNEL);
+ if (pending == NULL) {
+ dev_err(&channels.pdev->dev,
+ "couldn't allocate memory for pending\n");
+ res = -ENOMEM;
+ goto exit;
+ }
+ pending->data = msg->data;
+ pending->length = msg->length;
+
+ if (tx_chan) {
+ mutex_lock(&tx_chan->lock);
+ list_add_tail(&pending->list, &tx_chan->tx.pending);
+ tx_chan->cb = msg->cb;
+ tx_chan->priv = msg->priv;
+ /* If channel is already opened start sending data */
+ if (tx_chan->state == MBOX_OPEN)
+ send_pdu(tx_chan, MBOX_CAST, tx_chan->channel);
+ /* Stop processing here */
+ mutex_unlock(&tx_chan->lock);
+ } else {
+ /* No channel found on the list, allocate new element */
+ tx_chan = kzalloc(sizeof(*tx_chan), GFP_KERNEL);
+ if (tx_chan == NULL) {
+ dev_err(&channels.pdev->dev,
+ "couldn't allocate memory for tx_chan\n");
+ kfree(pending);
+ res = -ENOMEM;
+ goto exit;
+ }
+ tx_chan->channel = msg->channel;
+ tx_chan->cb = msg->cb;
+ tx_chan->priv = msg->priv;
+ tx_chan->state = MBOX_CLOSE;
+ tx_chan->seq_number = CHANNEL_START_SEQUENCE_NUMBER;
+ INIT_LIST_HEAD(&tx_chan->tx.pending);
+ INIT_WORK(&tx_chan->open_msg, mbox_handle_open_msg);
+ INIT_WORK(&tx_chan->cast_msg, mbox_handle_cast_msg);
+ INIT_WORK(&tx_chan->receive_msg, mbox_handle_receive_msg);
+ mutex_init(&tx_chan->lock);
+ spin_lock(&mbox_unit->tx_lock);
+ list_add_tail(&tx_chan->list, tx_list);
+ spin_unlock(&mbox_unit->tx_lock);
+ mutex_lock(&tx_chan->lock);
+ list_add_tail(&pending->list, &tx_chan->tx.pending);
+ mutex_unlock(&tx_chan->lock);
+ }
+ return 0;
+
+exit:
+ return res;
+}
+EXPORT_SYMBOL(mbox_channel_send);
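+
+/*
+ * Illustrative send sketch (hypothetical client code): fill in a
+ * struct mbox_channel_msg and post it on a TX channel, e.g. 0x900.
+ * The cb is invoked once the data words have been pushed to the mailbox.
+ *
+ *   u32 words[2] = { 0xcafe, 0xbeef };
+ *   struct mbox_channel_msg msg = {
+ *           .channel = 0x900,
+ *           .data    = words,
+ *           .length  = 2,
+ *           .cb      = my_tx_done_cb,
+ *           .priv    = my_priv,
+ *   };
+ *   err = mbox_channel_send(&msg);
+ */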
+
+static void revoke_pending_msgs(struct channel_status *tx_chan)
+{
+ struct list_head *pos, *n;
+ struct pending_elem *pending;
+
+ list_for_each_safe(pos, n, &tx_chan->tx.pending) {
+ pending = list_entry(pos, struct pending_elem, list);
+
+ if (tx_chan->cb)
+ tx_chan->cb(pending->data, pending->length,
+ tx_chan->priv);
+ else
+ dev_err(&channels.pdev->dev,
+ "%s no callback provided\n", __func__);
+ list_del(&pending->list);
+ kfree(pending);
+ }
+}
+
+/**
+ * mbox_channel_revoke_messages() - Revoke pending messages
+ * @channel: Channel on which action is to be taken.
+ *
+ * This routine clears all pending messages from the TX channel.
+ * It searches for the channel, checks if there are pending
+ * messages, calls the registered callback for each of them if
+ * one is set, and deletes the messages from the pending list.
+ */
+int mbox_channel_revoke_messages(u16 channel)
+{
+ struct list_head *pos, *tx_list;
+ struct channel_status *tmp;
+ struct channel_status *tx_chan = NULL;
+ struct mbox_unit_status *mbox_unit;
+ int res = 0;
+
+ if (!check_channel(channel, MBOX_TX)) {
+ dev_err(&channels.pdev->dev,
+ "wrong channel number %d\n", channel);
+ return -EINVAL;
+ }
+
+ tx_list = get_tx_list(get_mbox_id(channel));
+ if (tx_list == NULL) {
+ dev_err(&channels.pdev->dev, "given mbox id is not valid\n");
+ return -EINVAL;
+ }
+
+ mbox_unit = container_of(tx_list, struct mbox_unit_status, tx_chans);
+
+ spin_lock(&mbox_unit->tx_lock);
+ list_for_each(pos, tx_list) {
+ tmp = list_entry(pos, struct channel_status, list);
+ if (tmp->channel == channel)
+ tx_chan = tmp;
+ }
+ spin_unlock(&mbox_unit->tx_lock);
+
+ if (tx_chan) {
+ mutex_lock(&tx_chan->lock);
+ revoke_pending_msgs(tx_chan);
+ mutex_unlock(&tx_chan->lock);
+ dev_dbg(&channels.pdev->dev, "channel %d cleared\n",
+ channel);
+ } else {
+ dev_err(&channels.pdev->dev, "no channel found\n");
+ res = -EINVAL;
+ }
+
+ dev_dbg(&channels.pdev->dev, "%s exiting %d\n", __func__, res);
+ return res;
+}
+EXPORT_SYMBOL(mbox_channel_revoke_messages);
+
+#if defined(CONFIG_DEBUG_FS)
+#define MBOXTEST_DEBUG 1
+#ifdef MBOXTEST_DEBUG
+#define DBG_TEST(x) x
+#else
+#define DBG_TEST(x)
+#endif
+
+#define MBOX_TEST_MAX_WORDS 3
+#define MBOX_RX_CHAN 0x500
+#define MBOX_TX_RX_CHANNEL_DIFF 0x400
+#define MBOX_MAX_NUM_TRANSFER 30000
+static int registration_done;
+/**
+ * struct mboxtest_data - mbox test via debugfs information
+ * @rx_buff: Buffer for incoming data
+ * @rx_pointer: Ptr to actual RX data buff
+ * @tx_buff: Buffer for outgoing data
+ * @tx_pointer: Ptr to actual TX data buff
+ * @tx_done: TX Transfer done indicator
+ * @rx_done: RX Transfer done indicator
+ * @received: Received words
+ * @xfer_words: Number of words in the current transfer
+ * @xfers: Number of transfers
+ * @words: Number of total words
+ * @channel: Channel test number
+ */
+struct mboxtest_data {
+ unsigned int *rx_buff;
+ unsigned int *rx_pointer;
+ unsigned int *tx_buff;
+ unsigned int *tx_pointer;
+ struct completion tx_done;
+ struct completion rx_done;
+ int received;
+ int xfer_words;
+ int xfers;
+ int words;
+ int channel;
+};
+
+static void mboxtest_receive_cb(u32 *data, u32 len, void *arg)
+{
+ struct mboxtest_data *mboxtest = (struct mboxtest_data *) arg;
+ int i;
+
+ printk(KERN_INFO "receive_cb.. data.= 0x%X, len = %d\n",
+ *data, len);
+ for (i = 0; i < len; i++)
+ *(mboxtest->rx_pointer++) = *(data++);
+
+ mboxtest->received += len;
+
+ printk(KERN_INFO "received = %d, words = %d\n",
+ mboxtest->received, mboxtest->words);
+ if (mboxtest->received >= mboxtest->words)
+ complete(&mboxtest->rx_done);
+ dev_dbg(&channels.pdev->dev, "%s exiting\n", __func__);
+}
+
+static void mboxtest_send_cb(u32 *data, u32 len, void *arg)
+{
+ struct mboxtest_data *mboxtest = (struct mboxtest_data *) arg;
+
+ printk(KERN_INFO "send_cb.. data.= 0x%X, len = %d\n",
+ *data, len);
+
+ complete(&mboxtest->tx_done);
+ dev_dbg(&channels.pdev->dev, "kernel:mboxtest_send_cb exiting\n");
+}
+
+static int mboxtest_transmit(struct mboxtest_data *mboxtest)
+{
+ int status = 0;
+ struct mbox_channel_msg msg;
+
+ dev_dbg(&channels.pdev->dev, "%s entering\n", __func__);
+ init_completion(&mboxtest->tx_done);
+
+ msg.channel = mboxtest->channel;
+ msg.data = mboxtest->tx_pointer;
+ msg.length = mboxtest->words;
+ msg.cb = mboxtest_send_cb;
+ msg.priv = mboxtest;
+
+ status = mbox_channel_send(&msg);
+ if (!status) {
+ mboxtest->tx_pointer += mboxtest->xfer_words;
+ wait_for_completion(&mboxtest->tx_done);
+ }
+
+ dev_dbg(&channels.pdev->dev, "%s exiting %d\n",
+ __func__, status);
+ return status;
+}
+
+static int transfer_test(struct mboxtest_data *mboxtest)
+{
+ int status = 0;
+ int len = 0;
+ int i;
+
+ len = mboxtest->words;
+
+ dev_dbg(&channels.pdev->dev, "%s entering\n", __func__);
+ /* Allocate buffers */
+ mboxtest->rx_buff = kzalloc(sizeof(unsigned int) * len, GFP_KERNEL);
+ if (!mboxtest->rx_buff) {
+ DBG_TEST(printk(KERN_INFO
+ "Cannot allocate mbox rx memory\n"));
+ status = -ENOMEM;
+ goto err1;
+ }
+ memset(mboxtest->rx_buff, '\0', sizeof(unsigned int) * len);
+
+ mboxtest->tx_buff = kzalloc(sizeof(unsigned int) * len, GFP_KERNEL);
+ if (!mboxtest->tx_buff) {
+ DBG_TEST(printk(KERN_INFO
+ "Cannot allocate mbox tx memory\n"));
+ status = -ENOMEM;
+ goto err2;
+ }
+ memset(mboxtest->tx_buff, '\0', sizeof(unsigned int) * len);
+
+ /* Generate data */
+ get_random_bytes((unsigned char *)mboxtest->tx_buff,
+ sizeof(unsigned int) * len);
+ /* Set pointers */
+ mboxtest->tx_pointer = mboxtest->tx_buff;
+ mboxtest->rx_pointer = mboxtest->rx_buff;
+ mboxtest->received = 0;
+ init_completion(&mboxtest->rx_done);
+
+ /* Start tx transfer test transfer */
+ status = mboxtest_transmit(mboxtest);
+ DBG_TEST(printk(KERN_INFO "xfer_words=%d\n",
+ mboxtest->xfer_words));
+ if (!status)
+ wait_for_completion(&mboxtest->rx_done);
+ for (i = 0; i < len; i++)
+ DBG_TEST(printk(KERN_INFO "%d -> TX:0x%X, RX:0x%X\n", i,
+ mboxtest->tx_buff[i], mboxtest->rx_buff[i]));
+
+ dev_dbg(&channels.pdev->dev, "%s exiting %d\n", __func__, status);
+ return status;
+err2:
+ kfree(mboxtest->rx_buff);
+err1:
+ return status;
+}
+
+static int mboxtest_prepare(struct mboxtest_data *mboxtest)
+{
+ int err = 0;
+
+ mboxtest->xfers = MBOX_MAX_NUM_TRANSFER;
+ /* Calculate number of words in each transfer */
+ mboxtest->xfer_words = mboxtest->words / mboxtest->xfers;
+
+ /* Trim to maximum data words per transfer */
+ if (mboxtest->xfer_words > MBOX_TEST_MAX_WORDS) {
+ DBG_TEST(printk(KERN_INFO "Recalculating xfers ...\n"));
+ mboxtest->xfer_words = MBOX_TEST_MAX_WORDS;
+ if (mboxtest->words % mboxtest->xfer_words)
+ mboxtest->xfers = (mboxtest->words /
+ mboxtest->xfer_words) + 1;
+ else
+ mboxtest->xfers = (mboxtest->words /
+ mboxtest->xfer_words);
+ }
+
+ DBG_TEST(printk(KERN_INFO "Params: chan=0x%X words=%d, xfers=%d\n",
+ mboxtest->channel, mboxtest->words,
+ mboxtest->xfers));
+
+ if (mbox_channel_register(mboxtest->channel,
+ mboxtest_receive_cb, mboxtest)) {
+ DBG_TEST(printk(KERN_INFO "Cannot register mbox channel\n"));
+ err = -ENOMEM;
+ goto err;
+ }
+
+ registration_done = true;
+ return 0;
+err:
+ return err;
+}
+
+static struct mboxtest_data mboxtest;
+/*
+ * Expected input: <nbr_channel> <nbr_word>, both parsed as hex
+ * Example: "echo 500 2"
+ */
+static ssize_t mbox_write_channel(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long nbr_channel;
+ unsigned long nbr_word;
+ char int_buf[16];
+ char *token;
+ char *val;
+
+ strncpy(int_buf, buf, sizeof(int_buf));
+ int_buf[sizeof(int_buf) - 1] = '\0';
+ token = int_buf;
+
+ /* Parse message */
+ val = strsep(&token, " ");
+ if ((val == NULL) || (strict_strtoul(val, 16, &nbr_channel) != 0))
+ nbr_channel = MBOX_RX_CHAN;
+
+ val = strsep(&token, " ");
+ if ((val == NULL) || (strict_strtoul(val, 16, &nbr_word) != 0))
+ nbr_word = 2;
+
+ dev_dbg(dev, "Will setup logical channel %ld\n", nbr_channel);
+ mboxtest.channel = nbr_channel;
+ mboxtest.words = nbr_word;
+
+ if (!registration_done)
+ mboxtest_prepare(&mboxtest);
+ else
+ dev_dbg(&channels.pdev->dev, "already registration done\n");
+
+ return count;
+}
+
+static ssize_t mbox_read_channel(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+
+ unsigned long i;
+ static bool config_done;
+
+ if (!config_done) {
+ config_done = true;
+ mboxtest.channel += MBOX_TX_RX_CHANNEL_DIFF;
+ }
+ dev_dbg(dev, "Will transfer %d words %d times at channel 0x%x\n",
+ mboxtest.words, mboxtest.xfers, mboxtest.channel);
+ for (i = 0; i < mboxtest.xfers; i++)
+ transfer_test(&mboxtest);
+
+ return 1;
+}
+static DEVICE_ATTR(channel, S_IWUGO | S_IRUGO, mbox_read_channel,
+ mbox_write_channel);
+
+#endif
+
+static int __init mbox_channel_probe(struct platform_device *pdev)
+{
+ int i, ret = 0;
+ struct mbox *mbox;
+
+ dev_dbg(&(pdev->dev), "Probing mailbox (pdev = 0x%X)...\n", (u32)pdev);
+
+ /* Register to given mailbox units (ids) */
+ for (i = 0; i < ARRAY_SIZE(mbox_ids); i++) {
+ mbox = mbox_setup(mbox_ids[i], mbox_cb, &mbox_ids[i]);
+ if (mbox == NULL) {
+ dev_err(&(pdev->dev), "Unable to setup mailbox %d\n",
+ mbox_ids[i]);
+ ret = -EBUSY;
+ goto exit;
+ }
+ channels.mbox_unit[i].mbox_id = mbox_ids[i];
+ channels.mbox_unit[i].mbox = mbox;
+ INIT_LIST_HEAD(&channels.mbox_unit[i].rx_chans);
+ INIT_LIST_HEAD(&channels.mbox_unit[i].tx_chans);
+ spin_lock_init(&channels.mbox_unit[i].rx_lock);
+ spin_lock_init(&channels.mbox_unit[i].tx_lock);
+ }
+
+ channels.pdev = pdev;
+
+ dev_dbg(&(pdev->dev), "Mailbox channel driver loaded\n");
+#if defined(CONFIG_DEBUG_FS)
+ ret = device_create_file(&(pdev->dev), &dev_attr_channel);
+ if (ret != 0)
+ dev_warn(&(pdev->dev),
+ "Unable to create mbox_channel sysfs entry");
+
+
+#endif
+exit:
+ return ret;
+}
+
+static struct platform_driver mbox_channel_driver = {
+ .driver = {
+ .name = "mbox_channel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mbox_channel_init(void)
+{
+ if (!machine_is_u5500())
+ return 0;
+
+ platform_device_register_simple("mbox_channel", 0, NULL, 0);
+
+ return platform_driver_probe(&mbox_channel_driver, mbox_channel_probe);
+}
+module_init(mbox_channel_init);
+
+static void __exit mbox_channel_exit(void)
+{
+ platform_driver_unregister(&mbox_channel_driver);
+}
+module_exit(mbox_channel_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MBOX channels driver");
diff --git a/drivers/misc/modem_audio/Kconfig b/drivers/misc/modem_audio/Kconfig
new file mode 100644
index 00000000000..5396868a9de
--- /dev/null
+++ b/drivers/misc/modem_audio/Kconfig
@@ -0,0 +1,6 @@
+config MODEM_AUDIO_DRIVER
+ bool "Modem Audio Driver"
+ depends on (U5500_MBOX && UX500_SOC_DB5500)
+ help
+	  This module is used to read and write data between the APE and
+	  the Access side on the u5500 platform.
diff --git a/drivers/misc/modem_audio/Makefile b/drivers/misc/modem_audio/Makefile
new file mode 100644
index 00000000000..a5c1740ea48
--- /dev/null
+++ b/drivers/misc/modem_audio/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MODEM_AUDIO_DRIVER) += mad.o
+
diff --git a/drivers/misc/modem_audio/mad.c b/drivers/misc/modem_audio/mad.c
new file mode 100644
index 00000000000..d31d78ba3f2
--- /dev/null
+++ b/drivers/misc/modem_audio/mad.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2011
+ *
+ * Modem Audio Driver
+ *
+ * Author: Rahul Venkatram <rahul.venkatram@stericsson.com> for ST-Ericsson
+ *         Haridhar Kalvala <haridhar.kalvala@stericsson.com> for ST-Ericsson
+ *         Amaresh Mulage <amaresh.mulage@stericsson.com> for ST-Ericsson
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/fcntl.h>
+#include <linux/spinlock.h>
+#include <mach/mbox_channels-db5500.h>
+
+MODULE_DESCRIPTION("Modem Audio Driver");
+MODULE_LICENSE("GPL v2");
+
+/**
+ * -----------------------------------------------------
+ * | | | |
+ * | Data[0] |Data[1] |Data[2] |===>Data word 32 bits
+ * -----------------------------------------------------
+ * | MESSAGE |Data | Index |
+ * | TYPE |length | number |===>READ/WRITE message
+ * -----------------------------------------------------
+ * -----------------------------------------------------
+ * | MESSAGE | DSP SHM addr | max_no_of_buffers |===> READ
+ * | TYPE | to write data | ||buffersize |WRITE SETUP message
+ * -----------------------------------------------------
+ */
+
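+/*
+ * Example (illustrative): a WRITE SETUP message from the DSP carries
+ * data[0] = VCS_MBOX_MSG_WRITE_IF_SETUP, data[1] = the DSP shared-memory
+ * address to write to and data[2] = (max_no_of_buffers << 16) | buffer_size;
+ * it is acknowledged with VCS_MBOX_MSG_WRITE_IF_SETUP_ACK (see
+ * mad_receive_cb()).
+ */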
+
+#define MAD_NAME "mad"
+/* Bit mask */
+#define MASK_UPPER_WORD 0xFFFF
+
+/* channel values for each direction */
+#define CHANNEL_NUM_RX 0x500
+#define CHANNEL_NUM_TX 0x900
+
+/*
+ * Maximum number of datawords which can be sent
+ * in the mailbox each word is 32 bits
+ */
+#define MAX_NR_OF_DATAWORDS MAILBOX_NR_OF_DATAWORDS
+#define MAX_NUM_RX_BUFF NUM_DSP_BUFFER
+#define NR_OF_DATAWORDS_REQD_FOR_ACK 1
+
+/**
+ * Message types, must be identical in DSP Side
+ * VCS_MBOX_MSG_WRITE_IF_SETUP : DSP -> ARM
+ * VCS_MBOX_MSG_WRITE_IF_SETUP_ACK : ARM -> DSP
+ * VCS_MBOX_MSG_READ_IF_SETUP : DSP -> ARM
+ * VCS_MBOX_MSG_READ_IF_SETUP_ACK : ARM -> DSP
+ * VCS_MBOX_MSG_IF_ENC_DATA : ARM -> DSP
+ * VCS_MBOX_MSG_IF_DEC_DATA : DSP -> ARM
+ */
+#define VCS_MBOX_MSG_WRITE_IF_SETUP 0x200
+#define VCS_MBOX_MSG_WRITE_IF_SETUP_ACK 0x201
+#define VCS_MBOX_MSG_READ_IF_SETUP 0x400
+#define VCS_MBOX_MSG_READ_IF_SETUP_ACK 0x401
+#define VCS_MBOX_MSG_IF_ENC_DATA 0x80
+#define VCS_MBOX_MSG_IF_DEC_DATA 0x100
+
+/**
+ * struct mad_data - This structure holds the state of the Modem Audio Driver.
+ *
+ * @dsp_shm_write_ptr : Ptr to the first TX buffer in DSP
+ * @dsp_shm_read_ptr : Ptr to the first RX buffer in DSP
+ * @max_tx_buffs : No. of DSP buffers available to write
+ * @max_rx_buffs : No. of DSP buffers available to read
+ * @write_offset : Size of each buffer in the DSP
+ * @read_offset : Size of each buffer in the DSP
+ * @rx_buff : Buffer for incoming data
+ * @tx_buff : Buffer for outgoing data
+ * @tx_buffer_num : Buffer counter for writing to DSP
+ * @rx_buffer_num : Buffer counter for data received from the DSP
+ * @rx_buffer_read : Buffer counter for data read by userspace
+ * @data_written : Number of received RX data messages not yet read
+ * @read_setup_msg : flag indicating that the read setup message has arrived
+ * @open_check : flag indicating that the device is already open
+ * @lock : lock for r/w message queue
+ */
+struct mad_data {
+ void __iomem *dsp_shm_write_ptr;
+ void __iomem *dsp_shm_read_ptr;
+ int max_tx_buffs;
+ int max_rx_buffs;
+ int write_offset;
+ int read_offset;
+ u32 *rx_buff;
+ u32 *tx_buff;
+ int tx_buffer_num;
+ int rx_buffer_num;
+ int rx_buffer_read;
+ u32 data_written;
+ bool read_setup_msg;
+ bool open_check;
+ wait_queue_head_t readq;
+ spinlock_t lock;
+};
+
+static struct mad_data *mad;
+
+static void mad_receive_cb(u32 *data, u32 length, void *priv);
+static ssize_t mad_read(struct file *filp, char __user *buff, size_t count,
+ loff_t *offp);
+static ssize_t mad_write(struct file *filp, const char __user *buff,
+ size_t count, loff_t *offp);
+static unsigned int mad_select(struct file *filp, poll_table *wait);
+static void mad_send_cb(u32 *data, u32 len, void *arg);
+static int mad_open(struct inode *ino, struct file *filp);
+static int mad_close(struct inode *ino, struct file *filp);
+
+static const struct file_operations mad_fops = {
+ .release = mad_close,
+ .open = mad_open,
+ .read = mad_read,
+ .write = mad_write,
+ .poll = mad_select,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice mad_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = MAD_NAME,
+ .fops = &mad_fops
+};
+
+/**
+ * mad_send_cb - This function is the default callback for send.
+ * @data - Pointer to the data buffer
+ * @len - Data buffer length in words
+ * @arg - Private data pointer associated with the channel
+ */
+static void mad_send_cb(u32 *data, u32 len, void *arg)
+{
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+}
+
+/**
+ * mad_receive_cb - This callback function is for receiving data from mailbox
+ * @data - Pointer to the data buffer
+ * @length - Number of data words received
+ * @priv - Private data pointer associated with the channel
+ */
+static void mad_receive_cb(u32 *data, u32 length, void *priv)
+{
+ struct mad_data *mad = priv;
+ struct mbox_channel_msg msg;
+ u32 ack_to_dsp;
+ unsigned long flags;
+
+ /* setup message for write address */
+ if (*data == VCS_MBOX_MSG_WRITE_IF_SETUP) {
+
+ ack_to_dsp = VCS_MBOX_MSG_WRITE_IF_SETUP_ACK;
+
+ /* If the setup message comes again, unmap the old mapping */
+ if (mad->dsp_shm_write_ptr != NULL) {
+ iounmap(mad->dsp_shm_write_ptr);
+ mad->dsp_shm_write_ptr = NULL;
+ mad->write_offset = 0;
+ mad->max_tx_buffs = 0;
+ }
+
+ /* convert offset to uint size */
+ mad->write_offset = (data[2] & MASK_UPPER_WORD);
+ mad->max_tx_buffs = (data[2] >> 16);
+
+ mad->dsp_shm_write_ptr = ioremap(data[1],
+ mad->max_tx_buffs * mad->write_offset);
+ if (mad->dsp_shm_write_ptr == NULL)
+ dev_err(mad_dev.this_device, "incorrect write address");
+
+ /* Initialize all buffer numbers */
+ mad->tx_buffer_num = 0;
+
+ /* Send ACK to the DSP */
+ msg.channel = CHANNEL_NUM_TX;
+ msg.data = &ack_to_dsp;
+ msg.length = NR_OF_DATAWORDS_REQD_FOR_ACK;
+ msg.cb = mad_send_cb;
+ msg.priv = mad;
+
+ if (mbox_channel_send(&msg))
+ dev_err(mad_dev.this_device, "%s: can't send data\n",
+ __func__);
+
+ } /* setup message for reading SHM */
+ else if (*data == VCS_MBOX_MSG_READ_IF_SETUP) {
+
+ ack_to_dsp = VCS_MBOX_MSG_READ_IF_SETUP_ACK;
+
+ /* If the setup message comes again, unmap the old mapping */
+ if (mad->dsp_shm_read_ptr != NULL) {
+ iounmap(mad->dsp_shm_read_ptr);
+ mad->dsp_shm_read_ptr = NULL;
+ mad->read_offset = 0;
+ mad->max_rx_buffs = 0;
+ }
+
+ /*convert offset to uint size*/
+ mad->read_offset = (data[2] & MASK_UPPER_WORD);
+ mad->max_rx_buffs = data[2] >> 16;
+
+ mad->dsp_shm_read_ptr = ioremap(data[1],
+ mad->max_rx_buffs * mad->read_offset);
+
+ /* Initialize all buffer numbers and flags */
+ mad->rx_buffer_num = 0;
+ mad->rx_buffer_read = 0;
+ mad->data_written = 0;
+
+ /* Send ACK to the DSP */
+ msg.channel = CHANNEL_NUM_TX;
+ msg.data = &ack_to_dsp;
+ msg.length = NR_OF_DATAWORDS_REQD_FOR_ACK;
+ msg.cb = mad_send_cb;
+ msg.priv = mad;
+
+ if (mbox_channel_send(&msg))
+ dev_err(mad_dev.this_device, "%s: can't send data\n",
+ __func__);
+
+ /* allow read */
+ spin_lock_irqsave(&mad->lock, flags);
+ mad->read_setup_msg = true;
+ spin_unlock_irqrestore(&mad->lock, flags);
+ /* blocked in select() */
+ wake_up_interruptible(&mad->readq);
+
+ } else if (*data == VCS_MBOX_MSG_IF_DEC_DATA) {
+ /*
+ * Check if we have a valid message with a proper length,
+ * otherwise ignore it.
+ */
+ if ((data[1] <= 0) || (mad->rx_buff == NULL)
+ || (mad->dsp_shm_read_ptr == NULL)) {
+ if (mad->rx_buff == NULL)
+ dev_warn(mad_dev.this_device, "%s :MAD closed",
+ __func__);
+ else
+ dev_warn(mad_dev.this_device, "%s :0-len msg",
+ __func__);
+ } else {
+ mad->rx_buff[mad->rx_buffer_num] = data[1];
+ mad->rx_buffer_num++;
+
+ /* store the offset */
+ mad->rx_buff[mad->rx_buffer_num] = data[2];
+
+ if (mad->rx_buffer_num < ((MAX_NUM_RX_BUFF * 2)-1))
+ mad->rx_buffer_num++;
+ else
+ mad->rx_buffer_num = 0;
+
+ spin_lock_irqsave(&mad->lock, flags);
+ mad->data_written++;
+
+ if (mad->data_written > MAX_NUM_RX_BUFF) {
+ dev_warn(mad_dev.this_device,
+ "%s :Read msg overflow = %u\n",
+ __func__ , mad->data_written);
+ /*
+ * Do not exceed MAX_NUM_RX_BUFF buffers.
+ * TODO: overflow control
+ */
+ mad->data_written = MAX_NUM_RX_BUFF ;
+ }
+ spin_unlock_irqrestore(&mad->lock, flags);
+ wake_up_interruptible(&mad->readq);
+ }
+ } else {
+ /* received Invalid message */
+ dev_err(mad_dev.this_device, "%s : Invalid Msg", __func__);
+ }
+}
+
+static ssize_t mad_read(struct file *filp, char __user *buff, size_t count,
+ loff_t *offp)
+{
+ unsigned long flags;
+ unsigned int size = 0;
+ void __iomem *shm_ptr = NULL;
+
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ if (!(mad->data_written > 0)) {
+ if (wait_event_interruptible(mad->readq,
+ ((mad->data_written > 0) &&
+ (mad->dsp_shm_read_ptr != NULL))))
+ return -ERESTARTSYS;
+ }
+
+ if (mad->dsp_shm_read_ptr == NULL) {
+ dev_err(mad_dev.this_device, "%s :pointer err", __func__);
+ return -EINVAL ;
+ }
+
+ if (mad->rx_buff[mad->rx_buffer_read] > count) {
+ /*
+ * Size of message is greater than the buffer; this shouldn't
+ * happen since we ensured that the message size is smaller
+ * than the buffer length.
+ */
+ dev_err(mad_dev.this_device, "%s : incorrect length", __func__);
+ return -EFAULT;
+ }
+ size = mad->rx_buff[mad->rx_buffer_read];
+ mad->rx_buff[mad->rx_buffer_read] = 0;
+ mad->rx_buffer_read++;
+ shm_ptr = (u8 *)(mad->dsp_shm_read_ptr +
+ (mad->rx_buff[mad->rx_buffer_read] * mad->read_offset));
+ if (copy_to_user(buff, shm_ptr, size)) {
+ dev_err(mad_dev.this_device, "%s :copy to user", __func__);
+ return -EFAULT;
+ }
+
+ if (mad->rx_buffer_read < ((MAX_NUM_RX_BUFF*2)-1))
+ mad->rx_buffer_read++;
+ else
+ mad->rx_buffer_read = 0;
+
+ spin_lock_irqsave(&mad->lock, flags);
+ mad->data_written--;
+ if (mad->data_written < 0) {
+ /* Means wrong read*/
+ mad->data_written = 0;
+ dev_err(mad_dev.this_device, "%s : data receive error", __func__);
+ }
+ spin_unlock_irqrestore(&mad->lock, flags);
+ return size;
+}
+
+static ssize_t mad_write(struct file *filp, const char __user *buff, size_t count,
+ loff_t *offp)
+{
+ int retval = 0;
+ void __iomem *dsp_write_address;
+ struct mbox_channel_msg msg;
+
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ /* check for valid write pointer else skip writing*/
+ if (mad->dsp_shm_write_ptr == NULL) {
+ dev_err(mad_dev.this_device, "%s :Illegal memory", __func__);
+ return -EFAULT;
+ }
+
+ dsp_write_address = (mad->dsp_shm_write_ptr +
+ (mad->tx_buffer_num * mad->write_offset));
+
+ if (copy_from_user(dsp_write_address, buff, count)) {
+ dev_err(mad_dev.this_device, "%s:copy_from_user\n", __func__);
+ return -EFAULT;
+ }
+
+ mad->tx_buff[0] = VCS_MBOX_MSG_IF_ENC_DATA;
+ mad->tx_buff[1] = count;
+ mad->tx_buff[2] = mad->tx_buffer_num;
+
+ if (mad->tx_buffer_num < (mad->max_tx_buffs-1))
+ mad->tx_buffer_num++;
+ else
+ mad->tx_buffer_num = 0;
+
+ msg.channel = CHANNEL_NUM_TX;
+ msg.data = mad->tx_buff;
+ msg.length = MAX_NR_OF_DATAWORDS;
+ msg.cb = mad_send_cb;
+ msg.priv = mad;
+
+ retval = mbox_channel_send(&msg);
+ if (retval) {
+ dev_err(mad_dev.this_device, "%s:can't send data", __func__);
+ return retval;
+ }
+ return count;
+}
+
+static unsigned int mad_select(struct file *filp, poll_table *wait)
+{
+ unsigned int mask = 0;
+ unsigned long flags;
+
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ poll_wait(filp, &mad->readq, wait);
+ spin_lock_irqsave(&mad->lock, flags);
+
+ if ((true == mad->read_setup_msg) && (mad->data_written > 0))
+ mask |= POLLIN | POLLRDNORM; /* allow readable */
+ spin_unlock_irqrestore(&mad->lock, flags);
+
+ return mask;
+}
+
+static int mad_open(struct inode *ino, struct file *filp)
+{
+ int err = 0;
+
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ if (mad->open_check == true) {
+ dev_err(mad_dev.this_device, "%s :Already opened", __func__);
+ return -EFAULT;
+ }
+
+ mad->rx_buff = kzalloc(MAX_NUM_RX_BUFF * 2 *
+ sizeof(*mad->rx_buff), GFP_KERNEL);
+
+ if (mad->rx_buff == NULL) {
+ dev_err(mad_dev.this_device, "%s:RX memory\n", __func__);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ mad->tx_buff = kzalloc(MAX_NR_OF_DATAWORDS * sizeof(*mad->tx_buff),
+ GFP_KERNEL);
+ if (mad->tx_buff == NULL) {
+ dev_err(mad_dev.this_device, "%s:TX memory\n", __func__);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /* Init spinlock for critical section access*/
+ spin_lock_init(&mad->lock);
+ init_waitqueue_head(&(mad->readq));
+
+ err = mbox_channel_register(CHANNEL_NUM_RX, mad_receive_cb, mad);
+ if (err) {
+ dev_err(mad_dev.this_device, "%s: register err", __func__);
+ err = -EFAULT;
+ goto error;
+ }
+ mad->open_check = true;
+
+ return 0;
+error:
+ kfree(mad->rx_buff);
+ mad->rx_buff = NULL;
+ kfree(mad->tx_buff);
+ mad->tx_buff = NULL;
+ return err;
+}
+
+static int mad_close(struct inode *ino, struct file *filp)
+{
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ if (mbox_channel_deregister(CHANNEL_NUM_RX)) {
+ dev_err(mad_dev.this_device, "%s:deregister err", __func__);
+ return -EFAULT;
+ }
+ kfree(mad->rx_buff);
+ mad->rx_buff = NULL;
+ kfree(mad->tx_buff);
+ mad->tx_buff = NULL;
+ mad->data_written = 0;
+ mad->rx_buffer_num = 0;
+ mad->rx_buffer_read = 0;
+ mad->open_check = false;
+
+ return 0;
+}
+
+static int __init mad_init(void)
+{
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ mad = kzalloc(sizeof(*mad), GFP_KERNEL);
+ if (mad == NULL) {
+ dev_err(mad_dev.this_device, "%s :MAD failed", __func__);
+ return -ENOMEM;
+ }
+
+ return misc_register(&mad_dev);
+}
+module_init(mad_init);
+
+static void __exit mad_exit(void)
+{
+ dev_dbg(mad_dev.this_device, "%s", __func__);
+
+ if (mad->dsp_shm_write_ptr != NULL) {
+ iounmap(mad->dsp_shm_write_ptr);
+ mad->dsp_shm_write_ptr = NULL;
+ }
+
+ if (mad->dsp_shm_read_ptr != NULL) {
+ iounmap(mad->dsp_shm_read_ptr);
+ mad->dsp_shm_read_ptr = NULL;
+ }
+
+ kfree(mad);
+ misc_deregister(&mad_dev);
+}
diff --git a/drivers/misc/sim_detect.c b/drivers/misc/sim_detect.c
new file mode 100644
index 00000000000..e67f4fad3db
--- /dev/null
+++ b/drivers/misc/sim_detect.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: BIBEK BASU <bibek.basu@stericsson.com>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include <linux/modem/modem_client.h>
+#include <mach/sim_detect.h>
+#include <linux/regulator/consumer.h>
+
+/* time in millisec */
+#define TIMER_DELAY 10
+
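+/* Driver state: deferred modem-release work, debounce timer, SIM voltage class and regulator */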
+struct sim_detect {
+ struct work_struct timer_expired;
+ struct device *dev;
+ struct modem *modem;
+ struct hrtimer timer;
+ struct mutex lock;
+ int voltage;
+ struct regulator *vinvsim_regulator;
+ bool regulator_enabled;
+};
+
+static ssize_t show_voltage(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sim_detect *data = dev_get_drvdata(dev);
+ int ret, len;
+
+ ret = mutex_lock_interruptible(&data->lock);
+ if (ret < 0)
+ return ret;
+
+ len = sprintf(buf, "%i\n", data->voltage);
+
+ mutex_unlock(&data->lock);
+
+ return len;
+}
+
+static ssize_t write_voltage(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sim_detect *sim_detect = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ /* check input */
+ if (strict_strtol(buf, 0, &val) != 0) {
+ dev_err(dev, "Invalid voltage class configured.\n");
+ return -EINVAL;
+ }
+
+ switch (val) {
+ case -1:
+ case 0:
+ case 1800000:
+ case 3000000:
+ break;
+ default:
+ dev_err(dev, "Invalid voltage class configured.\n");
+ return -EINVAL;
+ }
+
+ /* lock */
+ ret = mutex_lock_interruptible(&sim_detect->lock);
+ if (ret < 0)
+ return ret;
+
+ /* update state */
+ sim_detect->voltage = val;
+
+ /* call regulator */
+ switch (sim_detect->voltage) {
+ case 0:
+ /* SIM voltage is unknown, turn on regulator for 3 V SIM */
+ case 3000000:
+ /* Vinvsim supply is used only for 3 V SIM */
+ if (!sim_detect->regulator_enabled) {
+ ret = regulator_enable(sim_detect->vinvsim_regulator);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulator.\n");
+ goto out_unlock;
+ }
+ sim_detect->regulator_enabled = true;
+ }
+ break;
+ case 1800000:
+ case -1:
+ /* Vbatvsim is used otherwise */
+ if (sim_detect->regulator_enabled) {
+ regulator_disable(sim_detect->vinvsim_regulator);
+ sim_detect->regulator_enabled = false;
+ }
+ }
+
+out_unlock:
+ /* unlock and return */
+ mutex_unlock(&sim_detect->lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(voltage, S_IWUGO | S_IRUGO, show_voltage, write_voltage);
+
+static struct attribute *sim_attributes[] = {
+ &dev_attr_voltage.attr,
+ NULL
+};
+
+static const struct attribute_group sim_attr_group = {
+ .attrs = sim_attributes,
+};
+
+static void inform_modem_release(struct work_struct *work)
+{
+ struct sim_detect *sim_detect =
+ container_of(work, struct sim_detect, timer_expired);
+
+ /* call Modem Access Framework api to release modem */
+ modem_release(sim_detect->modem);
+}
+
+static enum hrtimer_restart timer_callback(struct hrtimer *timer)
+{
+ struct sim_detect *sim_detect =
+ container_of(timer, struct sim_detect, timer);
+
+ schedule_work(&sim_detect->timer_expired);
+ return HRTIMER_NORESTART;
+}
+
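+/* SIM hot-swap interrupt: wake the modem, then arm a timer so it is released again shortly after */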
+static irqreturn_t sim_activity_irq(int irq, void *dev)
+{
+ struct sim_detect *sim_detect = dev;
+
+ /* call Modem Access Framework api to acquire modem */
+ modem_request(sim_detect->modem);
+ /* start the timer for 10ms */
+ hrtimer_start(&sim_detect->timer,
+ ktime_set(0, TIMER_DELAY*NSEC_PER_MSEC),
+ HRTIMER_MODE_REL);
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM
+/**
+ * sim_detect_suspend() - Put the SIM detect driver into the suspend state.
+ * @dev: pointer to device structure.
+ *
+ * This routine checks for ongoing communication with the modem by
+ * examining modem_get_usage() and the pending state of the release work,
+ * and refuses suspend while modem communication is still in progress.
+ */
+static int sim_detect_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sim_detect *sim_detect = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s called...\n", __func__);
+ /* if the modem is in use, prevent system suspend */
+ if (modem_get_usage(sim_detect->modem)
+ || work_pending(&sim_detect->timer_expired))
+ return -EBUSY;
+ else
+ return 0;
+}
+
+static const struct dev_pm_ops sim_detect_dev_pm_ops = {
+ .suspend = sim_detect_suspend,
+};
+#endif
+
+
+static int __devinit sim_detect_probe(struct platform_device *pdev)
+{
+ struct sim_detect_platform_data *plat = dev_get_platdata(&pdev->dev);
+ struct sim_detect *sim_detect;
+ int ret;
+
+ if (plat == NULL) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -ENODEV;
+ }
+
+ sim_detect = kzalloc(sizeof(struct sim_detect), GFP_KERNEL);
+ if (sim_detect == NULL) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* initialize data */
+ mutex_init(&sim_detect->lock);
+ sim_detect->voltage = 0;
+
+ sim_detect->dev = &pdev->dev;
+ INIT_WORK(&sim_detect->timer_expired, inform_modem_release);
+ hrtimer_init(&sim_detect->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ sim_detect->timer.function = timer_callback;
+
+ sim_detect->modem = modem_get(sim_detect->dev, "u8500-shrm-modem");
+ if (IS_ERR(sim_detect->modem)) {
+ ret = PTR_ERR(sim_detect->modem);
+ dev_err(sim_detect->dev, "Could not retrieve the modem\n");
+ goto out_free;
+ }
+
+ /* set drvdata */
+ platform_set_drvdata(pdev, sim_detect);
+
+ /* request irq */
+ ret = request_threaded_irq(plat->irq_num,
+ NULL, sim_activity_irq,
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING |
+ IRQF_NO_SUSPEND,
+ "sim activity", sim_detect);
+ if (ret < 0)
+ goto out_put_modem;
+
+ /* get regulator */
+ sim_detect->regulator_enabled = false;
+ sim_detect->vinvsim_regulator = regulator_get(sim_detect->dev,
+ "vinvsim");
+ if (IS_ERR(sim_detect->vinvsim_regulator)) {
+ dev_err(&pdev->dev,
+ "Failed to get regulator. (dev_name %s).\n",
+ dev_name(sim_detect->dev));
+ ret = PTR_ERR(sim_detect->vinvsim_regulator);
+ goto out_free_irq;
+ }
+
+ /* register sysfs entry */
+ ret = sysfs_create_group(&pdev->dev.kobj, &sim_attr_group);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create attribute group: %d\n", ret);
+ goto out_free_regulator;
+ }
+
+ return 0;
+
+out_free_regulator:
+ regulator_put(sim_detect->vinvsim_regulator);
+out_free_irq:
+ free_irq(plat->irq_num, sim_detect);
+out_put_modem:
+ modem_put(sim_detect->modem);
+ platform_set_drvdata(pdev, NULL);
+out_free:
+ kfree(sim_detect);
+ return ret;
+}
+
+static int __devexit sim_detect_remove(struct platform_device *pdev)
+{
+ struct sim_detect_platform_data *plat = dev_get_platdata(&pdev->dev);
+ struct sim_detect *sim_detect = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &sim_attr_group);
+ regulator_put(sim_detect->vinvsim_regulator);
+ free_irq(plat->irq_num, sim_detect);
+ modem_put(sim_detect->modem);
+ platform_set_drvdata(pdev, NULL);
+ kfree(sim_detect);
+ return 0;
+}
+
+static struct platform_driver sim_detect_driver = {
+ .driver = {
+ .name = "sim-detect",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &sim_detect_dev_pm_ops,
+#endif
+ },
+ .probe = sim_detect_probe,
+ .remove = __devexit_p(sim_detect_remove),
+};
+
+static int __init sim_detect_init(void)
+{
+ return platform_driver_register(&sim_detect_driver);
+}
+module_init(sim_detect_init);
+
+static void __exit sim_detect_exit(void)
+{
+ platform_driver_unregister(&sim_detect_driver);
+}
+module_exit(sim_detect_exit);
+
+MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
+MODULE_DESCRIPTION("Detects SIM Hot Swap and wakes modem");
+MODULE_ALIAS("platform:sim-detect");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/stm.c b/drivers/misc/stm.c
new file mode 100644
index 00000000000..33bb26c27ca
--- /dev/null
+++ b/drivers/misc/stm.c
@@ -0,0 +1,850 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson.
+ * Philippe Langlais <philippe.Langlais@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <trace/stm.h>
+
+/* STM Registers */
+#define STM_CR (stm.virtbase)
+#define STM_MMC (stm.virtbase + 0x008)
+#define STM_TER (stm.virtbase + 0x010)
+#define STMPERIPHID0 (stm.virtbase + 0xFC0)
+#define STMPERIPHID1 (stm.virtbase + 0xFC8)
+#define STMPERIPHID2 (stm.virtbase + 0xFD0)
+#define STMPERIPHID3 (stm.virtbase + 0xFD8)
+#define STMPCELLID0 (stm.virtbase + 0xFE0)
+#define STMPCELLID1 (stm.virtbase + 0xFE8)
+#define STMPCELLID2 (stm.virtbase + 0xFF0)
+#define STMPCELLID3 (stm.virtbase + 0xFF8)
+
+#define STM_CLOCK_SHIFT 6
+#define STM_CLOCK_MASK 0x1C0
+
+/* Hardware mode for all sources */
+#define STM_MMC_DEFAULT CONFIG_STM_DEFAULT_MASTERS_MODES
+
+/* Max number of channels (multiple of 256) */
+#define STM_NUMBER_OF_CHANNEL CONFIG_STM_NUMBER_OF_CHANNEL
+
+/* Number of dynamically allocated channels used by stm_trace_buffer() */
+#define NB_KERNEL_DYNAMIC_CHANNEL 128
+
+static struct stm_device {
+ const struct stm_platform_data *pdata;
+ void __iomem *virtbase;
+ /* Used to register the allocated channels */
+ DECLARE_BITMAP(ch_bitmap, STM_NUMBER_OF_CHANNEL);
+} stm;
+
+volatile struct stm_channel __iomem *stm_channels;
+
+static struct cdev cdev;
+static struct class *stm_class;
+static int stm_major;
+
+static DEFINE_SPINLOCK(lock);
+
+/* Middle value for clock divisor */
+static enum clock_div stm_clockdiv = STM_CLOCK_DIV8;
+
+/* Default value for STM output connection */
+static enum stm_connection_type stm_connection = STM_DEFAULT_CONNECTION;
+
+#define STM_BUFSIZE 256
+struct channel_data {
+ DECLARE_BITMAP(bitmap, STM_NUMBER_OF_CHANNEL);
+ int numero;
+ spinlock_t lock;
+ u8 data_buffer[STM_BUFSIZE];
+};
+
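+/* Shared scratch buffer and lock used by stm_printk() */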
+static u64 stm_printk_buf[1024/sizeof(u64)];
+static arch_spinlock_t stm_buf_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+
+static char *mipi60 = "none";
+module_param(mipi60, charp, S_IRUGO);
+MODULE_PARM_DESC(mipi60, "STM Trace to output on probe2 of mipi60 "
+ "('none' or 'ape' or 'modem')");
+
+static char *mipi34 = "none";
+module_param(mipi34, charp, S_IRUGO);
+MODULE_PARM_DESC(mipi34, "STM Trace to output on mipi34 "
+ "('none' or 'ape' or 'modem')");
+
+static char *microsd = "none";
+module_param(microsd, charp, S_IRUGO);
+MODULE_PARM_DESC(microsd, "STM Trace to output on SD card connector "
+ "('none' or 'ape' or 'modem')");
+
+static unsigned int stm_ter;
+module_param(stm_ter, uint, 0);
+MODULE_PARM_DESC(stm_ter, "Value for STM_TER (trace control register). "
+ "Should be set by user as environment variable stm.stm_ter");
+
+#define IS_APE_ON_MIPI34 (mipi34 && !strcmp(mipi34, "ape"))
+#define IS_APE_ON_MIPI60 (mipi60 && !strcmp(mipi60, "ape"))
+#define IS_APE_ON_MICROSD (microsd && !strcmp(microsd, "ape"))
+#define IS_MODEM_ON_MICROSD (microsd && !strcmp(microsd, "modem"))
+
+static int stm_connection_set(void *data, u64 val);
+
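+/* Reserve the first free channel at or above 'offset'; returns STM_NUMBER_OF_CHANNEL if none is free */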
+int stm_alloc_channel(int offset)
+{
+ int channel;
+
+ /* Look for a free channel from offset */
+ do {
+ channel = find_next_zero_bit(stm.ch_bitmap,
+ STM_NUMBER_OF_CHANNEL, offset);
+ } while ((channel < STM_NUMBER_OF_CHANNEL)
+ && test_and_set_bit(channel, stm.ch_bitmap));
+ return channel;
+}
+EXPORT_SYMBOL(stm_alloc_channel);
+
+void stm_free_channel(int channel)
+{
+ clear_bit(channel, stm.ch_bitmap);
+}
+EXPORT_SYMBOL(stm_free_channel);
+
+static int stm_get_channel(struct channel_data *ch_data, int __user *arg)
+{
+ int channel, err;
+
+ channel = stm_alloc_channel(0);
+ if (channel < STM_NUMBER_OF_CHANNEL) {
+ /* One free found ! */
+ err = put_user(channel, arg);
+ if (err)
+ stm_free_channel(channel);
+ else
+ /* Register it in the context of the file */
+ set_bit(channel, ch_data->bitmap);
+ } else
+ err = -ENOMEM;
+ return err;
+}
+
+static int stm_release_channel(struct channel_data *ch_data, int channel)
+{
+ if ((channel < 0) || (channel >= STM_NUMBER_OF_CHANNEL))
+ return -EINVAL;
+ stm_free_channel(channel);
+ clear_bit(channel, ch_data->bitmap);
+ return 0;
+}
+
+/*
+ * Trace a buffer on a given channel
+ * with auto time stamping on last byte(s) only
+ */
+int stm_trace_buffer_onchannel(int channel,
+ const void *data, size_t length)
+{
+ int i, mod64;
+ volatile struct stm_channel __iomem *pch;
+
+ if (channel >= STM_NUMBER_OF_CHANNEL || !stm_channels)
+ return 0;
+
+ pch = &stm_channels[channel];
+
+ /* Align data pointer to u64 & time stamp last byte(s) */
+ mod64 = (int)data & 7;
+ i = length - 8 + mod64;
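+ /* Write the leading (possibly unaligned) part; a write that is the last one of the buffer goes to a time-stamped register */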
+ switch (mod64) {
+ case 0:
+ if (i)
+ pch->no_stamp64 = *(u64 *)data;
+ else {
+ pch->stamp64 = *(u64 *)data;
+ return length;
+ }
+ data += 8;
+ break;
+ case 1:
+ pch->no_stamp8 = *(u8 *)data;
+ pch->no_stamp16 = *(u16 *)(data+1);
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+3);
+ else {
+ pch->stamp32 = *(u32 *)(data+3);
+ return length;
+ }
+ data += 7;
+ break;
+ case 2:
+ pch->no_stamp16 = *(u16 *)data;
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+2);
+ else {
+ pch->stamp32 = *(u32 *)(data+2);
+ return length;
+ }
+ data += 6;
+ break;
+ case 3:
+ pch->no_stamp8 = *(u8 *)data;
+ if (i)
+ pch->no_stamp32 = *(u32 *)(data+1);
+ else {
+ pch->stamp32 = *(u32 *)(data+1);
+ return length;
+ }
+ data += 5;
+ break;
+ case 4:
+ if (i)
+ pch->no_stamp32 = *(u32 *)data;
+ else {
+ pch->stamp32 = *(u32 *)data;
+ return length;
+ }
+ data += 4;
+ break;
+ case 5:
+ pch->no_stamp8 = *(u8 *)data;
+ if (i)
+ pch->no_stamp16 = *(u16 *)(data+1);
+ else {
+ pch->stamp16 = *(u16 *)(data+1);
+ return length;
+ }
+ data += 3;
+ break;
+ case 6:
+ if (i)
+ pch->no_stamp16 = *(u16 *)data;
+ else {
+ pch->stamp16 = *(u16 *)data;
+ return length;
+ }
+ data += 2;
+ break;
+ case 7:
+ if (i)
+ pch->no_stamp8 = *(u8 *)data;
+ else {
+ pch->stamp8 = *(u8 *)data;
+ return length;
+ }
+ data++;
+ break;
+ }
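+ /* Pointer is now 64-bit aligned: stream the rest, time-stamping only the final write */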
+ for (;;) {
+ if (i > 8) {
+ pch->no_stamp64 = *(u64 *)data;
+ data += 8;
+ i -= 8;
+ } else if (i == 8) {
+ pch->stamp64 = *(u64 *)data;
+ break;
+ } else if (i > 4) {
+ pch->no_stamp32 = *(u32 *)data;
+ data += 4;
+ i -= 4;
+ } else if (i == 4) {
+ pch->stamp32 = *(u32 *)data;
+ break;
+ } else if (i > 2) {
+ pch->no_stamp16 = *(u16 *)data;
+ data += 2;
+ i -= 2;
+ } else if (i == 2) {
+ pch->stamp16 = *(u16 *)data;
+ break;
+ } else {
+ pch->stamp8 = *(u8 *)data;
+ break;
+ }
+ }
+ return length;
+}
+EXPORT_SYMBOL(stm_trace_buffer_onchannel);
+
+static int stm_open(struct inode *inode, struct file *file)
+{
+ struct channel_data *channel_data;
+ int retval = 0;
+
+ channel_data = kzalloc(sizeof(struct channel_data), GFP_KERNEL);
+ if (channel_data == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&channel_data->lock);
+ channel_data->numero = -1; /* Channel not yet allocated */
+ file->private_data = channel_data;
+
+ /*
+ * Check if microsd is selected as trace interface
+ * and enable corresponding pins muxing.
+ */
+ if (IS_MODEM_ON_MICROSD)
+ retval = stm_connection_set(NULL, STM_STE_MODEM_ON_MICROSD);
+ else if (IS_APE_ON_MICROSD)
+ retval = stm_connection_set(NULL, STM_STE_APE_ON_MICROSD);
+
+ if (retval)
+ pr_alert("stm_open: failed to connect STM output\n");
+
+ return retval;
+}
+
+static int stm_release(struct inode *inode, struct file *file)
+{
+ struct channel_data *channel;
+
+ channel = (struct channel_data *)file->private_data;
+
+ /* Free allocated channel if necessary */
+ if (channel->numero != -1)
+ stm_free_channel(channel->numero);
+
+ bitmap_andnot(stm.ch_bitmap, stm.ch_bitmap,
+ channel->bitmap, STM_NUMBER_OF_CHANNEL);
+
+ kfree(channel);
+ return 0;
+}
+
+static ssize_t stm_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+{
+ struct channel_data *channel = file->private_data;
+
+ /* Allocate a channel on first write */
+ if (channel->numero == -1) {
+ channel->numero = stm_alloc_channel(0);
+ if (channel->numero >= STM_NUMBER_OF_CHANNEL) {
+ channel->numero = -1;
+ return -ENOMEM;
+ }
+ }
+
+ if (size > STM_BUFSIZE)
+ size = STM_BUFSIZE;
+
+ spin_lock(&channel->lock);
+
+ if (copy_from_user
+ (channel->data_buffer, (void __user *) buf, size)) {
+ spin_unlock(&channel->lock);
+ return -EFAULT;
+ }
+ size = stm_trace_buffer_onchannel(channel->numero,
+ channel->data_buffer, size);
+
+ spin_unlock(&channel->lock);
+
+ return size;
+}
+
+static int stm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /*
+ * Don't allow a mapping that covers more than the STM channels
+ */
+ if ((vma->vm_end - vma->vm_start) >
+ STM_NUMBER_OF_CHANNEL*sizeof(struct stm_channel))
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ stm.pdata->channels_phys_base>>PAGE_SHIFT,
+ STM_NUMBER_OF_CHANNEL*sizeof(struct stm_channel),
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/* Enable the trace for given sources (bitfield) */
+static void stm_enable_src(unsigned int v)
+{
+ unsigned int cr_val;
+ spin_lock(&lock);
+ cr_val = readl(STM_CR);
+ cr_val &= ~STM_CLOCK_MASK;
+ writel(cr_val|(stm_clockdiv<<STM_CLOCK_SHIFT), STM_CR);
+ /*
+ * If the kernel argument stm_ter has been set by the boot loader
+ * all calls to stm_enable_src will be ignored
+ */
+ v = stm_ter ? stm_ter : v;
+ writel(v, STM_TER);
+ spin_unlock(&lock);
+}
+
+/* Disable all sources */
+static void stm_disable_src(void)
+{
+ writel(0x0, STM_CR); /* stop clock */
+ writel(0x0, STM_TER); /* Disable cores */
+}
+
+/* Set clock speed */
+static int stm_set_ckdiv(enum clock_div v)
+{
+ unsigned int val;
+
+ spin_lock(&lock);
+ val = readl(STM_CR);
+ val &= ~STM_CLOCK_MASK;
+ writel(val | ((v << STM_CLOCK_SHIFT) & STM_CLOCK_MASK), STM_CR);
+ spin_unlock(&lock);
+ stm_clockdiv = v;
+
+ return 0;
+}
+
+/* Return the control register */
+static inline unsigned int stm_get_cr(void)
+{
+ return readl(STM_CR);
+}
+
+/*
+ * Set trace mode lossless/lossy (software/hardware);
+ * each bit represents the mode of the corresponding source.
+ */
+static inline void stm_set_modes(unsigned int modes)
+{
+ writel(modes, STM_MMC);
+}
+
+/*
+ * Get trace mode lossless/lossy (software/hardware);
+ * each bit represents the mode of the corresponding source.
+ */
+static inline unsigned int stm_get_modes(void)
+{
+ return readl(STM_MMC);
+}
+
+/* Count # of free channels */
+static int stm_nb_free_channels(void)
+{
+ int nb_channels, offset;
+
+ nb_channels = 0;
+ offset = 0;
+ for (;;) {
+ offset = find_next_zero_bit(stm.ch_bitmap,
+ STM_NUMBER_OF_CHANNEL, offset);
+ if (offset == STM_NUMBER_OF_CHANNEL)
+ break;
+ offset++;
+ nb_channels++;
+ }
+ return nb_channels;
+}
+
+static long stm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct channel_data *channel = file->private_data;
+
+ switch (cmd) {
+
+ case STM_CONNECTION:
+ if (stm.pdata->stm_connection)
+ stm.pdata->stm_connection(arg);
+ stm_connection = arg;
+ break;
+
+ case STM_DISABLE:
+ stm_disable_src();
+ break;
+
+ case STM_GET_NB_MAX_CHANNELS:
+ err = put_user(STM_NUMBER_OF_CHANNEL, (unsigned int *)arg);
+ break;
+
+ case STM_GET_NB_FREE_CHANNELS:
+ err = put_user(stm_nb_free_channels(), (unsigned int *)arg);
+ break;
+
+ case STM_GET_CHANNEL_NO:
+ err = put_user(channel->numero, (unsigned int *)arg);
+ break;
+
+ case STM_SET_CLOCK_DIV:
+ err = stm_set_ckdiv((enum clock_div) arg);
+ break;
+
+ case STM_SET_MODE:
+ stm_set_modes(arg);
+ break;
+
+ case STM_GET_MODE:
+ err = put_user(stm_get_modes(), (unsigned int *)arg);
+ break;
+
+ case STM_GET_CTRL_REG:
+ err = put_user(stm_get_cr(), (unsigned int *)arg);
+ break;
+
+ case STM_ENABLE_SRC:
+ stm_enable_src(arg);
+ break;
+
+ case STM_GET_FREE_CHANNEL:
+ err = stm_get_channel(channel, (int *)arg);
+ break;
+
+ case STM_RELEASE_CHANNEL:
+ err = stm_release_channel(channel, arg);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * Trace a buffer on a dynamically allocated channel
+ * with auto time stamping on the first byte(s) only
+ * Dynamic channel number >=
+ * STM_NUMBER_OF_CHANNEL - NB_KERNEL_DYNAMIC_CHANNEL
+ */
+int stm_trace_buffer(const void *data, size_t length)
+{
+ int channel;
+
+ channel = stm_alloc_channel(STM_NUMBER_OF_CHANNEL
+ - NB_KERNEL_DYNAMIC_CHANNEL);
+ if (channel < STM_NUMBER_OF_CHANNEL) {
+ length = stm_trace_buffer_onchannel(channel, data, length);
+ stm_free_channel(channel);
+ return length;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(stm_trace_buffer);
+
+static const struct file_operations stm_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = stm_ioctl,
+ .open = stm_open,
+ .llseek = no_llseek,
+ .write = stm_write,
+ .release = stm_release,
+ .mmap = stm_mmap,
+};
+
+/*
+ * Init and deinit driver
+ */
+
+static int __devinit stm_probe(struct platform_device *pdev)
+{
+ int retval = 0;
+
+ if (!pdev || !pdev->dev.platform_data) {
+ pr_alert("No device/platform_data found on STM driver\n");
+ return -ENODEV;
+ }
+
+ stm.pdata = pdev->dev.platform_data;
+
+ cdev_init(&cdev, &stm_fops);
+ cdev.owner = THIS_MODULE;
+
+ stm_channels =
+ ioremap_nocache(stm.pdata->channels_phys_base,
+ STM_NUMBER_OF_CHANNEL*sizeof(*stm_channels));
+ if (stm_channels == NULL) {
+ dev_err(&pdev->dev, "could not remap STM Msg register\n");
+ return -ENODEV;
+ }
+
+ stm.virtbase = ioremap_nocache(stm.pdata->regs_phys_base, SZ_4K);
+ if (stm.virtbase == NULL) {
+ retval = -EIO;
+ dev_err(&pdev->dev, "could not remap STM Register\n");
+ goto err_channels;
+ }
+
+ retval = cdev_add(&cdev, MKDEV(stm_major, 0), 1);
+ if (retval) {
+ dev_err(&pdev->dev, "chardev registration failed\n");
+ goto err_channels;
+ }
+
+ if (IS_ERR(device_create(stm_class, &pdev->dev,
+ MKDEV(stm_major, 0), NULL, STM_DEV_NAME)))
+ dev_err(&pdev->dev, "can't create device\n");
+
+ /* Check chip IDs if necessary */
+ if (stm.pdata->id_mask) {
+ u32 periph_id, cell_id;
+
+ periph_id = (readb(STMPERIPHID3)<<24) +
+ (readb(STMPERIPHID2)<<16) +
+ (readb(STMPERIPHID1)<<8) +
+ readb(STMPERIPHID0);
+ cell_id = (readb(STMPCELLID3)<<24) +
+ (readb(STMPCELLID2)<<16) +
+ (readb(STMPCELLID1)<<8) +
+ readb(STMPCELLID0);
+ /* Only warn if it is not a supported ST-Ericsson part */
+ if ((periph_id & stm.pdata->id_mask) != 0x00080dec ||
+ cell_id != 0xb105f00d) {
+ dev_warn(&pdev->dev, "STM-Trace IC not compatible\n");
+ dev_warn(&pdev->dev, "periph_id=%x\n", periph_id);
+ dev_warn(&pdev->dev, "pcell_id=%x\n", cell_id);
+ }
+ }
+
+ /* Reserve channels if necessary */
+ if (stm.pdata->channels_reserved_sz) {
+ int i;
+
+ for (i = 0; i < stm.pdata->channels_reserved_sz; i++) {
+ set_bit(stm.pdata->channels_reserved[i],
+ stm.ch_bitmap);
+ }
+ }
+ /* Reserve kernel trace channels on demand */
+#ifdef CONFIG_STM_PRINTK
+ set_bit(CONFIG_STM_PRINTK_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_FTRACE
+ set_bit(CONFIG_STM_FTRACE_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_CTX_SWITCH
+ set_bit(CONFIG_STM_CTX_SWITCH_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_WAKEUP
+ set_bit(CONFIG_STM_WAKEUP_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_STACK_TRACE
+ set_bit(CONFIG_STM_STACK_TRACE_CHANNEL, stm.ch_bitmap);
+#endif
+#ifdef CONFIG_STM_TRACE_PRINTK
+ set_bit(CONFIG_STM_TRACE_PRINTK_CHANNEL, stm.ch_bitmap);
+ set_bit(CONFIG_STM_TRACE_BPRINTK_CHANNEL, stm.ch_bitmap);
+#endif
+
+ /* Check kernel's environment parameters first */
+ if (IS_APE_ON_MIPI34)
+ stm_connection = STM_STE_APE_ON_MIPI34_NONE_ON_MIPI60;
+ else if (IS_APE_ON_MIPI60)
+ stm_connection = STM_STE_MODEM_ON_MIPI34_APE_ON_MIPI60;
+
+ /* Apply parameters to driver */
+ if (stm.pdata->stm_connection) {
+ retval = stm.pdata->stm_connection(stm_connection);
+ if (retval) {
+ dev_err(&pdev->dev, "failed to connect STM output\n");
+ goto err_channels;
+ }
+ }
+
+ /* Enable STM Masters given in pdata */
+ if (stm.pdata->masters_enabled)
+ stm_enable_src(stm.pdata->masters_enabled);
+ stm_set_modes(STM_MMC_DEFAULT); /* Set all sources in HW mode */
+
+ dev_info(&pdev->dev, "STM-Trace driver probed successfully\n");
+ stm_printk("STM-Trace driver initialized\n");
+ return 0;
+
+err_channels:
+ iounmap(stm_channels);
+ return retval;
+}
+
+static int __devexit stm_remove(struct platform_device *pdev)
+{
+ device_destroy(stm_class, MKDEV(stm_major, 0));
+ cdev_del(&cdev);
+
+ if (stm.pdata->stm_connection)
+ (void) stm.pdata->stm_connection(STM_DISCONNECT);
+
+ stm_disable_src();
+ iounmap(stm.virtbase);
+ iounmap(stm_channels);
+
+ return 0;
+}
+
+int stm_printk(const char *fmt, ...)
+{
+ int ret;
+ size_t size;
+ va_list args;
+
+ va_start(args, fmt);
+ arch_spin_lock(&stm_buf_lock);
+ size = vscnprintf((char *)stm_printk_buf,
+ sizeof(stm_printk_buf), fmt, args);
+ ret = stm_trace_buffer(stm_printk_buf, size);
+ arch_spin_unlock(&stm_buf_lock);
+ va_end(args);
+ return ret;
+}
+EXPORT_SYMBOL(stm_printk);
+
+/*
+ * Debugfs interface
+ */
+
+static int stm_connection_show(void *data, u64 *val)
+{
+ *val = stm_connection;
+ return 0;
+}
+
+static int stm_connection_set(void *data, u64 val)
+{
+ int retval = 0;
+
+ if (stm.pdata->stm_connection) {
+ stm_connection = val;
+ retval = stm.pdata->stm_connection(val);
+ }
+ return retval;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_connection_fops, stm_connection_show,
+ stm_connection_set, "%llu\n");
+
+static int stm_clockdiv_show(void *data, u64 *val)
+{
+ *val = stm_clockdiv;
+ return 0;
+}
+
+static int stm_clockdiv_set(void *data, u64 val)
+{
+ stm_set_ckdiv(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_clockdiv_fops, stm_clockdiv_show,
+ stm_clockdiv_set, "%llu\n");
+
+static int stm_masters_enable_show(void *data, u64 *val)
+{
+ *val = readl(STM_TER);
+ return 0;
+}
+
+static int stm_masters_enable_set(void *data, u64 val)
+{
+ stm_enable_src(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_masters_enable_fops, stm_masters_enable_show,
+ stm_masters_enable_set, "%08llx\n");
+
+static int stm_masters_modes_show(void *data, u64 *val)
+{
+ *val = stm_get_modes();
+ return 0;
+}
+
+static int stm_masters_modes_set(void *data, u64 val)
+{
+ stm_set_modes(val);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_masters_modes_fops, stm_masters_modes_show,
+ stm_masters_modes_set, "%08llx\n");
+
+/* Count # of free channels */
+static int stm_free_channels_show(void *data, u64 *val)
+{
+ *val = stm_nb_free_channels();
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(stm_free_channels_fops, stm_free_channels_show,
+ NULL, "%lld\n");
+
+static __init int stm_init_debugfs(void)
+{
+ struct dentry *d_stm;
+
+ d_stm = debugfs_create_dir(STM_DEV_NAME, NULL);
+ if (!d_stm)
+ return -ENOMEM;
+
+ (void) debugfs_create_file("connection", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_connection_fops);
+ (void) debugfs_create_file("clockdiv", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_clockdiv_fops);
+ (void) debugfs_create_file("masters_enable", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_masters_enable_fops);
+ (void) debugfs_create_file("masters_modes", S_IRUGO | S_IWUGO, d_stm,
+ NULL, &stm_masters_modes_fops);
+ (void) debugfs_create_file("free_channels", S_IRUGO, d_stm,
+ NULL, &stm_free_channels_fops);
+ return 0;
+}
+fs_initcall(stm_init_debugfs);
+
+static struct platform_driver stm_driver = {
+ .probe = stm_probe,
+ .remove = __devexit_p(stm_remove),
+ .driver = {
+ .name = STM_DEV_NAME,
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init stm_init(void)
+{
+ int retval;
+ dev_t dev;
+
+ stm_class = class_create(THIS_MODULE, STM_DEV_NAME);
+ if (IS_ERR(stm_class)) {
+ pr_err("stm: can't register stm class\n");
+ return PTR_ERR(stm_class);
+ }
+
+ retval = alloc_chrdev_region(&dev, 0, 1, STM_DEV_NAME);
+ if (retval) {
+ pr_err("stm: can't register character device\n");
+ class_destroy(stm_class);
+ return retval;
+ }
+ stm_major = MAJOR(dev);
+ return platform_driver_register(&stm_driver);
+}
+
+static void __exit stm_exit(void)
+{
+ platform_driver_unregister(&stm_driver);
+ unregister_chrdev_region(MKDEV(stm_major, 0), 1);
+ class_destroy(stm_class);
+}
+
+arch_initcall(stm_init); /* Init STM as early as possible, but after GPIO init */
+module_exit(stm_exit);
+
+MODULE_AUTHOR("Paul Ghaleb - ST Microelectronics");
+MODULE_AUTHOR("Pierre Peiffer - ST-Ericsson");
+MODULE_AUTHOR("Philippe Langlais - ST-Ericsson");
+MODULE_DESCRIPTION("System Trace Module driver");
+MODULE_ALIAS("stm");
+MODULE_ALIAS("stm-trace");
+MODULE_LICENSE("GPL v2");